diff --git a/docs/docs/integrations/llms/ibm_watsonx.ipynb b/docs/docs/integrations/llms/ibm_watsonx.ipynb
index 0a0168a53fd..f045696343c 100644
--- a/docs/docs/integrations/llms/ibm_watsonx.ipynb
+++ b/docs/docs/integrations/llms/ibm_watsonx.ipynb
@@ -24,7 +24,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 1,
    "id": "2f1fff4e",
    "metadata": {},
    "outputs": [],
@@ -45,7 +45,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": 2,
    "id": "11d572a1",
    "metadata": {},
    "outputs": [],
@@ -93,7 +93,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 3,
    "id": "407cd500",
    "metadata": {},
    "outputs": [],
@@ -194,6 +194,28 @@
     ")"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "7c4a632b",
+   "metadata": {},
+   "source": [
+    "You can also pass IBM's [`ModelInference`](https://ibm.github.io/watsonx-ai-python-sdk/fm_model_inference.html) object to the `WatsonxLLM` class."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5335b148",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from ibm_watsonx_ai.foundation_models import ModelInference\n",
+    "\n",
+    "model = ModelInference(...)\n",
+    "\n",
+    "watsonx_llm = WatsonxLLM(watsonx_model=model)"
+   ]
+  },
   {
    "cell_type": "markdown",
    "id": "c25ecbd1",
@@ -213,6 +235,7 @@
     "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "template = \"Generate a random question about {topic}: Question: \"\n",
+    "\n",
     "prompt = PromptTemplate.from_template(template)"
    ]
   },
@@ -221,31 +244,32 @@
    "id": "79056d8e",
    "metadata": {},
    "source": [
-    "Provide a topic and run the `LLMChain`."
+    "Provide a topic and run the chain."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": 9,
    "id": "dc076c56",
    "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "{'topic': 'dog', 'text': 'Why do dogs howl?'}"
+       "'What is the difference between a dog and a wolf?'"
       ]
      },
-     "execution_count": 10,
+     "execution_count": 9,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "from langchain.chains import LLMChain\n",
+    "llm_chain = prompt | watsonx_llm\n",
     "\n",
-    "llm_chain = LLMChain(prompt=prompt, llm=watsonx_llm)\n",
-    "llm_chain.invoke(\"dog\")"
+    "topic = \"dog\"\n",
+    "\n",
+    "llm_chain.invoke(topic)"
    ]
   },
   {
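
Note on the new `ModelInference` cell: the constructor arguments are deliberately elided (`ModelInference(...)`). As a minimal sketch of how the two added cells fit together, assuming the notebook's `WatsonxLLM` comes from the `langchain_ibm` package and using placeholder endpoint, key, model, and project values that are not part of this diff:

```python
from ibm_watsonx_ai import Credentials
from ibm_watsonx_ai.foundation_models import ModelInference
from langchain_ibm import WatsonxLLM  # assumed import; not shown in this diff

# Build the SDK-level inference object first. All values below are
# placeholders -- substitute your own endpoint, API key, model id,
# and project id.
model = ModelInference(
    model_id="ibm/granite-13b-instruct-v2",
    credentials=Credentials(
        url="https://us-south.ml.cloud.ibm.com",
        api_key="YOUR_API_KEY",
    ),
    project_id="YOUR_PROJECT_ID",
)

# Hand the ready-made ModelInference object to WatsonxLLM, as the new
# notebook cell shows; credentials live on the SDK object, so none are
# passed to WatsonxLLM itself.
watsonx_llm = WatsonxLLM(watsonx_model=model)
```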