diff --git a/libs/cli/langchain_cli/integration_template/docs/tools.ipynb b/libs/cli/langchain_cli/integration_template/docs/tools.ipynb index 7ca2ac5378d..f62e83fc6d8 100644 --- a/libs/cli/langchain_cli/integration_template/docs/tools.ipynb +++ b/libs/cli/langchain_cli/integration_template/docs/tools.ipynb @@ -181,11 +181,11 @@ "id": "659f9fbd-6fcf-445f-aa8c-72d8e60154bd", "metadata": {}, "source": [ - "## Chaining\n", + "## Use within an agent\n", "\n", "- TODO: Add user question and run cells\n", "\n", - "We can use our tool in a chain by first binding it to a [tool-calling model](/docs/how_to/tool_calling/) and then calling it:\n", + "We can use our tool in an [agent](/docs/concepts/agents/). For this we will need an LLM with [tool-calling](/docs/how_to/tool_calling/) capabilities:\n", "\n", "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", "\n", @@ -208,6 +208,19 @@ "llm = init_chat_model(model=\"gpt-4o\", model_provider=\"openai\")" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "bea35fa1", + "metadata": {}, + "outputs": [], + "source": [ + "from langgraph.prebuilt import create_react_agent\n", + "\n", + "tools = [tool]\n", + "agent = create_react_agent(llm, tools)" ] + }, { "cell_type": "code", "execution_count": null, @@ -215,32 +228,14 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_core.runnables import RunnableConfig, chain\n", + "example_query = \"...\"\n", "\n", - "prompt = ChatPromptTemplate(\n", - " [\n", - " (\"system\", \"You are a helpful assistant.\"),\n", - " (\"human\", \"{user_input}\"),\n", - " (\"placeholder\", \"{messages}\"),\n", - " ]\n", + "events = agent.stream(\n", + " {\"messages\": [(\"user\", example_query)]},\n", + " stream_mode=\"values\",\n", ")\n", - "\n", - "# specifying tool_choice will force the model to call this tool.\n", - "llm_with_tools = llm.bind_tools([tool], tool_choice=tool.name)\n", - "\n", - "llm_chain = prompt 
| llm_with_tools\n", - "\n", - "\n", - "@chain\n", - "def tool_chain(user_input: str, config: RunnableConfig):\n", - " input_ = {\"user_input\": user_input}\n", - " ai_msg = llm_chain.invoke(input_, config=config)\n", - " tool_msgs = tool.batch(ai_msg.tool_calls, config=config)\n", - " return llm_chain.invoke({**input_, \"messages\": [ai_msg, *tool_msgs]}, config=config)\n", - "\n", - "\n", - "tool_chain.invoke(\"...\")" + "for event in events:\n", + " event[\"messages\"][-1].pretty_print()" ] }, {