This commit is contained in:
isaac hershenson
2024-08-09 10:29:13 -07:00
parent dfab23f931
commit 91c2dd92b1
2 changed files with 1 additions and 339 deletions

View File

@@ -368,7 +368,7 @@ MISCELLANEOUS_TOOL_FEAT_TABLE = {
},
"Robocorp": {
"link": "/docs/integrations/tools/robocorp",
"description": "Integrate custom actions with your agents"
"description": "Integrate custom actions with your agents",
},
"Human as a tool": {
"link": "/docs/integrations/tools/human_tools",

View File

@@ -1,338 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"from typing import List, Tuple\n",
"from typing_extensions import Annotated\n",
"\n",
"from langchain_core.documents import Document\n",
"from langchain_core.pydantic_v1 import BaseModel\n",
"from langchain_core.tools import tool\n",
"\n",
"from langgraph.prebuilt import InjectedState\n",
"\n",
"\n",
"@tool(parse_docstring=True, response_format=\"content_and_artifact\")\n",
"def get_context(question: List[str]) -> Tuple[str, List[Document]]:\n",
" \"\"\"Get context on the question.\n",
"\n",
" Args:\n",
" question: The user question\n",
" \"\"\"\n",
" # return constant dummy output\n",
" docs = [\n",
" Document(\n",
" \"FooBar company just raised 1 Billion dollars!\",\n",
" metadata={\"source\": \"twitter\"},\n",
" ),\n",
" Document(\n",
" \"FooBar company is now only hiring AI's\", metadata={\"source\": \"twitter\"}\n",
" ),\n",
" Document(\n",
" \"FooBar company was founded in 2019\", metadata={\"source\": \"wikipedia\"}\n",
" ),\n",
" Document(\n",
" \"FooBar company makes friendly robots\", metadata={\"source\": \"wikipedia\"}\n",
" ),\n",
" ]\n",
" return \"\\n\\n\".join(doc.page_content for doc in docs), docs\n",
"\n",
"\n",
"@tool(parse_docstring=True, response_format=\"content_and_artifact\")\n",
"def cite_context_sources(\n",
" claim: str, state: Annotated[dict, InjectedState]\n",
") -> Tuple[str, List[Document]]:\n",
" \"\"\"Cite which source a claim was based on.\n",
"\n",
" Args:\n",
" claim: The claim that was made.\n",
" \"\"\"\n",
" docs = []\n",
" # We get the potentially cited docs from past ToolMessages in our state.\n",
" for msg in state[\"messages\"]:\n",
" if isinstance(msg, ToolMessage) and msg.name == \"get_context\":\n",
" docs.extend(msg.artifact)\n",
"\n",
" class Cite(BaseModel):\n",
" \"\"\"Return the index(es) of the documents that justify the claim\"\"\"\n",
"\n",
" indexes: List[int]\n",
"\n",
" structured_model = model.with_structured_output(Cite)\n",
" system = f\"Which of the following documents best justifies the claim:\\n\\n{claim}\"\n",
" context = \"\\n\\n\".join(\n",
" f\"Document {i}:\\n\" + doc.page_content for i, doc in enumerate(docs)\n",
" )\n",
" citation = structured_model.invoke([(\"system\", system), (\"human\", context)])\n",
" cited_docs = [docs[i] for i in citation.indexes]\n",
" sources = \", \".join(doc.metadata[\"source\"] for doc in cited_docs)\n",
" return sources, cited_docs"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"import operator\n",
"from typing import Annotated, Sequence, TypedDict\n",
"\n",
"from langchain_core.messages import BaseMessage\n",
"\n",
"\n",
"class AgentState(TypedDict):\n",
" messages: Annotated[Sequence[BaseMessage], operator.add]"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"from copy import deepcopy\n",
"\n",
"from langchain_core.messages import ToolMessage\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"from langgraph.prebuilt import ToolNode\n",
"\n",
"model = ChatOpenAI(model=\"gpt-4o\", temperature=0)\n",
"\n",
"\n",
"# Define the function that determines whether to continue or not\n",
"def should_continue(state, config):\n",
" messages = state[\"messages\"]\n",
" last_message = messages[-1]\n",
" # If there is no function call, then we finish\n",
" if not last_message.tool_calls:\n",
" return \"end\"\n",
" # Otherwise if there is, we continue\n",
" else:\n",
" return \"continue\"\n",
"\n",
"\n",
"tools = [get_context, cite_context_sources]\n",
"\n",
"# Define the function that calls the model\n",
"def call_model(state, config):\n",
" messages = state[\"messages\"]\n",
" model_with_tools = model.bind_tools(tools)\n",
" response = model_with_tools.invoke(messages)\n",
" # We return a list, because this will get added to the existing list\n",
" return {\"messages\": [response]}\n",
"\n",
"\n",
"# ToolNode will automatically take care of injecting state into tools\n",
"tool_node = ToolNode(tools)"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"from langgraph.graph import END, START, StateGraph\n",
"\n",
"# Define a new graph\n",
"workflow = StateGraph(AgentState)\n",
"\n",
"# Define the two nodes we will cycle between\n",
"workflow.add_node(\"agent\", call_model)\n",
"workflow.add_node(\"action\", tool_node)\n",
"\n",
"# Set the entrypoint as `agent`\n",
"# This means that this node is the first one called\n",
"workflow.add_edge(START, \"agent\")\n",
"\n",
"# We now add a conditional edge\n",
"workflow.add_conditional_edges(\n",
" # First, we define the start node. We use `agent`.\n",
" # This means these are the edges taken after the `agent` node is called.\n",
" \"agent\",\n",
" # Next, we pass in the function that will determine which node is called next.\n",
" should_continue,\n",
" # Finally we pass in a mapping.\n",
" # The keys are strings, and the values are other nodes.\n",
" # END is a special node marking that the graph should finish.\n",
" # What will happen is we will call `should_continue`, and then the output of that\n",
" # will be matched against the keys in this mapping.\n",
" # Based on which one it matches, that node will then be called.\n",
" {\n",
" # If `tools`, then we call the tool node.\n",
" \"continue\": \"action\",\n",
" # Otherwise we finish.\n",
" \"end\": END,\n",
" },\n",
")\n",
"\n",
"# We now add a normal edge from `tools` to `agent`.\n",
"# This means that after `tools` is called, `agent` node is called next.\n",
"workflow.add_edge(\"action\", \"agent\")\n",
"\n",
"# Finally, we compile it!\n",
"# This compiles it into a LangChain Runnable,\n",
"# meaning you can use it as you would any other runnable\n",
"app = workflow.compile()"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Output from node 'agent':\n",
"---\n",
"{'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_fjVgtlkC4uIYgmtWDiRC4PLJ', 'function': {'arguments': '{\"question\":[\"what\\'s the latest news about FooBar\"]}', 'name': 'get_context'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 22, 'prompt_tokens': 87, 'total_tokens': 109}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_3aa7262c27', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-b79ed8f3-f4cd-407c-9595-618b34561cf6-0', tool_calls=[{'name': 'get_context', 'args': {'question': [\"what's the latest news about FooBar\"]}, 'id': 'call_fjVgtlkC4uIYgmtWDiRC4PLJ', 'type': 'tool_call'}], usage_metadata={'input_tokens': 87, 'output_tokens': 22, 'total_tokens': 109})]}\n",
"\n",
"---\n",
"\n",
"Output from node 'action':\n",
"---\n",
"{'messages': [ToolMessage(content=\"FooBar company just raised 1 Billion dollars!\\n\\nFooBar company is now only hiring AI's\\n\\nFooBar company was founded in 2019\\n\\nFooBar company makes friendly robots\", name='get_context', tool_call_id='call_fjVgtlkC4uIYgmtWDiRC4PLJ', artifact=[Document(metadata={'source': 'twitter'}, page_content='FooBar company just raised 1 Billion dollars!'), Document(metadata={'source': 'twitter'}, page_content=\"FooBar company is now only hiring AI's\"), Document(metadata={'source': 'wikipedia'}, page_content='FooBar company was founded in 2019'), Document(metadata={'source': 'wikipedia'}, page_content='FooBar company makes friendly robots')])]}\n",
"\n",
"---\n",
"\n",
"Output from node 'agent':\n",
"---\n",
"{'messages': [AIMessage(content=\"Here are the latest news updates about FooBar:\\n\\n1. **Funding News**: FooBar company has recently raised $1 billion.\\n2. **Hiring Update**: FooBar company is now exclusively hiring AI's.\\n3. **Company Background**: FooBar company was founded in 2019 and specializes in making friendly robots.\", response_metadata={'token_usage': {'completion_tokens': 68, 'prompt_tokens': 153, 'total_tokens': 221}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_3aa7262c27', 'finish_reason': 'stop', 'logprobs': None}, id='run-a1c848e7-4d41-4ae2-8027-3735d509bd9c-0', usage_metadata={'input_tokens': 153, 'output_tokens': 68, 'total_tokens': 221})]}\n",
"\n",
"---\n",
"\n"
]
}
],
"source": [
"from langchain_core.messages import HumanMessage\n",
"\n",
"messages = [HumanMessage(\"what's the latest news about FooBar\")]\n",
"for output in app.stream({\"messages\": messages}):\n",
" # stream() yields dictionaries with output keyed by node name\n",
" for key, value in output.items():\n",
" print(f\"Output from node '{key}':\")\n",
" print(\"---\")\n",
" print(value)\n",
" messages.extend(value[\"messages\"])\n",
" print(\"\\n---\\n\")"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
"from langgraph_sdk import get_client\n",
"\n",
"client = get_client(url=\"https://testing-imports-langgraph-r-a12e17b191b0592c8e5b9c432b7b8af7.default.us.langgraph.app\")"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"outputs": [],
"source": [
"assistants = await client.assistants.search()\n",
"assistant = assistants[0]"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"ename": "TypeError",
"evalue": "Assistants.create() got an unexpected keyword argument 'file_ids'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[1], line 8\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;66;03m#from langchain.tools import E2BDataAnalysisTool\u001b[39;00m\n\u001b[1;32m 4\u001b[0m \n\u001b[1;32m 5\u001b[0m \n\u001b[1;32m 6\u001b[0m \u001b[38;5;66;03m#tools = [E2BDataAnalysisTool(api_key=\"...\")]\u001b[39;00m\n\u001b[1;32m 7\u001b[0m tools\u001b[38;5;241m=\u001b[39m[]\n\u001b[0;32m----> 8\u001b[0m agent \u001b[38;5;241m=\u001b[39m \u001b[43mOpenAIAssistantRunnable\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcreate_assistant\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 9\u001b[0m \u001b[43m \u001b[49m\u001b[43mname\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mlangchain assistant e2b tool\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 10\u001b[0m \u001b[43m \u001b[49m\u001b[43minstructions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mYou are a personal math tutor. Write and run code to answer math questions.\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 11\u001b[0m \u001b[43m \u001b[49m\u001b[43mtools\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtools\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 12\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mgpt-4-1106-preview\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 13\u001b[0m \u001b[43m \u001b[49m\u001b[43mas_agent\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\n\u001b[1;32m 14\u001b[0m \u001b[43m)\u001b[49m\n\u001b[1;32m 16\u001b[0m agent_executor \u001b[38;5;241m=\u001b[39m AgentExecutor(agent\u001b[38;5;241m=\u001b[39magent, tools\u001b[38;5;241m=\u001b[39mtools)\n\u001b[1;32m 17\u001b[0m agent_executor\u001b[38;5;241m.\u001b[39minvoke({\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mWhat\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124ms 10 - 4 raised to the 2.7\u001b[39m\u001b[38;5;124m\"\u001b[39m})\n",
"File \u001b[0;32m~/.pyenv/versions/3.11.9/lib/python3.11/site-packages/langchain/agents/openai_assistant/base.py:270\u001b[0m, in \u001b[0;36mOpenAIAssistantRunnable.create_assistant\u001b[0;34m(cls, name, instructions, tools, model, client, **kwargs)\u001b[0m\n\u001b[1;32m 255\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Create an OpenAI Assistant and instantiate the Runnable.\u001b[39;00m\n\u001b[1;32m 256\u001b[0m \n\u001b[1;32m 257\u001b[0m \u001b[38;5;124;03mArgs:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 267\u001b[0m \u001b[38;5;124;03m OpenAIAssistantRunnable configured to run using the created assistant.\u001b[39;00m\n\u001b[1;32m 268\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 269\u001b[0m client \u001b[38;5;241m=\u001b[39m client \u001b[38;5;129;01mor\u001b[39;00m _get_openai_client()\n\u001b[0;32m--> 270\u001b[0m assistant \u001b[38;5;241m=\u001b[39m \u001b[43mclient\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbeta\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43massistants\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcreate\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 271\u001b[0m \u001b[43m \u001b[49m\u001b[43mname\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 272\u001b[0m \u001b[43m \u001b[49m\u001b[43minstructions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minstructions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 273\u001b[0m \u001b[43m \u001b[49m\u001b[43mtools\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[43m_get_assistants_tool\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mtool\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mtools\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore\u001b[39;49;00m\n\u001b[1;32m 274\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 275\u001b[0m \u001b[43m \u001b[49m\u001b[43mfile_ids\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfile_ids\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 276\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 277\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mcls\u001b[39m(assistant_id\u001b[38;5;241m=\u001b[39massistant\u001b[38;5;241m.\u001b[39mid, client\u001b[38;5;241m=\u001b[39mclient, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
"\u001b[0;31mTypeError\u001b[0m: Assistants.create() got an unexpected keyword argument 'file_ids'"
]
}
],
"source": [
"from langchain_experimental.openai_assistant import OpenAIAssistantRunnable\n",
"from langchain.agents import AgentExecutor\n",
"#from langchain.tools import E2BDataAnalysisTool\n",
"\n",
"\n",
"#tools = [E2BDataAnalysisTool(api_key=\"...\")]\n",
"tools=[]\n",
"agent = OpenAIAssistantRunnable.create_assistant(\n",
" name=\"langchain assistant e2b tool\",\n",
" instructions=\"You are a personal math tutor. Write and run code to answer math questions.\",\n",
" tools=tools,\n",
" model=\"gpt-4-1106-preview\",\n",
" as_agent=True\n",
")\n",
"\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools)\n",
"agent_executor.invoke({\"content\": \"What's 10 - 4 raised to the 2.7\"})"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"ename": "ModuleNotFoundError",
"evalue": "No module named 'langgraph.checkpoint_sqlite'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[3], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlanggraph\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcheckpoint_sqlite\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01maio\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m AsyncSqliteSaver\n",
"\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'langgraph.checkpoint_sqlite'"
]
}
],
"source": [
"from langgraph.checkpoint_sqlite.aio import AsyncSqliteSaver"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}