Bagatur/oai assistant (#13010)

This commit is contained in:
Bagatur 2023-11-07 11:44:53 -08:00 committed by GitHub
parent 74134dd7e1
commit 57e19989f6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 567 additions and 17 deletions

View File

@ -17,12 +17,12 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install \"openai>=1\""
"!pip install -U openai \"langchain>=0.0.331rc1\" langchain-experimental"
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 2,
"id": "c3e067ce-7a43-47a7-bc89-41f1de4cf136",
"metadata": {},
"outputs": [],
@ -43,17 +43,17 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 3,
"id": "1c8c3965-d3c9-4186-b5f3-5e67855ef916",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='The image appears to be a diagram representing the architecture or components of a software platform named \"LangChain.\" This diagram outlines various layers and elements of the platform, which seems to be related to language processing or computational linguistics, as suggested by the context clues in the names of the components.\\n\\nHere\\'s a breakdown of the components shown:\\n\\n- **LangSmith**: This seems to be a tool or suite related to testing, evaluation, monitoring, feedback, and annotation within the platform.\\n\\n- **LangServe**: This could represent a service layer that exposes the platform\\'s capabilities as REST API endpoints.\\n\\n- **Templates**: These are likely reference applications provided as starting points or examples for users of the platform.\\n\\n- **Chains, agents, agent executors**: This section describes the common application logic, perhaps indicating that the platform uses a chain of agents or processes to execute tasks.\\n\\n- **Model I/O**: This includes the components related to input/output processing for a model, like prompt, example selector, model, and output parser.\\n\\n- **Retrieval**: These components are involved in retrieving documents, splitting text, and managing embeddings and vector stores, which are important for tasks like search and information retrieval.\\n\\n- **Agent tooling**: This might refer to the tools used for creating,')"
"AIMessage(content='The image appears to be a diagram of a software architecture or framework related to language processing, named \"LangChain.\" This architecture seems to be designed to handle various aspects of natural language understanding or processing tasks. Here are the components as labeled in the diagram:\\n\\n1. **LangSmith**: This component is related to testing, evaluation, monitoring, feedback, and annotation. It also has a debugging aspect to it.\\n2. **LangServe**: This is a service that can chain as REST API, indicating it might serve language processing capabilities over a network.\\n3. **Templates**: Reference applications are mentioned here, which might be used as starting points or examples for building new applications.\\n4. **Chains, agents, agent executors**: This part of the system handles common application logic, potentially acting as the middleware or the operational logic of the framework.\\n5. **Model I/O**: Input/output management for the model, including prompts, example selectors, the models themselves, and output parsers.\\n6. **Retrieval**: Components related to retrieving documents, including a document loader, text splitter, embedding model, vector store, and retriever. These are likely used for finding and processing relevant information from a large corpus of text.\\n7. **Agent tooling**: This might refer')"
]
},
"execution_count": 2,
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
@ -78,6 +78,227 @@
")"
]
},
{
"cell_type": "markdown",
"id": "210f8248-fcf3-4052-a4a3-0684e08f8785",
"metadata": {},
"source": [
"## [OpenAI assistants](https://platform.openai.com/docs/assistants/overview)\n",
"\n",
"> The Assistants API allows you to build AI assistants within your own applications. An Assistant has instructions and can leverage models, tools, and knowledge to respond to user queries. The Assistants API currently supports three types of tools: Code Interpreter, Retrieval, and Function calling\n",
"\n",
"\n",
"You can interact with OpenAI Assistants using OpenAI tools or custom tools. When using exclusively OpenAI tools, you can just invoke the assistant directly and get final answers. When using custom tools, you can run the assistant and tool execution loop using the built-in AgentExecutor or easily write your own executor.\n",
"\n",
"Below we show the different ways to interact with Assistants. As a simple example, let's build a math tutor that can write and run code."
]
},
{
"cell_type": "markdown",
"id": "318da28d-4cec-42ab-ae3e-76d95bb34fa5",
"metadata": {},
"source": [
"### Using only OpenAI tools"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "a9064bbe-d9f7-4a29-a7b3-73933b3197e7",
"metadata": {},
"outputs": [],
"source": [
"from langchain_experimental.openai_assistant import OpenAIAssistantRunnable\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "7a20a008-49ac-46d2-aa26-b270118af5ea",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[ThreadMessage(id='msg_RGOsJ2RBYp79rILrZ0NsAX68', assistant_id='asst_9Xb1ZgefoAbp2V5ddRlDGisy', content=[MessageContentText(text=Text(annotations=[], value='\\\\( 10 - 4^{2.7} \\\\) is approximately -32.2243.'), type='text')], created_at=1699385426, file_ids=[], metadata={}, object='thread.message', role='assistant', run_id='run_E2PRoP04ryly4p5ds5ek2qxs', thread_id='thread_pu9CpsYIWWZtxemZxpbYfhm4')]"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"interpreter_assistant = OpenAIAssistantRunnable.create_assistant(\n",
" name=\"langchain assistant\",\n",
" instructions=\"You are a personal math tutor. Write and run code to answer math questions.\",\n",
" tools=[{\"type\": \"code_interpreter\"}],\n",
" model=\"gpt-4-1106-preview\"\n",
")\n",
"output = interpreter_assistant.invoke({\"content\": \"What's 10 - 4 raised to the 2.7\"})\n",
"output"
]
},
{
"cell_type": "markdown",
"id": "a8ddd181-ac63-4ab6-a40d-a236120379c1",
"metadata": {},
"source": [
"### As a LangChain agent with arbitrary tools\n",
"\n",
"Now let's recreate this functionality using our own tools. For this example we'll use the [E2B sandbox runtime tool](https://e2b.dev/docs?ref=landing-page-get-started)."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "48681ac7-b267-48d4-972c-8a7df8393a21",
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools import E2BDataAnalysisTool\n",
"\n",
"tools = [E2BDataAnalysisTool(api_key=\"...\")]"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "1c01dd79-dd3e-4509-a2e2-009a7f99f16a",
"metadata": {},
"outputs": [],
"source": [
"agent = OpenAIAssistantRunnable.create_assistant(\n",
" name=\"langchain assistant e2b tool\",\n",
" instructions=\"You are a personal math tutor. Write and run code to answer math questions.\",\n",
" tools=tools,\n",
" model=\"gpt-4-1106-preview\",\n",
" as_agent=True\n",
")"
]
},
{
"cell_type": "markdown",
"id": "1ac71d8b-4b4b-4f98-b826-6b3c57a34166",
"metadata": {},
"source": [
"#### Using AgentExecutor"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "1f137f94-801f-4766-9ff5-2de9df5e8079",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'content': \"What's 10 - 4 raised to the 2.7\",\n",
" 'output': 'The result of \\\\(10 - 4\\\\) raised to the power of \\\\(2.7\\\\) is approximately \\\\(126.19\\\\).'}"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.agents import AgentExecutor\n",
"\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools)\n",
"agent_executor.invoke({\"content\": \"What's 10 - 4 raised to the 2.7\"})"
]
},
{
"cell_type": "markdown",
"id": "2d0a0b1d-c1b3-4b50-9dce-1189b51a6206",
"metadata": {},
"source": [
"#### Custom execution"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "c0475fa7-b6c1-4331-b8e2-55407466c724",
"metadata": {},
"outputs": [],
"source": [
"agent = OpenAIAssistantRunnable.create_assistant(\n",
" name=\"langchain assistant e2b tool\",\n",
" instructions=\"You are a personal math tutor. Write and run code to answer math questions.\",\n",
" tools=tools,\n",
" model=\"gpt-4-1106-preview\",\n",
" as_agent=True\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "b76cb669-6aba-4827-868f-00aa960026f2",
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.agent import AgentFinish\n",
"\n",
"def execute_agent(agent, tools, input):\n",
" tool_map = {tool.name: tool for tool in tools}\n",
" response = agent.invoke(input)\n",
" while not isinstance(response, AgentFinish):\n",
" tool_outputs = []\n",
" for action in response:\n",
" tool_output = tool_map[action.tool].invoke(action.tool_input)\n",
" print(action.tool, action.tool_input, tool_output, end=\"\\n\\n\")\n",
" tool_outputs.append({\"output\": tool_output, \"tool_call_id\": action.tool_call_id})\n",
" response = agent.invoke({\"tool_outputs\": tool_outputs, \"run_id\": action.run_id, \"thread_id\": action.thread_id})\n",
" \n",
" return response"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "7946116a-b82f-492e-835e-ca958a8949a5",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"e2b_data_analysis {'python_code': 'print((10 - 4) ** 2.7)'} {\"stdout\": \"126.18518711065899\", \"stderr\": \"\", \"artifacts\": []}\n",
"\n",
"\\( 10 - 4 \\) raised to the power of 2.7 is approximately 126.185.\n"
]
}
],
"source": [
"response = execute_agent(agent, tools, {\"content\": \"What's 10 - 4 raised to the 2.7\"})\n",
"print(response.return_values[\"output\"])"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "f2744a56-9f4f-4899-827a-fa55821c318c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"e2b_data_analysis {'python_code': 'result = (10 - 4) ** 2.7 + 17.241\\nprint(result)'} {\"stdout\": \"143.426187110659\", \"stderr\": \"\", \"artifacts\": []}\n",
"\n",
"\\( (10 - 4)^{2.7} + 17.241 \\) is approximately 143.426.\n"
]
}
],
"source": [
"next_response = execute_agent(agent, tools, {\"content\": \"now add 17.241\", \"thread_id\": response.thread_id})\n",
"print(next_response.return_values[\"output\"])"
]
},
{
"cell_type": "markdown",
"id": "71c34763-d1e7-4b9a-a9d7-3e4cc0dfc2c4",
@ -92,7 +313,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 16,
"id": "db6072c4-f3f3-415d-872b-71ea9f3c02bb",
"metadata": {},
"outputs": [
@ -135,7 +356,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 17,
"id": "08e00ccf-b991-4249-846b-9500a0ccbfa0",
"metadata": {},
"outputs": [
@ -146,7 +367,7 @@
" {'name': 'Deepmind', 'origin': 'UK'}]}"
]
},
"execution_count": 9,
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
@ -169,7 +390,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 18,
"id": "1281883c-bf8f-4665-89cd-4f33ccde69ab",
"metadata": {},
"outputs": [
@ -197,14 +418,6 @@
")\n",
"print(output.llm_output)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5c637ba1-322d-4fc9-b97e-3afa83dc4d72",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {

View File

@ -0,0 +1,3 @@
# Public entry point for the OpenAI Assistants integration: re-export the
# runnable so callers can import it directly from
# ``langchain_experimental.openai_assistant``.
from langchain_experimental.openai_assistant.base import OpenAIAssistantRunnable

__all__ = ["OpenAIAssistantRunnable"]

View File

@ -0,0 +1,334 @@
from __future__ import annotations
import json
from time import sleep
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
from langchain.pydantic_v1 import Field
from langchain.schema.agent import AgentAction, AgentFinish
from langchain.schema.runnable import RunnableConfig, RunnableSerializable
from langchain.tools import format_tool_to_openai_function
from langchain.tools.base import BaseTool
if TYPE_CHECKING:
import openai
from openai.types.beta.threads import ThreadMessage
from openai.types.beta.threads.required_action_function_tool_call import (
RequiredActionFunctionToolCall,
)
class OpenAIAssistantFinish(AgentFinish):
    """AgentFinish with run and thread metadata."""

    # Identifier of the OpenAI run that produced the final answer.
    run_id: str
    # Identifier of the thread the run executed on. Callers can pass this back
    # in a later invocation to continue the same conversation.
    thread_id: str
class OpenAIAssistantAction(AgentAction):
    """AgentAction with info needed to submit custom tool output to existing run."""

    # Id of the specific tool call whose output must be submitted back.
    tool_call_id: str
    # Run that is blocked in "requires_action" waiting for this tool output.
    run_id: str
    # Thread the run belongs to.
    thread_id: str
def _get_openai_client() -> openai.OpenAI:
try:
import openai
return openai.OpenAI()
except ImportError as e:
raise ImportError(
"Unable to import openai, please install with `pip install openai`."
) from e
except AttributeError as e:
raise AttributeError(
"Please make sure you are using a v1.1-compatible version of openai. You "
'can install with `pip install "openai>=1.1"`.'
) from e
# Everything ``OpenAIAssistantRunnable.invoke`` may return:
# - agent mode (as_agent=True): an OpenAIAssistantFinish or a list of
#   OpenAIAssistantAction tool invocations to execute;
# - raw mode: raw OpenAI ThreadMessage or RequiredActionFunctionToolCall lists.
OutputType = Union[
    List[OpenAIAssistantAction],
    OpenAIAssistantFinish,
    List["ThreadMessage"],
    List["RequiredActionFunctionToolCall"],
]
class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
    """Run an OpenAI Assistant.

    Example using OpenAI tools:
        .. code-block:: python

            from langchain_experimental.openai_assistant import OpenAIAssistantRunnable

            interpreter_assistant = OpenAIAssistantRunnable.create_assistant(
                name="langchain assistant",
                instructions="You are a personal math tutor. Write and run code to answer math questions.",
                tools=[{"type": "code_interpreter"}],
                model="gpt-4-1106-preview"
            )
            output = interpreter_assistant.invoke({"content": "What's 10 - 4 raised to the 2.7"})

    Example using custom tools and AgentExecutor:
        .. code-block:: python

            from langchain_experimental.openai_assistant import OpenAIAssistantRunnable
            from langchain.agents import AgentExecutor
            from langchain.tools import E2BDataAnalysisTool

            tools = [E2BDataAnalysisTool(api_key="...")]
            agent = OpenAIAssistantRunnable.create_assistant(
                name="langchain assistant e2b tool",
                instructions="You are a personal math tutor. Write and run code to answer math questions.",
                tools=tools,
                model="gpt-4-1106-preview",
                as_agent=True
            )

            agent_executor = AgentExecutor(agent=agent, tools=tools)
            agent_executor.invoke({"content": "What's 10 - 4 raised to the 2.7"})

    Example using custom tools and custom execution:
        .. code-block:: python

            from langchain_experimental.openai_assistant import OpenAIAssistantRunnable
            from langchain.agents import AgentExecutor
            from langchain.schema.agent import AgentFinish
            from langchain.tools import E2BDataAnalysisTool

            tools = [E2BDataAnalysisTool(api_key="...")]
            agent = OpenAIAssistantRunnable.create_assistant(
                name="langchain assistant e2b tool",
                instructions="You are a personal math tutor. Write and run code to answer math questions.",
                tools=tools,
                model="gpt-4-1106-preview",
                as_agent=True
            )

            def execute_agent(agent, tools, input):
                tool_map = {tool.name: tool for tool in tools}
                response = agent.invoke(input)
                while not isinstance(response, AgentFinish):
                    tool_outputs = []
                    for action in response:
                        tool_output = tool_map[action.tool].invoke(action.tool_input)
                        print(action.tool, action.tool_input, tool_output, end="\n\n")
                        tool_outputs.append({"output": tool_output, "tool_call_id": action.tool_call_id})
                    response = agent.invoke(
                        {
                            "tool_outputs": tool_outputs,
                            "run_id": action.run_id,
                            "thread_id": action.thread_id
                        }
                    )

                return response

            response = execute_agent(agent, tools, {"content": "What's 10 - 4 raised to the 2.7"})
            next_response = execute_agent(agent, tools, {"content": "now add 17.241", "thread_id": response.thread_id})
    """  # noqa: E501

    client: openai.OpenAI = Field(default_factory=_get_openai_client)
    """OpenAI client."""
    assistant_id: str
    """OpenAI assistant id."""
    check_every_ms: float = 1_000.0
    """Frequency with which to check run progress in ms."""
    as_agent: bool = False
    """Use as a LangChain agent, compatible with the AgentExecutor."""

    @classmethod
    def create_assistant(
        cls,
        name: str,
        instructions: str,
        tools: Sequence[Union[BaseTool, dict]],
        model: str,
        *,
        client: Optional[openai.OpenAI] = None,
        **kwargs: Any,
    ) -> OpenAIAssistantRunnable:
        """Create an OpenAI Assistant and instantiate the Runnable.

        Args:
            name: Assistant name.
            instructions: Assistant instructions.
            tools: Assistant tools. Can be passed in in OpenAI format or as BaseTools.
            model: Assistant model to use.
            client: OpenAI client. Will create default client if not specified.

        Returns:
            OpenAIAssistantRunnable configured to run using the created assistant.
        """
        client = client or _get_openai_client()
        # Normalize LangChain BaseTools into OpenAI's function-tool schema;
        # plain dicts are assumed to already be in OpenAI tool format
        # (e.g. {"type": "code_interpreter"}).
        openai_tools: List = []
        for tool in tools:
            if isinstance(tool, BaseTool):
                tool = {
                    "type": "function",
                    "function": format_tool_to_openai_function(tool),
                }
            openai_tools.append(tool)
        assistant = client.beta.assistants.create(
            name=name,
            instructions=instructions,
            tools=openai_tools,
            model=model,
        )
        # NOTE(review): a custom `client` is used to create the assistant but is
        # not passed to cls(), so the returned runnable falls back to the
        # default client — confirm this is intended.
        return cls(assistant_id=assistant.id, **kwargs)

    def invoke(
        self, input: dict, config: Optional[RunnableConfig] = None
    ) -> OutputType:
        """Invoke assistant.

        Args:
            input: Runnable input dict that can have:
                content: User message when starting a new run.
                thread_id: Existing thread to use.
                run_id: Existing run to use. Should only be supplied when providing
                    the tool output for a required action after an initial invocation.
                file_ids: File ids to include in new run. Used for retrieval.
                message_metadata: Metadata to associate with new message.
                thread_metadata: Metadata to associate with new thread. Only relevant
                    when new thread being created.
                instructions: Additional run instructions.
                model: Override Assistant model for this run.
                tools: Override Assistant tools for this run.
                run_metadata: Metadata to associate with new run.
            config: Runnable config (currently unused by this implementation).

        Return:
            If self.as_agent, will return
                Union[List[OpenAIAssistantAction], OpenAIAssistantFinish]. Otherwise
                will return OpenAI types
                Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]].
        """
        # In agent mode, intermediate_steps from the AgentExecutor are rewritten
        # into a tool-output submission payload first.
        input = self._parse_input(input)
        # Three mutually exclusive invocation modes:
        # 1. No thread_id: start a brand-new thread seeded with the user message.
        if "thread_id" not in input:
            thread = {
                "messages": [
                    {
                        "role": "user",
                        "content": input["content"],
                        "file_ids": input.get("file_ids", []),
                        "metadata": input.get("message_metadata"),
                    }
                ],
                "metadata": input.get("thread_metadata"),
            }
            run = self._create_thread_and_run(input, thread)
        # 2. thread_id but no run_id: append the message to the existing thread
        #    and start a fresh run on it.
        elif "run_id" not in input:
            _ = self.client.beta.threads.messages.create(
                input["thread_id"],
                content=input["content"],
                role="user",
                file_ids=input.get("file_ids", []),
                metadata=input.get("message_metadata"),
            )
            run = self._create_run(input)
        # 3. Both thread_id and run_id: submit tool outputs for a run that is
        #    blocked in the "requires_action" state.
        else:
            run = self.client.beta.threads.runs.submit_tool_outputs(**input)
        # Block until the run settles, then convert it to the declared OutputType.
        return self._get_response(run.id, run.thread_id)

    def _parse_input(self, input: dict) -> dict:
        # When driven by an AgentExecutor, translate the latest
        # (action, observation) pair into the tool-output payload expected by
        # the Assistants API. Only the last step is submitted.
        if self.as_agent and input.get("intermediate_steps"):
            last_action, last_output = input["intermediate_steps"][-1]
            input = {
                "tool_outputs": [
                    {"output": last_output, "tool_call_id": last_action.tool_call_id}
                ],
                "run_id": last_action.run_id,
                "thread_id": last_action.thread_id,
            }
        return input

    def _create_run(self, input: dict) -> Any:
        # Forward only the per-run override keys; everything else in `input`
        # (content, file_ids, metadata, ...) has already been consumed.
        # NOTE(review): "run_metadata" is forwarded under that literal kwarg
        # name — the OpenAI SDK's runs.create expects `metadata`; confirm this
        # key is accepted or remap it.
        params = {
            k: v
            for k, v in input.items()
            if k in ("instructions", "model", "tools", "run_metadata")
        }
        return self.client.beta.threads.runs.create(
            input["thread_id"],
            assistant_id=self.assistant_id,
            **params,
        )

    def _create_thread_and_run(self, input: dict, thread: dict) -> Any:
        # Same override filtering as _create_run; `thread` carries the initial
        # message(s) and thread metadata built by invoke().
        params = {
            k: v
            for k, v in input.items()
            if k in ("instructions", "model", "tools", "run_metadata")
        }
        run = self.client.beta.threads.create_and_run(
            assistant_id=self.assistant_id,
            thread=thread,
            **params,
        )
        return run

    def _get_response(self, run_id: str, thread_id: str) -> Any:
        # TODO: Pagination
        import openai

        # Poll until the run leaves the queued/in_progress states.
        run = self._wait_for_run(run_id, thread_id)
        if run.status == "completed":
            messages = self.client.beta.threads.messages.list(thread_id, order="asc")
            # The thread holds the whole conversation; keep only messages
            # produced by this run.
            new_messages = [msg for msg in messages if msg.run_id == run_id]
            if not self.as_agent:
                return new_messages
            # Flatten message content parts; if every part is plain text,
            # collapse to a single string so AgentFinish carries a str output
            # like other agents.
            answer: Any = [
                msg_content for msg in new_messages for msg_content in msg.content
            ]
            if all(
                isinstance(content, openai.types.beta.threads.MessageContentText)
                for content in answer
            ):
                answer = "\n".join(content.text.value for content in answer)
            return OpenAIAssistantFinish(
                return_values={"output": answer},
                log="",
                run_id=run_id,
                thread_id=thread_id,
            )
        elif run.status == "requires_action":
            if not self.as_agent:
                return run.required_action.submit_tool_outputs.tool_calls
            # Convert each required tool call into an AgentAction carrying the
            # ids needed to route its output back to this exact run.
            actions = []
            for tool_call in run.required_action.submit_tool_outputs.tool_calls:
                function = tool_call.function
                args = json.loads(function.arguments)
                actions.append(
                    OpenAIAssistantAction(
                        tool=function.name,
                        tool_input=args,
                        tool_call_id=tool_call.id,
                        log="",
                        run_id=run_id,
                        thread_id=thread_id,
                    )
                )
            return actions
        else:
            # Terminal failure states (e.g. "failed", "cancelled", "expired"):
            # surface the full run payload to aid debugging.
            run_info = json.dumps(run.dict(), indent=2)
            raise ValueError(
                f"Unknown run status {run.status}. Full run info:\n\n{run_info})"
            )

    def _wait_for_run(self, run_id: str, thread_id: str) -> Any:
        # Simple blocking poll; check_every_ms controls the sleep between
        # status checks.
        in_progress = True
        while in_progress:
            run = self.client.beta.threads.runs.retrieve(run_id, thread_id=thread_id)
            in_progress = run.status in ("in_progress", "queued")
            if in_progress:
                sleep(self.check_every_ms / 1000)
        return run