Mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-19 05:13:46 +00:00)
openai[patch]: support Responses API (#30231)
Co-authored-by: Bagatur <baskaryan@gmail.com>
parent 49bdd3b6fe
commit cd1ea8e94d
.github/workflows/_release.yml (vendored, 38 changed lines)
@ -100,15 +100,32 @@ jobs:
PKG_NAME: ${{ needs.build.outputs.pkg-name }}
VERSION: ${{ needs.build.outputs.version }}
run: |
PREV_TAG="$PKG_NAME==${VERSION%.*}.$(( ${VERSION##*.} - 1 ))"; [[ "${VERSION##*.}" -eq 0 ]] && PREV_TAG=""
# Handle regular versions and pre-release versions differently
if [[ "$VERSION" == *"-"* ]]; then
# This is a pre-release version (contains a hyphen)
# Extract the base version without the pre-release suffix
BASE_VERSION=${VERSION%%-*}
# Look for the latest release of the same base version
REGEX="^$PKG_NAME==$BASE_VERSION\$"
PREV_TAG=$(git tag --sort=-creatordate | (grep -P "$REGEX" || true) | head -1)

# backup case if releasing e.g. 0.3.0, looks up last release
# note if last release (chronologically) was e.g. 0.1.47 it will get
# that instead of the last 0.2 release
if [ -z "$PREV_TAG" ]; then
REGEX="^$PKG_NAME==\\d+\\.\\d+\\.\\d+\$"
echo $REGEX
PREV_TAG=$(git tag --sort=-creatordate | (grep -P $REGEX || true) | head -1)
# If no exact base version match, look for the latest release of any kind
if [ -z "$PREV_TAG" ]; then
REGEX="^$PKG_NAME==\\d+\\.\\d+\\.\\d+\$"
PREV_TAG=$(git tag --sort=-creatordate | (grep -P "$REGEX" || true) | head -1)
fi
else
# Regular version handling
PREV_TAG="$PKG_NAME==${VERSION%.*}.$(( ${VERSION##*.} - 1 ))"; [[ "${VERSION##*.}" -eq 0 ]] && PREV_TAG=""

# backup case if releasing e.g. 0.3.0, looks up last release
# note if last release (chronologically) was e.g. 0.1.47 it will get
# that instead of the last 0.2 release
if [ -z "$PREV_TAG" ]; then
REGEX="^$PKG_NAME==\\d+\\.\\d+\\.\\d+\$"
echo $REGEX
PREV_TAG=$(git tag --sort=-creatordate | (grep -P $REGEX || true) | head -1)
fi
fi

# if PREV_TAG is empty, let it be empty
@ -363,10 +380,9 @@ jobs:
# Shallow-fetch just that single tag
git fetch --depth=1 origin tag "$LATEST_PACKAGE_TAG"

# Navigate to the partner directory
cd $GITHUB_WORKSPACE/libs/partners/${{ matrix.partner }}

# Checkout the latest package files
rm -rf $GITHUB_WORKSPACE/libs/partners/${{ matrix.partner }}/*
cd $GITHUB_WORKSPACE/libs/partners/${{ matrix.partner }}
git checkout "$LATEST_PACKAGE_TAG" -- .

# Print as a sanity check
@ -322,7 +322,7 @@
|
||||
"source": [
|
||||
"### ``strict=True``\n",
|
||||
"\n",
|
||||
":::info Requires ``langchain-openai>=0.1.21rc1``\n",
|
||||
":::info Requires ``langchain-openai>=0.1.21``\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
@ -397,6 +397,405 @@
|
||||
"For more on binding tools and tool call outputs, head to the [tool calling](/docs/how_to/function_calling) docs."
|
||||
]
|
||||
},
|
||||
{
"cell_type": "markdown",
"id": "84833dd0-17e9-4269-82ed-550639d65751",
"metadata": {},
"source": [
"## Responses API\n",
"\n",
":::info Requires ``langchain-openai>=0.3.9-rc.1``\n",
"\n",
":::\n",
"\n",
"OpenAI supports a [Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions) API that is oriented toward building [agentic](/docs/concepts/agents/) applications. It includes a suite of [built-in tools](https://platform.openai.com/docs/guides/tools?api-mode=responses), including web and file search. It also supports management of [conversation state](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses), allowing you to continue a conversational thread without explicitly passing in previous messages.\n",
"\n",
"`ChatOpenAI` will route to the Responses API if one of these features is used. You can also specify `use_responses_api=True` when instantiating `ChatOpenAI`.\n",
"\n",
"### Built-in tools\n",
"\n",
"Equipping `ChatOpenAI` with built-in tools will ground its responses with outside information, such as via context in files or the web. The [AIMessage](/docs/concepts/messages/#aimessage) generated from the model will include information about the built-in tool invocation.\n",
"\n",
"#### Web search\n",
"\n",
"To trigger a web search, pass `{\"type\": \"web_search_preview\"}` to the model as you would another tool.\n",
"\n",
":::tip\n",
"\n",
"You can also pass built-in tools as invocation params:\n",
"```python\n",
"llm.invoke(\"...\", tools=[{\"type\": \"web_search_preview\"}])\n",
"```\n",
"\n",
":::"
]
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "0d8bfe89-948b-42d4-beac-85ef2a72491d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
|
||||
"\n",
|
||||
"tool = {\"type\": \"web_search_preview\"}\n",
|
||||
"llm_with_tools = llm.bind_tools([tool])\n",
|
||||
"\n",
|
||||
"response = llm_with_tools.invoke(\"What was a positive news story from today?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c9fe67c6-38ff-40a5-93b3-a4b7fca76372",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that the response includes structured [content blocks](/docs/concepts/messages/#content-1) that include both the text of the response and OpenAI [annotations](https://platform.openai.com/docs/guides/tools-web-search?api-mode=responses#output-and-citations) citing its sources:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "3ea5a4b1-f57a-4c8a-97f4-60ab8330a804",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'type': 'text',\n",
|
||||
" 'text': 'Today, a heartwarming story emerged from Minnesota, where a group of high school robotics students built a custom motorized wheelchair for a 2-year-old boy named Cillian Jackson. Born with a genetic condition that limited his mobility, Cillian\\'s family couldn\\'t afford the $20,000 wheelchair he needed. The students at Farmington High School\\'s Rogue Robotics team took it upon themselves to modify a Power Wheels toy car into a functional motorized wheelchair for Cillian, complete with a joystick, safety bumpers, and a harness. One team member remarked, \"I think we won here more than we do in our competitions. Instead of completing a task, we\\'re helping change someone\\'s life.\" ([boredpanda.com](https://www.boredpanda.com/wholesome-global-positive-news/?utm_source=openai))\\n\\nThis act of kindness highlights the profound impact that community support and innovation can have on individuals facing challenges. ',\n",
|
||||
" 'annotations': [{'end_index': 778,\n",
|
||||
" 'start_index': 682,\n",
|
||||
" 'title': '“Global Positive News”: 40 Posts To Remind Us There’s Good In The World',\n",
|
||||
" 'type': 'url_citation',\n",
|
||||
" 'url': 'https://www.boredpanda.com/wholesome-global-positive-news/?utm_source=openai'}]}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response.content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "95fbc34c-2f12-4d51-92c5-bf62a2f8900c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::tip\n",
|
||||
"\n",
|
||||
"You can recover just the text content of the response as a string by using `response.text()`. For example, to stream response text:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"for token in llm_with_tools.stream(\"...\"):\n",
|
||||
" print(token.text(), end=\"|\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"See the [streaming guide](/docs/how_to/chat_streaming/) for more detail.\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2a332940-d409-41ee-ac36-2e9bee900e83",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The output message will also contain information from any tool invocations:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "a8011049-6c90-4fcb-82d4-850c72b46941",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'tool_outputs': [{'id': 'ws_67d192aeb6cc81918e736ad4a57937570d6f8507990d9d71',\n",
|
||||
" 'status': 'completed',\n",
|
||||
" 'type': 'web_search_call'}]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response.additional_kwargs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "288d47bb-3ccb-412f-a3d3-9f6cee0e6214",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### File search\n",
|
||||
"\n",
|
||||
"To trigger a file search, pass a [file search tool](https://platform.openai.com/docs/guides/tools-file-search) to the model as you would another tool. You will need to populate an OpenAI-managed vector store and include the vector store ID in the tool definition. See [OpenAI documentation](https://platform.openai.com/docs/guides/tools-file-search) for more detail."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"id": "1f758726-33ef-4c04-8a54-49adb783bbb3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Deep Research by OpenAI is a new capability integrated into ChatGPT that allows for the execution of multi-step research tasks independently. It can synthesize extensive amounts of online information and produce comprehensive reports similar to what a research analyst would do, significantly speeding up processes that would typically take hours for a human.\n",
|
||||
"\n",
|
||||
"### Key Features:\n",
|
||||
"- **Independent Research**: Users simply provide a prompt, and the model can find, analyze, and synthesize information from hundreds of online sources.\n",
|
||||
"- **Multi-Modal Capabilities**: The model is also able to browse user-uploaded files, plot graphs using Python, and embed visualizations in its outputs.\n",
|
||||
"- **Training**: Deep Research has been trained using reinforcement learning on real-world tasks that require extensive browsing and reasoning.\n",
|
||||
"\n",
|
||||
"### Applications:\n",
|
||||
"- Useful for professionals in sectors like finance, science, policy, and engineering, enabling them to obtain accurate and thorough research quickly.\n",
|
||||
"- It can also be beneficial for consumers seeking personalized recommendations on complex purchases.\n",
|
||||
"\n",
|
||||
"### Limitations:\n",
|
||||
"Although Deep Research presents significant advancements, it has some limitations, such as the potential to hallucinate facts or struggle with authoritative information. \n",
|
||||
"\n",
|
||||
"Deep Research aims to facilitate access to thorough and documented information, marking a significant step toward the broader goal of developing artificial general intelligence (AGI).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
|
||||
"\n",
|
||||
"openai_vector_store_ids = [\n",
|
||||
" \"vs_...\", # your IDs here\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"tool = {\n",
|
||||
" \"type\": \"file_search\",\n",
|
||||
" \"vector_store_ids\": openai_vector_store_ids,\n",
|
||||
"}\n",
|
||||
"llm_with_tools = llm.bind_tools([tool])\n",
|
||||
"\n",
|
||||
"response = llm_with_tools.invoke(\"What is deep research by OpenAI?\")\n",
|
||||
"print(response.text())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f88bbd71-83b0-45a6-9141-46ec9da93df6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As with [web search](#web-search), the response will include content blocks with citations:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "865bc14e-1599-438e-be44-857891004979",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'file_id': 'file-3UzgX7jcC8Dt9ZAFzywg5k',\n",
|
||||
" 'index': 346,\n",
|
||||
" 'type': 'file_citation',\n",
|
||||
" 'filename': 'deep_research_blog.pdf'},\n",
|
||||
" {'file_id': 'file-3UzgX7jcC8Dt9ZAFzywg5k',\n",
|
||||
" 'index': 575,\n",
|
||||
" 'type': 'file_citation',\n",
|
||||
" 'filename': 'deep_research_blog.pdf'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response.content[0][\"annotations\"][:2]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dd00f6be-2862-4634-a0c3-14ee39915c90",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"It will also include information from the built-in tool invocations:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "e16a7110-d2d8-45fa-b372-5109f330540b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'tool_outputs': [{'id': 'fs_67d196fbb83c8191ba20586175331687089228ce932eceb1',\n",
|
||||
" 'queries': ['What is deep research by OpenAI?'],\n",
|
||||
" 'status': 'completed',\n",
|
||||
" 'type': 'file_search_call'}]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response.additional_kwargs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6fda05f0-4b81-4709-9407-f316d760ad50",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Managing conversation state\n",
|
||||
"\n",
|
||||
"The Responses API supports management of [conversation state](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses).\n",
|
||||
"\n",
|
||||
"#### Manually manage state\n",
|
||||
"\n",
|
||||
"You can manage the state manually or using [LangGraph](/docs/tutorials/chatbot/), as with other chat models:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "51d3e4d3-ea78-426c-9205-aecb0937fca7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"As of March 12, 2025, here are some positive news stories that highlight recent uplifting events:\n",
|
||||
"\n",
|
||||
"*... exemplify positive developments in health, environmental sustainability, and community well-being. \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
|
||||
"\n",
|
||||
"tool = {\"type\": \"web_search_preview\"}\n",
|
||||
"llm_with_tools = llm.bind_tools([tool])\n",
|
||||
"\n",
|
||||
"first_query = \"What was a positive news story from today?\"\n",
|
||||
"messages = [{\"role\": \"user\", \"content\": first_query}]\n",
|
||||
"\n",
|
||||
"response = llm_with_tools.invoke(messages)\n",
|
||||
"response_text = response.text()\n",
|
||||
"print(f\"{response_text[:100]}... {response_text[-100:]}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "5da9d20f-9712-46f4-a395-5be5a7c1bc62",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Your question was: \"What was a positive news story from today?\"\n",
|
||||
"\n",
|
||||
"The last sentence of my answer was: \"These stories exemplify positive developments in health, environmental sustainability, and community well-being.\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"second_query = (\n",
|
||||
" \"Repeat my question back to me, as well as the last sentence of your answer.\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"messages.extend(\n",
|
||||
" [\n",
|
||||
" response,\n",
|
||||
" {\"role\": \"user\", \"content\": second_query},\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"second_response = llm_with_tools.invoke(messages)\n",
|
||||
"print(second_response.text())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5fd8ca21-8a5e-4294-af32-11f26a040171",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::tip\n",
|
||||
"\n",
|
||||
"You can use [LangGraph](https://langchain-ai.github.io/langgraph/) to manage conversational threads for you in a variety of backends, including in-memory and Postgres. See [this tutorial](/docs/tutorials/chatbot/) to get started.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"#### Passing `previous_response_id`\n",
|
||||
"\n",
|
||||
"When using the Responses API, LangChain messages will include an `\"id\"` field in its metadata. Passing this ID to subsequent invocations will continue the conversation. Note that this is [equivalent](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses#openai-apis-for-conversation-state) to manually passing in messages from a billing perspective."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "009e541a-b372-410e-b9dd-608a8052ce09",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hi Bob! How can I assist you today?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(\n",
|
||||
" model=\"gpt-4o-mini\",\n",
|
||||
" use_responses_api=True,\n",
|
||||
")\n",
|
||||
"response = llm.invoke(\"Hi, I'm Bob.\")\n",
|
||||
"print(response.text())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "393a443a-4c5f-4a07-bc0e-c76e529b35e3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Your name is Bob. How can I help you today, Bob?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"second_response = llm.invoke(\n",
|
||||
" \"What is my name?\",\n",
|
||||
" previous_response_id=response.response_metadata[\"id\"],\n",
|
||||
")\n",
|
||||
"print(second_response.text())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "57e27714",
|
||||
|
@ -443,6 +443,11 @@ def add_ai_message_chunks(
    else:
        usage_metadata = None

    id = None
    for id_ in [left.id] + [o.id for o in others]:
        if id_:
            id = id_
            break
    return left.__class__(
        example=left.example,
        content=content,
@ -450,7 +455,7 @@ def add_ai_message_chunks(
        tool_call_chunks=tool_call_chunks,
        response_metadata=response_metadata,
        usage_metadata=usage_metadata,
        id=left.id,
        id=id,
    )
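# A rough illustration of the id-merging behavior added above (values are
# hypothetical; chunk addition goes through add_ai_message_chunks):
#
#     left = AIMessageChunk(content="", id=None)
#     right = AIMessageChunk(content="hi", id="msg_123")
#     (left + right).id  # -> "msg_123" (previously the merged id stayed None)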
|
||||
|
||||
|
||||
|
@ -531,9 +531,19 @@ def convert_to_openai_tool(

    'description' and 'parameters' keys are now optional. Only 'name' is
    required and guaranteed to be part of the output.

    .. versionchanged:: 0.3.44

        Return OpenAI Responses API-style tools unchanged. This includes
        any dict with "type" in "file_search", "function", "computer_use_preview",
        "web_search_preview".
    """
    if isinstance(tool, dict) and tool.get("type") == "function" and "function" in tool:
        return tool
    if isinstance(tool, dict):
        if tool.get("type") in ("function", "file_search", "computer_use_preview"):
            return tool
        # As of 03.12.25 can be "web_search_preview" or "web_search_preview_2025_03_11"
        if (tool.get("type") or "").startswith("web_search_preview"):
            return tool
    oai_function = convert_to_openai_function(tool, strict=strict)
    return {"type": "function", "function": oai_function}
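# A rough illustration of the pass-through behavior documented above (the
# multiply function is a hypothetical example; the dict forms mirror the
# branches in this function):
#
#     convert_to_openai_tool({"type": "web_search_preview"})
#     # -> {"type": "web_search_preview"}  (returned unchanged)
#
#     def multiply(a: int, b: int) -> int:
#         """Multiply two integers."""
#         return a * b
#
#     convert_to_openai_tool(multiply)
#     # -> {"type": "function", "function": {"name": "multiply", ...}}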
|
||||
|
||||
|
@ -17,7 +17,7 @@ dependencies = [
|
||||
"pydantic<3.0.0,>=2.7.4; python_full_version >= \"3.12.4\"",
|
||||
]
|
||||
name = "langchain-core"
|
||||
version = "0.3.44"
|
||||
version = "0.3.45-rc.1"
|
||||
description = "Building applications with LLMs through composability"
|
||||
readme = "README.md"
|
||||
|
||||
|
@ -133,6 +133,7 @@ def test_configurable() -> None:
|
||||
"extra_body": None,
|
||||
"include_response_headers": False,
|
||||
"stream_usage": False,
|
||||
"use_responses_api": None,
|
||||
},
|
||||
"kwargs": {
|
||||
"tools": [
|
||||
|
@ -12,9 +12,11 @@ import sys
|
||||
import warnings
|
||||
from functools import partial
|
||||
from io import BytesIO
|
||||
from json import JSONDecodeError
|
||||
from math import ceil
|
||||
from operator import itemgetter
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
AsyncIterator,
|
||||
Callable,
|
||||
@ -89,6 +91,7 @@ from langchain_core.runnables import (
|
||||
)
|
||||
from langchain_core.runnables.config import run_in_executor
|
||||
from langchain_core.tools import BaseTool
|
||||
from langchain_core.tools.base import _stringify
|
||||
from langchain_core.utils import get_pydantic_field_names
|
||||
from langchain_core.utils.function_calling import (
|
||||
convert_to_openai_function,
|
||||
@ -104,12 +107,17 @@ from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
|
||||
from pydantic.v1 import BaseModel as BaseModelV1
|
||||
from typing_extensions import Self
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from openai.types.responses import Response
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# This SSL context is equivalent to the default `verify=True`.
|
||||
# https://www.python-httpx.org/advanced/ssl/#configuring-client-instances
|
||||
global_ssl_context = ssl.create_default_context(cafile=certifi.where())
|
||||
|
||||
_FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__"
|
||||
|
||||
|
||||
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
|
||||
"""Convert a dictionary to a LangChain message.
|
||||
@ -528,6 +536,14 @@ class BaseChatOpenAI(BaseChatModel):
|
||||
invocation.
|
||||
"""
|
||||
|
||||
use_responses_api: Optional[bool] = None
|
||||
"""Whether to use the Responses API instead of the Chat API.
|
||||
|
||||
If not specified, it will be inferred from the invocation params.
|
||||
|
||||
.. versionadded:: 0.3.9
|
||||
"""
|
||||
|
||||
model_config = ConfigDict(populate_by_name=True)
|
||||
|
||||
@model_validator(mode="before")
|
||||
@ -654,7 +670,7 @@ class BaseChatOpenAI(BaseChatModel):
|
||||
if output is None:
|
||||
# Happens in streaming
|
||||
continue
|
||||
token_usage = output["token_usage"]
|
||||
token_usage = output.get("token_usage")
|
||||
if token_usage is not None:
|
||||
for k, v in token_usage.items():
|
||||
if v is None:
|
||||
@ -725,6 +741,50 @@ class BaseChatOpenAI(BaseChatModel):
|
||||
)
|
||||
return generation_chunk
|
||||
|
||||
def _stream_responses(
|
||||
self,
|
||||
messages: List[BaseMessage],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[CallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> Iterator[ChatGenerationChunk]:
|
||||
kwargs["stream"] = True
|
||||
payload = self._get_request_payload(messages, stop=stop, **kwargs)
|
||||
context_manager = self.root_client.responses.create(**payload)
|
||||
|
||||
with context_manager as response:
|
||||
for chunk in response:
|
||||
if generation_chunk := _convert_responses_chunk_to_generation_chunk(
|
||||
chunk
|
||||
):
|
||||
if run_manager:
|
||||
run_manager.on_llm_new_token(
|
||||
generation_chunk.text, chunk=generation_chunk
|
||||
)
|
||||
yield generation_chunk
|
||||
|
||||
async def _astream_responses(
|
||||
self,
|
||||
messages: List[BaseMessage],
|
||||
stop: Optional[List[str]] = None,
|
||||
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> AsyncIterator[ChatGenerationChunk]:
|
||||
kwargs["stream"] = True
|
||||
payload = self._get_request_payload(messages, stop=stop, **kwargs)
|
||||
context_manager = await self.root_async_client.responses.create(**payload)
|
||||
|
||||
async with context_manager as response:
|
||||
async for chunk in response:
|
||||
if generation_chunk := _convert_responses_chunk_to_generation_chunk(
|
||||
chunk
|
||||
):
|
||||
if run_manager:
|
||||
await run_manager.on_llm_new_token(
|
||||
generation_chunk.text, chunk=generation_chunk
|
||||
)
|
||||
yield generation_chunk
|
||||
|
||||
def _stream(
|
||||
self,
|
||||
messages: List[BaseMessage],
|
||||
@ -819,10 +879,19 @@ class BaseChatOpenAI(BaseChatModel):
|
||||
raw_response = self.client.with_raw_response.create(**payload)
|
||||
response = raw_response.parse()
|
||||
generation_info = {"headers": dict(raw_response.headers)}
|
||||
elif self._use_responses_api(payload):
|
||||
response = self.root_client.responses.create(**payload)
|
||||
return _construct_lc_result_from_responses_api(response)
|
||||
else:
|
||||
response = self.client.create(**payload)
|
||||
return self._create_chat_result(response, generation_info)
|
||||
|
||||
def _use_responses_api(self, payload: dict) -> bool:
|
||||
if isinstance(self.use_responses_api, bool):
|
||||
return self.use_responses_api
|
||||
else:
|
||||
return _use_responses_api(payload)
|
||||
|
||||
def _get_request_payload(
|
||||
self,
|
||||
input_: LanguageModelInput,
|
||||
@ -834,11 +903,12 @@ class BaseChatOpenAI(BaseChatModel):
|
||||
if stop is not None:
|
||||
kwargs["stop"] = stop
|
||||
|
||||
return {
|
||||
"messages": [_convert_message_to_dict(m) for m in messages],
|
||||
**self._default_params,
|
||||
**kwargs,
|
||||
}
|
||||
payload = {**self._default_params, **kwargs}
|
||||
if self._use_responses_api(payload):
|
||||
payload = _construct_responses_api_payload(messages, payload)
|
||||
else:
|
||||
payload["messages"] = [_convert_message_to_dict(m) for m in messages]
|
||||
return payload
|
||||
|
||||
def _create_chat_result(
|
||||
self,
|
||||
@ -877,6 +947,8 @@ class BaseChatOpenAI(BaseChatModel):
|
||||
"model_name": response_dict.get("model", self.model_name),
|
||||
"system_fingerprint": response_dict.get("system_fingerprint", ""),
|
||||
}
|
||||
if "id" in response_dict:
|
||||
llm_output["id"] = response_dict["id"]
|
||||
|
||||
if isinstance(response, openai.BaseModel) and getattr(
|
||||
response, "choices", None
|
||||
@ -989,6 +1061,9 @@ class BaseChatOpenAI(BaseChatModel):
|
||||
raw_response = await self.async_client.with_raw_response.create(**payload)
|
||||
response = raw_response.parse()
|
||||
generation_info = {"headers": dict(raw_response.headers)}
|
||||
elif self._use_responses_api(payload):
|
||||
response = await self.root_async_client.responses.create(**payload)
|
||||
return _construct_lc_result_from_responses_api(response)
|
||||
else:
|
||||
response = await self.async_client.create(**payload)
|
||||
return await run_in_executor(
|
||||
@ -1258,33 +1333,38 @@ class BaseChatOpenAI(BaseChatModel):
|
||||
formatted_tools = [
|
||||
convert_to_openai_tool(tool, strict=strict) for tool in tools
|
||||
]
|
||||
tool_names = []
|
||||
for tool in formatted_tools:
|
||||
if "function" in tool:
|
||||
tool_names.append(tool["function"]["name"])
|
||||
elif "name" in tool:
|
||||
tool_names.append(tool["name"])
|
||||
else:
|
||||
pass
|
||||
if tool_choice:
|
||||
if isinstance(tool_choice, str):
|
||||
# tool_choice is a tool/function name
|
||||
if tool_choice not in ("auto", "none", "any", "required"):
|
||||
if tool_choice in tool_names:
|
||||
tool_choice = {
|
||||
"type": "function",
|
||||
"function": {"name": tool_choice},
|
||||
}
|
||||
elif tool_choice in (
|
||||
"file_search",
|
||||
"web_search_preview",
|
||||
"computer_use_preview",
|
||||
):
|
||||
tool_choice = {"type": tool_choice}
|
||||
# 'any' is not natively supported by OpenAI API.
|
||||
# We support 'any' since other models use this instead of 'required'.
|
||||
if tool_choice == "any":
|
||||
elif tool_choice == "any":
|
||||
tool_choice = "required"
|
||||
else:
|
||||
pass
|
||||
elif isinstance(tool_choice, bool):
|
||||
tool_choice = "required"
|
||||
elif isinstance(tool_choice, dict):
|
||||
tool_names = [
|
||||
formatted_tool["function"]["name"]
|
||||
for formatted_tool in formatted_tools
|
||||
]
|
||||
if not any(
|
||||
tool_name == tool_choice["function"]["name"]
|
||||
for tool_name in tool_names
|
||||
):
|
||||
raise ValueError(
|
||||
f"Tool choice {tool_choice} was specified, but the only "
|
||||
f"provided tools were {tool_names}."
|
||||
)
|
||||
pass
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Unrecognized tool_choice type. Expected str, bool or dict. "
|
||||
@ -1562,6 +1642,8 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
|
||||
stream_options: Dict
|
||||
Configure streaming outputs, like whether to return token usage when
|
||||
streaming (``{"include_usage": True}``).
|
||||
use_responses_api: Optional[bool]
|
||||
Whether to use the responses API.
|
||||
|
||||
See full list of supported init args and their descriptions in the params section.
|
||||
|
||||
@ -1805,6 +1887,79 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
|
||||
|
||||
See ``ChatOpenAI.bind_tools()`` method for more.
|
||||
|
||||
.. dropdown:: Built-in tools
|
||||
|
||||
.. versionadded:: 0.3.9
|
||||
|
||||
You can access `built-in tools <https://platform.openai.com/docs/guides/tools?api-mode=responses>`_
|
||||
supported by the OpenAI Responses API. See LangChain
|
||||
`docs <https://python.langchain.com/docs/integrations/chat/openai/>`_ for more
|
||||
detail.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
llm = ChatOpenAI(model="gpt-4o-mini")
|
||||
|
||||
tool = {"type": "web_search_preview"}
|
||||
llm_with_tools = llm.bind_tools([tool])
|
||||
|
||||
response = llm_with_tools.invoke("What was a positive news story from today?")
|
||||
response.content
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
[
|
||||
{
|
||||
"type": "text",
|
||||
"text": "Today, a heartwarming story emerged from ...",
|
||||
"annotations": [
|
||||
{
|
||||
"end_index": 778,
|
||||
"start_index": 682,
|
||||
"title": "Title of story",
|
||||
"type": "url_citation",
|
||||
"url": "<url of story>",
|
||||
}
|
||||
],
|
||||
}
|
||||
]
|
||||
|
||||
.. dropdown:: Managing conversation state
|
||||
|
||||
.. versionadded:: 0.3.9
|
||||
|
||||
OpenAI's Responses API supports management of
|
||||
`conversation state <https://platform.openai.com/docs/guides/conversation-state?api-mode=responses>`_.
|
||||
Passing in response IDs from previous messages will continue a conversational
|
||||
thread. See LangChain
|
||||
`docs <https://python.langchain.com/docs/integrations/chat/openai/>`_ for more
|
||||
detail.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
llm = ChatOpenAI(model="gpt-4o-mini", use_responses_api=True)
|
||||
response = llm.invoke("Hi, I'm Bob.")
|
||||
response.text()
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
"Hi Bob! How can I assist you today?"
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
second_response = llm.invoke(
|
||||
"What is my name?", previous_response_id=response.response_metadata["id"]
|
||||
)
|
||||
second_response.text()
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
"Your name is Bob. How can I help you today, Bob?"
|
||||
|
||||
.. dropdown:: Structured output
|
||||
|
||||
.. code-block:: python
|
||||
@ -2082,27 +2237,34 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
|
||||
self, *args: Any, stream_usage: Optional[bool] = None, **kwargs: Any
|
||||
) -> Iterator[ChatGenerationChunk]:
|
||||
"""Set default stream_options."""
|
||||
stream_usage = self._should_stream_usage(stream_usage, **kwargs)
|
||||
# Note: stream_options is not a valid parameter for Azure OpenAI.
|
||||
# To support users proxying Azure through ChatOpenAI, here we only specify
|
||||
# stream_options if include_usage is set to True.
|
||||
# See https://learn.microsoft.com/en-us/azure/ai-services/openai/whats-new
|
||||
# for release notes.
|
||||
if stream_usage:
|
||||
kwargs["stream_options"] = {"include_usage": stream_usage}
|
||||
if self._use_responses_api(kwargs):
|
||||
return super()._stream_responses(*args, **kwargs)
|
||||
else:
|
||||
stream_usage = self._should_stream_usage(stream_usage, **kwargs)
|
||||
# Note: stream_options is not a valid parameter for Azure OpenAI.
|
||||
# To support users proxying Azure through ChatOpenAI, here we only specify
|
||||
# stream_options if include_usage is set to True.
|
||||
# See https://learn.microsoft.com/en-us/azure/ai-services/openai/whats-new
|
||||
# for release notes.
|
||||
if stream_usage:
|
||||
kwargs["stream_options"] = {"include_usage": stream_usage}
|
||||
|
||||
return super()._stream(*args, **kwargs)
|
||||
return super()._stream(*args, **kwargs)
|
||||
|
||||
async def _astream(
|
||||
self, *args: Any, stream_usage: Optional[bool] = None, **kwargs: Any
|
||||
) -> AsyncIterator[ChatGenerationChunk]:
|
||||
"""Set default stream_options."""
|
||||
stream_usage = self._should_stream_usage(stream_usage, **kwargs)
|
||||
if stream_usage:
|
||||
kwargs["stream_options"] = {"include_usage": stream_usage}
|
||||
if self._use_responses_api(kwargs):
|
||||
async for chunk in super()._astream_responses(*args, **kwargs):
|
||||
yield chunk
|
||||
else:
|
||||
stream_usage = self._should_stream_usage(stream_usage, **kwargs)
|
||||
if stream_usage:
|
||||
kwargs["stream_options"] = {"include_usage": stream_usage}
|
||||
|
||||
async for chunk in super()._astream(*args, **kwargs):
|
||||
yield chunk
|
||||
async for chunk in super()._astream(*args, **kwargs):
|
||||
yield chunk
|
||||
|
||||
def with_structured_output(
|
||||
self,
|
||||
@ -2617,3 +2779,355 @@ def _create_usage_metadata(oai_token_usage: dict) -> UsageMetadata:
|
||||
**{k: v for k, v in output_token_details.items() if v is not None}
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def _create_usage_metadata_responses(oai_token_usage: dict) -> UsageMetadata:
|
||||
input_tokens = oai_token_usage.get("input_tokens", 0)
|
||||
output_tokens = oai_token_usage.get("output_tokens", 0)
|
||||
total_tokens = oai_token_usage.get("total_tokens", input_tokens + output_tokens)
|
||||
|
||||
output_token_details: dict = {
|
||||
"audio": (oai_token_usage.get("completion_tokens_details") or {}).get(
|
||||
"audio_tokens"
|
||||
),
|
||||
"reasoning": (oai_token_usage.get("output_token_details") or {}).get(
|
||||
"reasoning_tokens"
|
||||
),
|
||||
}
|
||||
return UsageMetadata(
|
||||
input_tokens=input_tokens,
|
||||
output_tokens=output_tokens,
|
||||
total_tokens=total_tokens,
|
||||
output_token_details=OutputTokenDetails(
|
||||
**{k: v for k, v in output_token_details.items() if v is not None}
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def _is_builtin_tool(tool: dict) -> bool:
    return "type" in tool and tool["type"] != "function"


def _use_responses_api(payload: dict) -> bool:
    uses_builtin_tools = "tools" in payload and any(
        _is_builtin_tool(tool) for tool in payload["tools"]
    )
    responses_only_args = {"previous_response_id", "text", "truncation", "include"}
    return bool(uses_builtin_tools or responses_only_args.intersection(payload))
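# A rough sketch of how this routing check behaves (payload keys are the ones
# referenced above; values are placeholders):
#
#     _use_responses_api({"messages": [...]})                            # False
#     _use_responses_api({"tools": [{"type": "function", ...}]})         # False
#     _use_responses_api({"tools": [{"type": "web_search_preview"}]})    # True  (built-in tool)
#     _use_responses_api({"previous_response_id": "resp_123"})           # True  (Responses-only arg)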
|
||||
|
||||
|
||||
def _construct_responses_api_payload(
    messages: Sequence[BaseMessage], payload: dict
) -> dict:
    payload["input"] = _construct_responses_api_input(messages)
    if tools := payload.pop("tools", None):
        new_tools: list = []
        for tool in tools:
            # chat api: {"type": "function", "function": {"name": "...", "description": "...", "parameters": {...}, "strict": ...}} # noqa: E501
            # responses api: {"type": "function", "name": "...", "description": "...", "parameters": {...}, "strict": ...} # noqa: E501
            if tool["type"] == "function" and "function" in tool:
                new_tools.append({"type": "function", **tool["function"]})
            else:
                new_tools.append(tool)
        payload["tools"] = new_tools
    if tool_choice := payload.pop("tool_choice", None):
        # chat api: {"type": "function", "function": {"name": "..."}}
        # responses api: {"type": "function", "name": "..."}
        if tool_choice["type"] == "function" and "function" in tool_choice:
            payload["tool_choice"] = {"type": "function", **tool_choice["function"]}
        else:
            payload["tool_choice"] = tool_choice
    if response_format := payload.pop("response_format", None):
        if payload.get("text"):
            text = payload["text"]
            raise ValueError(
                "Can specify at most one of 'response_format' or 'text', received both:"
                f"\n{response_format=}\n{text=}"
            )
        # chat api: {"type": "json_schema, "json_schema": {"schema": {...}, "name": "...", "description": "...", "strict": ...}} # noqa: E501
        # responses api: {"type": "json_schema, "schema": {...}, "name": "...", "description": "...", "strict": ...} # noqa: E501
        if response_format["type"] == "json_schema":
            payload["text"] = {"type": "json_schema", **response_format["json_schema"]}
        else:
            payload["text"] = response_format
    return payload
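# A rough sketch of the shape translation performed above (field values are
# elided; only the keys shown in the comments above change):
#
#     tools:            {"type": "function", "function": {"name": ..., "parameters": ...}}
#                    -> {"type": "function", "name": ..., "parameters": ...}
#     tool_choice:      {"type": "function", "function": {"name": ...}}
#                    -> {"type": "function", "name": ...}
#     response_format:  {"type": "json_schema", "json_schema": {"name": ..., "schema": ...}}
#                    -> payload["text"] = {"type": "json_schema", "name": ..., "schema": ...}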
|
||||
|
||||
|
||||
def _construct_responses_api_input(messages: Sequence[BaseMessage]) -> list:
|
||||
input_ = []
|
||||
for lc_msg in messages:
|
||||
msg = _convert_message_to_dict(lc_msg)
|
||||
if msg["role"] == "tool":
|
||||
tool_output = msg["content"]
|
||||
if not isinstance(tool_output, str):
|
||||
tool_output = _stringify(tool_output)
|
||||
function_call_output = {
|
||||
"type": "function_call_output",
|
||||
"output": tool_output,
|
||||
"call_id": msg["tool_call_id"],
|
||||
}
|
||||
input_.append(function_call_output)
|
||||
elif msg["role"] == "assistant":
|
||||
function_calls = []
|
||||
if tool_calls := msg.pop("tool_calls", None):
|
||||
# TODO: should you be able to preserve the function call object id on
|
||||
# the langchain tool calls themselves?
|
||||
if not lc_msg.additional_kwargs.get(_FUNCTION_CALL_IDS_MAP_KEY):
|
||||
raise ValueError("")
|
||||
function_call_ids = lc_msg.additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY]
|
||||
for tool_call in tool_calls:
|
||||
function_call = {
|
||||
"type": "function_call",
|
||||
"name": tool_call["function"]["name"],
|
||||
"arguments": tool_call["function"]["arguments"],
|
||||
"call_id": tool_call["id"],
|
||||
"id": function_call_ids[tool_call["id"]],
|
||||
}
|
||||
function_calls.append(function_call)
|
||||
|
||||
msg["content"] = msg.get("content") or []
|
||||
if lc_msg.additional_kwargs.get("refusal"):
|
||||
if isinstance(msg["content"], str):
|
||||
msg["content"] = [
|
||||
{
|
||||
"type": "output_text",
|
||||
"text": msg["content"],
|
||||
"annotations": [],
|
||||
}
|
||||
]
|
||||
msg["content"] = msg["content"] + [
|
||||
{"type": "refusal", "refusal": lc_msg.additional_kwargs["refusal"]}
|
||||
]
|
||||
if isinstance(msg["content"], list):
|
||||
new_blocks = []
|
||||
for block in msg["content"]:
|
||||
# chat api: {"type": "text", "text": "..."}
|
||||
# responses api: {"type": "output_text", "text": "...", "annotations": [...]} # noqa: E501
|
||||
if block["type"] == "text":
|
||||
new_blocks.append(
|
||||
{
|
||||
"type": "output_text",
|
||||
"text": block["text"],
|
||||
"annotations": block.get("annotations") or [],
|
||||
}
|
||||
)
|
||||
elif block["type"] in ("output_text", "refusal"):
|
||||
new_blocks.append(block)
|
||||
else:
|
||||
pass
|
||||
msg["content"] = new_blocks
|
||||
if msg["content"]:
|
||||
input_.append(msg)
|
||||
input_.extend(function_calls)
|
||||
elif msg["role"] == "user":
|
||||
if isinstance(msg["content"], list):
|
||||
new_blocks = []
|
||||
for block in msg["content"]:
|
||||
# chat api: {"type": "text", "text": "..."}
|
||||
# responses api: {"type": "input_text", "text": "..."}
|
||||
if block["type"] == "text":
|
||||
new_blocks.append({"type": "input_text", "text": block["text"]})
|
||||
# chat api: {"type": "image_url", "image_url": {"url": "...", "detail": "..."}} # noqa: E501
|
||||
# responses api: {"type": "image_url", "image_url": "...", "detail": "...", "file_id": "..."} # noqa: E501
|
||||
elif block["type"] == "image_url":
|
||||
new_block = {
|
||||
"type": "input_image",
|
||||
"image_url": block["image_url"]["url"],
|
||||
}
|
||||
if block["image_url"].get("detail"):
|
||||
new_block["detail"] = block["image_url"]["detail"]
|
||||
new_blocks.append(new_block)
|
||||
elif block["type"] in ("input_text", "input_image", "input_file"):
|
||||
new_blocks.append(block)
|
||||
else:
|
||||
pass
|
||||
msg["content"] = new_blocks
|
||||
input_.append(msg)
|
||||
else:
|
||||
input_.append(msg)
|
||||
|
||||
return input_
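# A rough sketch of the conversions above (hypothetical values), showing how a
# tool result and a user message are reshaped for the Responses API:
#
#     ToolMessage(content="4", tool_call_id="call_1")
#       -> {"type": "function_call_output", "output": "4", "call_id": "call_1"}
#
#     HumanMessage(content=[{"type": "text", "text": "hi"}])
#       -> {"role": "user", "content": [{"type": "input_text", "text": "hi"}]}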
|
||||
|
||||
|
||||
def _construct_lc_result_from_responses_api(response: Response) -> ChatResult:
|
||||
"""Construct ChatResponse from OpenAI Response API response."""
|
||||
if response.error:
|
||||
raise ValueError(response.error)
|
||||
|
||||
response_metadata = {
|
||||
k: v
|
||||
for k, v in response.model_dump(exclude_none=True, mode="json").items()
|
||||
if k
|
||||
in (
|
||||
"created_at",
|
||||
"id",
|
||||
"incomplete_details",
|
||||
"metadata",
|
||||
"object",
|
||||
"status",
|
||||
"user",
|
||||
"model",
|
||||
)
|
||||
}
|
||||
# for compatibility with chat completion calls.
|
||||
response_metadata["model_name"] = response_metadata.get("model")
|
||||
if response.usage:
|
||||
usage_metadata = _create_usage_metadata_responses(response.usage.model_dump())
|
||||
else:
|
||||
usage_metadata = None
|
||||
|
||||
content_blocks: list = []
|
||||
tool_calls = []
|
||||
invalid_tool_calls = []
|
||||
additional_kwargs: dict = {}
|
||||
msg_id = None
|
||||
for output in response.output:
|
||||
if output.type == "message":
|
||||
for content in output.content:
|
||||
if content.type == "output_text":
|
||||
block = {
|
||||
"type": "text",
|
||||
"text": content.text,
|
||||
"annotations": [
|
||||
annotation.model_dump()
|
||||
for annotation in content.annotations
|
||||
],
|
||||
}
|
||||
content_blocks.append(block)
|
||||
if content.type == "refusal":
|
||||
additional_kwargs["refusal"] = content.refusal
|
||||
msg_id = output.id
|
||||
elif output.type == "function_call":
|
||||
try:
|
||||
args = json.loads(output.arguments, strict=False)
|
||||
error = None
|
||||
except JSONDecodeError as e:
|
||||
args = output.arguments
|
||||
error = str(e)
|
||||
if error is None:
|
||||
tool_call = {
|
||||
"type": "tool_call",
|
||||
"name": output.name,
|
||||
"args": args,
|
||||
"id": output.call_id,
|
||||
}
|
||||
tool_calls.append(tool_call)
|
||||
else:
|
||||
tool_call = {
|
||||
"type": "invalid_tool_call",
|
||||
"name": output.name,
|
||||
"args": args,
|
||||
"id": output.call_id,
|
||||
"error": error,
|
||||
}
|
||||
invalid_tool_calls.append(tool_call)
|
||||
if _FUNCTION_CALL_IDS_MAP_KEY not in additional_kwargs:
|
||||
additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY] = {}
|
||||
additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY][output.call_id] = output.id
|
||||
elif output.type == "reasoning":
|
||||
additional_kwargs["reasoning"] = output.model_dump(
|
||||
exclude_none=True, mode="json"
|
||||
)
|
||||
else:
|
||||
tool_output = output.model_dump(exclude_none=True, mode="json")
|
||||
if "tool_outputs" in additional_kwargs:
|
||||
additional_kwargs["tool_outputs"].append(tool_output)
|
||||
else:
|
||||
additional_kwargs["tool_outputs"] = [tool_output]
|
||||
message = AIMessage(
|
||||
content=content_blocks,
|
||||
id=msg_id,
|
||||
usage_metadata=usage_metadata,
|
||||
response_metadata=response_metadata,
|
||||
additional_kwargs=additional_kwargs,
|
||||
tool_calls=tool_calls,
|
||||
invalid_tool_calls=invalid_tool_calls,
|
||||
)
|
||||
return ChatResult(generations=[ChatGeneration(message=message)])
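# Rough summary of the mapping implemented above (Responses output item type
# -> where it lands on the AIMessage):
#
#     "message" / output_text  -> message.content blocks
#                                 [{"type": "text", "text": ..., "annotations": [...]}]
#     "function_call"          -> message.tool_calls (or invalid_tool_calls on bad JSON)
#     "reasoning"              -> message.additional_kwargs["reasoning"]
#     anything else (e.g. web_search_call, file_search_call)
#                              -> message.additional_kwargs["tool_outputs"]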
|
||||
|
||||
|
||||
def _convert_responses_chunk_to_generation_chunk(
|
||||
chunk: Any,
|
||||
) -> Optional[ChatGenerationChunk]:
|
||||
content = []
|
||||
tool_call_chunks: list = []
|
||||
additional_kwargs: dict = {}
|
||||
response_metadata = {}
|
||||
usage_metadata = None
|
||||
id = None
|
||||
if chunk.type == "response.output_text.delta":
|
||||
content.append(
|
||||
{"type": "text", "text": chunk.delta, "index": chunk.content_index}
|
||||
)
|
||||
elif chunk.type == "response.output_text.annotation.added":
|
||||
content.append(
|
||||
{
|
||||
"annotations": [
|
||||
chunk.annotation.model_dump(exclude_none=True, mode="json")
|
||||
],
|
||||
"index": chunk.content_index,
|
||||
}
|
||||
)
|
||||
elif chunk.type == "response.created":
|
||||
response_metadata["id"] = chunk.response.id
|
||||
elif chunk.type == "response.completed":
|
||||
msg = cast(
|
||||
AIMessage,
|
||||
(
|
||||
_construct_lc_result_from_responses_api(chunk.response)
|
||||
.generations[0]
|
||||
.message
|
||||
),
|
||||
)
|
||||
usage_metadata = msg.usage_metadata
|
||||
response_metadata = {
|
||||
k: v for k, v in msg.response_metadata.items() if k != "id"
|
||||
}
|
||||
elif chunk.type == "response.output_item.added" and chunk.item.type == "message":
|
||||
id = chunk.item.id
|
||||
elif (
|
||||
chunk.type == "response.output_item.added"
|
||||
and chunk.item.type == "function_call"
|
||||
):
|
||||
tool_call_chunks.append(
|
||||
{
|
||||
"type": "tool_call_chunk",
|
||||
"name": chunk.item.name,
|
||||
"args": chunk.item.arguments,
|
||||
"id": chunk.item.call_id,
|
||||
"index": chunk.output_index,
|
||||
}
|
||||
)
|
||||
additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY] = {
|
||||
chunk.item.call_id: chunk.item.id
|
||||
}
|
||||
elif chunk.type == "response.output_item.done" and chunk.item.type in (
|
||||
"web_search_call",
|
||||
"file_search_call",
|
||||
):
|
||||
additional_kwargs["tool_outputs"] = [
|
||||
chunk.item.model_dump(exclude_none=True, mode="json")
|
||||
]
|
||||
elif chunk.type == "response.function_call_arguments.delta":
|
||||
tool_call_chunks.append(
|
||||
{
|
||||
"type": "tool_call_chunk",
|
||||
"args": chunk.delta,
|
||||
"index": chunk.output_index,
|
||||
}
|
||||
)
|
||||
elif chunk.type == "response.refusal.done":
|
||||
additional_kwargs["refusal"] = chunk.refusal
|
||||
else:
|
||||
return None
|
||||
|
||||
return ChatGenerationChunk(
|
||||
message=AIMessageChunk(
|
||||
content=content, # type: ignore[arg-type]
|
||||
tool_call_chunks=tool_call_chunks,
|
||||
usage_metadata=usage_metadata,
|
||||
response_metadata=response_metadata,
|
||||
additional_kwargs=additional_kwargs,
|
||||
id=id,
|
||||
)
|
||||
)
|
||||
|
@ -7,12 +7,12 @@ authors = []
|
||||
license = { text = "MIT" }
|
||||
requires-python = "<4.0,>=3.9"
|
||||
dependencies = [
|
||||
"langchain-core<1.0.0,>=0.3.43",
|
||||
"openai<2.0.0,>=1.58.1",
|
||||
"langchain-core<1.0.0,>=0.3.45-rc.1",
|
||||
"openai<2.0.0,>=1.66.0",
|
||||
"tiktoken<1,>=0.7",
|
||||
]
|
||||
name = "langchain-openai"
|
||||
version = "0.3.8"
|
||||
version = "0.3.9-rc.1"
|
||||
description = "An integration package connecting OpenAI and LangChain"
|
||||
readme = "README.md"
|
||||
|
||||
|
@ -0,0 +1,168 @@
|
||||
"""Test Responses API usage."""
|
||||
|
||||
import os
|
||||
from typing import Any, Optional, cast
|
||||
|
||||
import pytest
|
||||
from langchain_core.messages import (
|
||||
AIMessage,
|
||||
AIMessageChunk,
|
||||
BaseMessage,
|
||||
BaseMessageChunk,
|
||||
)
|
||||
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
|
||||
def _check_response(response: Optional[BaseMessage]) -> None:
|
||||
assert isinstance(response, AIMessage)
|
||||
assert isinstance(response.content, list)
|
||||
for block in response.content:
|
||||
assert isinstance(block, dict)
|
||||
if block["type"] == "text":
|
||||
assert isinstance(block["text"], str)
|
||||
for annotation in block["annotations"]:
|
||||
if annotation["type"] == "file_citation":
|
||||
assert all(
|
||||
key in annotation
|
||||
for key in ["file_id", "filename", "index", "type"]
|
||||
)
|
||||
elif annotation["type"] == "web_search":
|
||||
assert all(
|
||||
key in annotation
|
||||
for key in ["end_index", "start_index", "title", "type", "url"]
|
||||
)
|
||||
|
||||
text_content = response.text()
|
||||
assert isinstance(text_content, str)
|
||||
assert text_content
|
||||
assert response.usage_metadata
|
||||
assert response.usage_metadata["input_tokens"] > 0
|
||||
assert response.usage_metadata["output_tokens"] > 0
|
||||
assert response.usage_metadata["total_tokens"] > 0
|
||||
assert response.response_metadata["model_name"]
|
||||
for tool_output in response.additional_kwargs["tool_outputs"]:
|
||||
assert tool_output["id"]
|
||||
assert tool_output["status"]
|
||||
assert tool_output["type"]
|
||||
|
||||
|
||||
def test_web_search() -> None:
|
||||
llm = ChatOpenAI(model="gpt-4o-mini")
|
||||
first_response = llm.invoke(
|
||||
"What was a positive news story from today?",
|
||||
tools=[{"type": "web_search_preview"}],
|
||||
)
|
||||
_check_response(first_response)
|
||||
|
||||
# Test streaming
|
||||
full: Optional[BaseMessageChunk] = None
|
||||
for chunk in llm.stream(
|
||||
"What was a positive news story from today?",
|
||||
tools=[{"type": "web_search_preview"}],
|
||||
):
|
||||
assert isinstance(chunk, AIMessageChunk)
|
||||
full = chunk if full is None else full + chunk
|
||||
_check_response(full)
|
||||
|
||||
# Use OpenAI's stateful API
|
||||
response = llm.invoke(
|
||||
"what about a negative one",
|
||||
tools=[{"type": "web_search_preview"}],
|
||||
previous_response_id=first_response.response_metadata["id"],
|
||||
)
|
||||
_check_response(response)
|
||||
|
||||
# Manually pass in chat history
|
||||
response = llm.invoke(
|
||||
[
|
||||
first_response,
|
||||
{
|
||||
"role": "user",
|
||||
"content": [{"type": "text", "text": "what about a negative one"}],
|
||||
},
|
||||
],
|
||||
tools=[{"type": "web_search_preview"}],
|
||||
)
|
||||
_check_response(response)
|
||||
|
||||
# Bind tool
|
||||
response = llm.bind_tools([{"type": "web_search_preview"}]).invoke(
|
||||
"What was a positive news story from today?"
|
||||
)
|
||||
_check_response(response)
|
||||
|
||||
|
||||
async def test_web_search_async() -> None:
|
||||
llm = ChatOpenAI(model="gpt-4o-mini")
|
||||
response = await llm.ainvoke(
|
||||
"What was a positive news story from today?",
|
||||
tools=[{"type": "web_search_preview"}],
|
||||
)
|
||||
_check_response(response)
|
||||
assert response.response_metadata["status"]
|
||||
|
||||
# Test streaming
|
||||
full: Optional[BaseMessageChunk] = None
|
||||
async for chunk in llm.astream(
|
||||
"What was a positive news story from today?",
|
||||
tools=[{"type": "web_search_preview"}],
|
||||
):
|
||||
assert isinstance(chunk, AIMessageChunk)
|
||||
full = chunk if full is None else full + chunk
|
||||
assert isinstance(full, AIMessageChunk)
|
||||
_check_response(full)
|
||||
|
||||
|
||||
def test_function_calling() -> None:
|
||||
def multiply(x: int, y: int) -> int:
|
||||
"""return x * y"""
|
||||
return x * y
|
||||
|
||||
llm = ChatOpenAI(model="gpt-4o-mini")
|
||||
bound_llm = llm.bind_tools([multiply, {"type": "web_search_preview"}])
|
||||
ai_msg = cast(AIMessage, bound_llm.invoke("whats 5 * 4"))
|
||||
assert len(ai_msg.tool_calls) == 1
|
||||
assert ai_msg.tool_calls[0]["name"] == "multiply"
|
||||
assert set(ai_msg.tool_calls[0]["args"]) == {"x", "y"}
|
||||
|
||||
full: Any = None
|
||||
for chunk in bound_llm.stream("whats 5 * 4"):
|
||||
assert isinstance(chunk, AIMessageChunk)
|
||||
full = chunk if full is None else full + chunk
|
||||
assert len(full.tool_calls) == 1
|
||||
assert full.tool_calls[0]["name"] == "multiply"
|
||||
assert set(full.tool_calls[0]["args"]) == {"x", "y"}
|
||||
|
||||
response = bound_llm.invoke("whats some good news from today")
|
||||
_check_response(response)
|
||||
|
||||
|
||||
def test_stateful_api() -> None:
|
||||
llm = ChatOpenAI(model="gpt-4o-mini", use_responses_api=True)
|
||||
response = llm.invoke("how are you, my name is Bobo")
|
||||
assert "id" in response.response_metadata
|
||||
|
||||
second_response = llm.invoke(
|
||||
"what's my name", previous_response_id=response.response_metadata["id"]
|
||||
)
|
||||
assert isinstance(second_response.content, list)
|
||||
assert "bobo" in second_response.content[0]["text"].lower() # type: ignore
|
||||
|
||||
|
||||
def test_file_search() -> None:
|
||||
pytest.skip() # TODO: set up infra
|
||||
llm = ChatOpenAI(model="gpt-4o-mini")
|
||||
tool = {
|
||||
"type": "file_search",
|
||||
"vector_store_ids": [os.environ["OPENAI_VECTOR_STORE_ID"]],
|
||||
}
|
||||
response = llm.invoke("What is deep research by OpenAI?", tools=[tool])
|
||||
_check_response(response)
|
||||
|
||||
full: Optional[BaseMessageChunk] = None
|
||||
for chunk in llm.stream("What is deep research by OpenAI?", tools=[tool]):
|
||||
assert isinstance(chunk, AIMessageChunk)
|
||||
full = chunk if full is None else full + chunk
|
||||
assert isinstance(full, AIMessageChunk)
|
||||
_check_response(full)
|
@ -3,7 +3,7 @@
|
||||
import json
|
||||
from functools import partial
|
||||
from types import TracebackType
|
||||
from typing import Any, Dict, List, Literal, Optional, Type, Union
|
||||
from typing import Any, Dict, List, Literal, Optional, Type, Union, cast
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
@ -19,13 +19,30 @@ from langchain_core.messages import (
|
||||
ToolMessage,
|
||||
)
|
||||
from langchain_core.messages.ai import UsageMetadata
|
||||
from langchain_core.outputs import ChatGeneration
|
||||
from langchain_core.outputs import ChatGeneration, ChatResult
|
||||
from langchain_core.runnables import RunnableLambda
|
||||
from openai.types.responses import ResponseOutputMessage
|
||||
from openai.types.responses.response import IncompleteDetails, Response, ResponseUsage
|
||||
from openai.types.responses.response_error import ResponseError
|
||||
from openai.types.responses.response_file_search_tool_call import (
|
||||
ResponseFileSearchToolCall,
|
||||
Result,
|
||||
)
|
||||
from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall
|
||||
from openai.types.responses.response_function_web_search import (
|
||||
ResponseFunctionWebSearch,
|
||||
)
|
||||
from openai.types.responses.response_output_refusal import ResponseOutputRefusal
|
||||
from openai.types.responses.response_output_text import ResponseOutputText
|
||||
from openai.types.responses.response_usage import OutputTokensDetails
|
||||
from pydantic import BaseModel, Field
|
||||
from typing_extensions import TypedDict
|
||||
|
||||
from langchain_openai import ChatOpenAI
|
||||
from langchain_openai.chat_models.base import (
|
||||
_FUNCTION_CALL_IDS_MAP_KEY,
|
||||
_construct_lc_result_from_responses_api,
|
||||
_construct_responses_api_input,
|
||||
_convert_dict_to_message,
|
||||
_convert_message_to_dict,
|
||||
_convert_to_openai_response_format,
|
||||
@ -862,7 +879,7 @@ def test_nested_structured_output_strict() -> None:
|
||||
|
||||
setup: str
|
||||
punchline: str
|
||||
self_evaluation: SelfEvaluation
|
||||
_evaluation: SelfEvaluation
|
||||
|
||||
llm.with_structured_output(JokeWithEvaluation, method="json_schema")
|
||||
|
||||
@ -936,3 +953,731 @@ def test_structured_outputs_parser() -> None:
|
||||
assert isinstance(deserialized, ChatGeneration)
|
||||
result = output_parser.invoke(deserialized.message)
|
||||
assert result == parsed_response
|
||||
|
||||
|
||||
def test__construct_lc_result_from_responses_api_error_handling() -> None:
    """Test that errors in the response are properly raised."""
    response = Response(
        id="resp_123",
        created_at=1234567890,
        model="gpt-4o",
        object="response",
        error=ResponseError(message="Test error", code="server_error"),
        parallel_tool_calls=True,
        tools=[],
        tool_choice="auto",
        output=[],
    )

    with pytest.raises(ValueError) as excinfo:
        _construct_lc_result_from_responses_api(response)

    assert "Test error" in str(excinfo.value)


def test__construct_lc_result_from_responses_api_basic_text_response() -> None:
    """Test a basic text response with no tools or special features."""
    response = Response(
        id="resp_123",
        created_at=1234567890,
        model="gpt-4o",
        object="response",
        parallel_tool_calls=True,
        tools=[],
        tool_choice="auto",
        output=[
            ResponseOutputMessage(
                type="message",
                id="msg_123",
                content=[
                    ResponseOutputText(
                        type="output_text", text="Hello, world!", annotations=[]
                    )
                ],
                role="assistant",
                status="completed",
            )
        ],
        usage=ResponseUsage(
            input_tokens=10,
            output_tokens=3,
            total_tokens=13,
            output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
        ),
    )

    result = _construct_lc_result_from_responses_api(response)

    assert isinstance(result, ChatResult)
    assert len(result.generations) == 1
    assert isinstance(result.generations[0], ChatGeneration)
    assert isinstance(result.generations[0].message, AIMessage)
    assert result.generations[0].message.content == [
        {"type": "text", "text": "Hello, world!", "annotations": []}
    ]
    assert result.generations[0].message.id == "msg_123"
    assert result.generations[0].message.usage_metadata
    assert result.generations[0].message.usage_metadata["input_tokens"] == 10
    assert result.generations[0].message.usage_metadata["output_tokens"] == 3
    assert result.generations[0].message.usage_metadata["total_tokens"] == 13
    assert result.generations[0].message.response_metadata["id"] == "resp_123"
    assert result.generations[0].message.response_metadata["model_name"] == "gpt-4o"


def test__construct_lc_result_from_responses_api_multiple_text_blocks() -> None:
    """Test a response with multiple text blocks."""
    response = Response(
        id="resp_123",
        created_at=1234567890,
        model="gpt-4o",
        object="response",
        parallel_tool_calls=True,
        tools=[],
        tool_choice="auto",
        output=[
            ResponseOutputMessage(
                type="message",
                id="msg_123",
                content=[
                    ResponseOutputText(
                        type="output_text", text="First part", annotations=[]
                    ),
                    ResponseOutputText(
                        type="output_text", text="Second part", annotations=[]
                    ),
                ],
                role="assistant",
                status="completed",
            )
        ],
    )

    result = _construct_lc_result_from_responses_api(response)

    assert len(result.generations[0].message.content) == 2
    assert result.generations[0].message.content[0]["text"] == "First part"  # type: ignore
    assert result.generations[0].message.content[1]["text"] == "Second part"  # type: ignore


def test__construct_lc_result_from_responses_api_refusal_response() -> None:
    """Test a response with a refusal."""
    response = Response(
        id="resp_123",
        created_at=1234567890,
        model="gpt-4o",
        object="response",
        parallel_tool_calls=True,
        tools=[],
        tool_choice="auto",
        output=[
            ResponseOutputMessage(
                type="message",
                id="msg_123",
                content=[
                    ResponseOutputRefusal(
                        type="refusal", refusal="I cannot assist with that request."
                    )
                ],
                role="assistant",
                status="completed",
            )
        ],
    )

    result = _construct_lc_result_from_responses_api(response)

    assert result.generations[0].message.content == []
    assert (
        result.generations[0].message.additional_kwargs["refusal"]
        == "I cannot assist with that request."
    )


def test__construct_lc_result_from_responses_api_function_call_valid_json() -> None:
    """Test a response with a valid function call."""
    response = Response(
        id="resp_123",
        created_at=1234567890,
        model="gpt-4o",
        object="response",
        parallel_tool_calls=True,
        tools=[],
        tool_choice="auto",
        output=[
            ResponseFunctionToolCall(
                type="function_call",
                id="func_123",
                call_id="call_123",
                name="get_weather",
                arguments='{"location": "New York", "unit": "celsius"}',
            )
        ],
    )

    result = _construct_lc_result_from_responses_api(response)

    msg: AIMessage = cast(AIMessage, result.generations[0].message)
    assert len(msg.tool_calls) == 1
    assert msg.tool_calls[0]["type"] == "tool_call"
    assert msg.tool_calls[0]["name"] == "get_weather"
    assert msg.tool_calls[0]["id"] == "call_123"
    assert msg.tool_calls[0]["args"] == {"location": "New York", "unit": "celsius"}
    assert _FUNCTION_CALL_IDS_MAP_KEY in result.generations[0].message.additional_kwargs
    assert (
        result.generations[0].message.additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY][
            "call_123"
        ]
        == "func_123"
    )


def test__construct_lc_result_from_responses_api_function_call_invalid_json() -> None:
    """Test a response with an invalid JSON function call."""
    response = Response(
        id="resp_123",
        created_at=1234567890,
        model="gpt-4o",
        object="response",
        parallel_tool_calls=True,
        tools=[],
        tool_choice="auto",
        output=[
            ResponseFunctionToolCall(
                type="function_call",
                id="func_123",
                call_id="call_123",
                name="get_weather",
                arguments='{"location": "New York", "unit": "celsius"',
                # Missing closing brace
            )
        ],
    )

    result = _construct_lc_result_from_responses_api(response)

    msg: AIMessage = cast(AIMessage, result.generations[0].message)
    assert len(msg.invalid_tool_calls) == 1
    assert msg.invalid_tool_calls[0]["type"] == "invalid_tool_call"
    assert msg.invalid_tool_calls[0]["name"] == "get_weather"
    assert msg.invalid_tool_calls[0]["id"] == "call_123"
    assert (
        msg.invalid_tool_calls[0]["args"]
        == '{"location": "New York", "unit": "celsius"'
    )
    assert "error" in msg.invalid_tool_calls[0]
    assert _FUNCTION_CALL_IDS_MAP_KEY in result.generations[0].message.additional_kwargs


def test__construct_lc_result_from_responses_api_complex_response() -> None:
    """Test a complex response with multiple output types."""
    response = Response(
        id="resp_123",
        created_at=1234567890,
        model="gpt-4o",
        object="response",
        parallel_tool_calls=True,
        tools=[],
        tool_choice="auto",
        output=[
            ResponseOutputMessage(
                type="message",
                id="msg_123",
                content=[
                    ResponseOutputText(
                        type="output_text",
                        text="Here's the information you requested:",
                        annotations=[],
                    )
                ],
                role="assistant",
                status="completed",
            ),
            ResponseFunctionToolCall(
                type="function_call",
                id="func_123",
                call_id="call_123",
                name="get_weather",
                arguments='{"location": "New York"}',
            ),
        ],
        metadata=dict(key1="value1", key2="value2"),
        incomplete_details=IncompleteDetails(reason="max_output_tokens"),
        status="completed",
        user="user_123",
    )

    result = _construct_lc_result_from_responses_api(response)

    # Check message content
    assert result.generations[0].message.content == [
        {
            "type": "text",
            "text": "Here's the information you requested:",
            "annotations": [],
        }
    ]

    # Check tool calls
    msg: AIMessage = cast(AIMessage, result.generations[0].message)
    assert len(msg.tool_calls) == 1
    assert msg.tool_calls[0]["name"] == "get_weather"

    # Check metadata
    assert result.generations[0].message.response_metadata["id"] == "resp_123"
    assert result.generations[0].message.response_metadata["metadata"] == {
        "key1": "value1",
        "key2": "value2",
    }
    assert result.generations[0].message.response_metadata["incomplete_details"] == {
        "reason": "max_output_tokens"
    }
    assert result.generations[0].message.response_metadata["status"] == "completed"
    assert result.generations[0].message.response_metadata["user"] == "user_123"


def test__construct_lc_result_from_responses_api_no_usage_metadata() -> None:
    """Test a response without usage metadata."""
    response = Response(
        id="resp_123",
        created_at=1234567890,
        model="gpt-4o",
        object="response",
        parallel_tool_calls=True,
        tools=[],
        tool_choice="auto",
        output=[
            ResponseOutputMessage(
                type="message",
                id="msg_123",
                content=[
                    ResponseOutputText(
                        type="output_text", text="Hello, world!", annotations=[]
                    )
                ],
                role="assistant",
                status="completed",
            )
        ],
        # No usage field
    )

    result = _construct_lc_result_from_responses_api(response)

    assert cast(AIMessage, result.generations[0].message).usage_metadata is None


def test__construct_lc_result_from_responses_api_web_search_response() -> None:
    """Test a response with web search output."""
    from openai.types.responses.response_function_web_search import (
        ResponseFunctionWebSearch,
    )

    response = Response(
        id="resp_123",
        created_at=1234567890,
        model="gpt-4o",
        object="response",
        parallel_tool_calls=True,
        tools=[],
        tool_choice="auto",
        output=[
            ResponseFunctionWebSearch(
                id="websearch_123", type="web_search_call", status="completed"
            )
        ],
    )

    result = _construct_lc_result_from_responses_api(response)

    assert "tool_outputs" in result.generations[0].message.additional_kwargs
    assert len(result.generations[0].message.additional_kwargs["tool_outputs"]) == 1
    assert (
        result.generations[0].message.additional_kwargs["tool_outputs"][0]["type"]
        == "web_search_call"
    )
    assert (
        result.generations[0].message.additional_kwargs["tool_outputs"][0]["id"]
        == "websearch_123"
    )
    assert (
        result.generations[0].message.additional_kwargs["tool_outputs"][0]["status"]
        == "completed"
    )


def test__construct_lc_result_from_responses_api_file_search_response() -> None:
    """Test a response with file search output."""
    response = Response(
        id="resp_123",
        created_at=1234567890,
        model="gpt-4o",
        object="response",
        parallel_tool_calls=True,
        tools=[],
        tool_choice="auto",
        output=[
            ResponseFileSearchToolCall(
                id="filesearch_123",
                type="file_search_call",
                status="completed",
                queries=["python code", "langchain"],
                results=[
                    Result(
                        file_id="file_123",
                        filename="example.py",
                        score=0.95,
                        text="def hello_world() -> None:\n print('Hello, world!')",
                        attributes={"language": "python", "size": 42},
                    )
                ],
            )
        ],
    )

    result = _construct_lc_result_from_responses_api(response)

    assert "tool_outputs" in result.generations[0].message.additional_kwargs
    assert len(result.generations[0].message.additional_kwargs["tool_outputs"]) == 1
    assert (
        result.generations[0].message.additional_kwargs["tool_outputs"][0]["type"]
        == "file_search_call"
    )
    assert (
        result.generations[0].message.additional_kwargs["tool_outputs"][0]["id"]
        == "filesearch_123"
    )
    assert (
        result.generations[0].message.additional_kwargs["tool_outputs"][0]["status"]
        == "completed"
    )
    assert result.generations[0].message.additional_kwargs["tool_outputs"][0][
        "queries"
    ] == ["python code", "langchain"]
    assert (
        len(
            result.generations[0].message.additional_kwargs["tool_outputs"][0][
                "results"
            ]
        )
        == 1
    )
    assert (
        result.generations[0].message.additional_kwargs["tool_outputs"][0]["results"][
            0
        ]["file_id"]
        == "file_123"
    )
    assert (
        result.generations[0].message.additional_kwargs["tool_outputs"][0]["results"][
            0
        ]["score"]
        == 0.95
    )


def test__construct_lc_result_from_responses_api_mixed_search_responses() -> None:
    """Test a response with both web search and file search outputs."""

    response = Response(
        id="resp_123",
        created_at=1234567890,
        model="gpt-4o",
        object="response",
        parallel_tool_calls=True,
        tools=[],
        tool_choice="auto",
        output=[
            ResponseOutputMessage(
                type="message",
                id="msg_123",
                content=[
                    ResponseOutputText(
                        type="output_text", text="Here's what I found:", annotations=[]
                    )
                ],
                role="assistant",
                status="completed",
            ),
            ResponseFunctionWebSearch(
                id="websearch_123", type="web_search_call", status="completed"
            ),
            ResponseFileSearchToolCall(
                id="filesearch_123",
                type="file_search_call",
                status="completed",
                queries=["python code"],
                results=[
                    Result(
                        file_id="file_123",
                        filename="example.py",
                        score=0.95,
                        text="def hello_world() -> None:\n print('Hello, world!')",
                    )
                ],
            ),
        ],
    )

    result = _construct_lc_result_from_responses_api(response)

    # Check message content
    assert result.generations[0].message.content == [
        {"type": "text", "text": "Here's what I found:", "annotations": []}
    ]

    # Check tool outputs
    assert "tool_outputs" in result.generations[0].message.additional_kwargs
    assert len(result.generations[0].message.additional_kwargs["tool_outputs"]) == 2

    # Check web search output
    web_search = next(
        output
        for output in result.generations[0].message.additional_kwargs["tool_outputs"]
        if output["type"] == "web_search_call"
    )
    assert web_search["id"] == "websearch_123"
    assert web_search["status"] == "completed"

    # Check file search output
    file_search = next(
        output
        for output in result.generations[0].message.additional_kwargs["tool_outputs"]
        if output["type"] == "file_search_call"
    )
    assert file_search["id"] == "filesearch_123"
    assert file_search["queries"] == ["python code"]
    assert file_search["results"][0]["filename"] == "example.py"


def test__construct_responses_api_input_human_message_with_text_blocks_conversion() -> (
    None
):
    """Test that human messages with text blocks are properly converted."""
    messages: list = [
        HumanMessage(content=[{"type": "text", "text": "What's in this image?"}])
    ]
    result = _construct_responses_api_input(messages)

    assert len(result) == 1
    assert result[0]["role"] == "user"
    assert isinstance(result[0]["content"], list)
    assert len(result[0]["content"]) == 1
    assert result[0]["content"][0]["type"] == "input_text"
    assert result[0]["content"][0]["text"] == "What's in this image?"


def test__construct_responses_api_input_human_message_with_image_url_conversion() -> (
    None
):
    """Test that human messages with image_url blocks are properly converted."""
    messages: list = [
        HumanMessage(
            content=[
                {"type": "text", "text": "What's in this image?"},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://example.com/image.jpg",
                        "detail": "high",
                    },
                },
            ]
        )
    ]
    result = _construct_responses_api_input(messages)

    assert len(result) == 1
    assert result[0]["role"] == "user"
    assert isinstance(result[0]["content"], list)
    assert len(result[0]["content"]) == 2

    # Check text block conversion
    assert result[0]["content"][0]["type"] == "input_text"
    assert result[0]["content"][0]["text"] == "What's in this image?"

    # Check image block conversion
    assert result[0]["content"][1]["type"] == "input_image"
    assert result[0]["content"][1]["image_url"] == "https://example.com/image.jpg"
    assert result[0]["content"][1]["detail"] == "high"


def test__construct_responses_api_input_ai_message_with_tool_calls() -> None:
    """Test that AI messages with tool calls are properly converted."""
    tool_calls = [
        {
            "id": "call_123",
            "name": "get_weather",
            "args": {"location": "San Francisco"},
            "type": "tool_call",
        }
    ]

    # Create a mapping from tool call IDs to function call IDs
    function_call_ids = {"call_123": "func_456"}

    ai_message = AIMessage(
        content="",
        tool_calls=tool_calls,
        additional_kwargs={_FUNCTION_CALL_IDS_MAP_KEY: function_call_ids},
    )

    result = _construct_responses_api_input([ai_message])

    assert len(result) == 1
    assert result[0]["type"] == "function_call"
    assert result[0]["name"] == "get_weather"
    assert result[0]["arguments"] == '{"location": "San Francisco"}'
    assert result[0]["call_id"] == "call_123"
    assert result[0]["id"] == "func_456"


def test__construct_responses_api_input_ai_message_with_tool_calls_and_content() -> (
    None
):
    """Test that AI messages with both tool calls and content are properly converted."""
    tool_calls = [
        {
            "id": "call_123",
            "name": "get_weather",
            "args": {"location": "San Francisco"},
            "type": "tool_call",
        }
    ]

    # Create a mapping from tool call IDs to function call IDs
    function_call_ids = {"call_123": "func_456"}

    ai_message = AIMessage(
        content="I'll check the weather for you.",
        tool_calls=tool_calls,
        additional_kwargs={_FUNCTION_CALL_IDS_MAP_KEY: function_call_ids},
    )

    result = _construct_responses_api_input([ai_message])

    assert len(result) == 2

    # Check content
    assert result[0]["role"] == "assistant"
    assert result[0]["content"] == "I'll check the weather for you."

    # Check function call
    assert result[1]["type"] == "function_call"
    assert result[1]["name"] == "get_weather"
    assert result[1]["arguments"] == '{"location": "San Francisco"}'
    assert result[1]["call_id"] == "call_123"
    assert result[1]["id"] == "func_456"


def test__construct_responses_api_input_missing_function_call_ids() -> None:
    """Test AI messages with tool calls but missing function call IDs raise an error."""
    tool_calls = [
        {
            "id": "call_123",
            "name": "get_weather",
            "args": {"location": "San Francisco"},
            "type": "tool_call",
        }
    ]

    ai_message = AIMessage(content="", tool_calls=tool_calls)

    with pytest.raises(ValueError):
        _construct_responses_api_input([ai_message])


def test__construct_responses_api_input_tool_message_conversion() -> None:
    """Test that tool messages are properly converted to function_call_output."""
    messages = [
        ToolMessage(
            content='{"temperature": 72, "conditions": "sunny"}',
            tool_call_id="call_123",
        )
    ]

    result = _construct_responses_api_input(messages)

    assert len(result) == 1
    assert result[0]["type"] == "function_call_output"
    assert result[0]["output"] == '{"temperature": 72, "conditions": "sunny"}'
    assert result[0]["call_id"] == "call_123"


def test__construct_responses_api_input_multiple_message_types() -> None:
    """Test conversion of a conversation with multiple message types."""
    messages = [
        SystemMessage(content="You are a helpful assistant."),
        HumanMessage(content="What's the weather in San Francisco?"),
        HumanMessage(
            content=[{"type": "text", "text": "What's the weather in San Francisco?"}]
        ),
        AIMessage(
            content="",
            tool_calls=[
                {
                    "type": "tool_call",
                    "id": "call_123",
                    "name": "get_weather",
                    "args": {"location": "San Francisco"},
                }
            ],
            additional_kwargs={_FUNCTION_CALL_IDS_MAP_KEY: {"call_123": "func_456"}},
        ),
        ToolMessage(
            content='{"temperature": 72, "conditions": "sunny"}',
            tool_call_id="call_123",
        ),
        AIMessage(content="The weather in San Francisco is 72°F and sunny."),
        AIMessage(
            content=[
                {
                    "type": "text",
                    "text": "The weather in San Francisco is 72°F and sunny.",
                }
            ]
        ),
    ]
    messages_copy = [m.copy(deep=True) for m in messages]

    result = _construct_responses_api_input(messages)

    assert len(result) == len(messages)

    # Check system message
    assert result[0]["role"] == "system"
    assert result[0]["content"] == "You are a helpful assistant."

    # Check human message
    assert result[1]["role"] == "user"
    assert result[1]["content"] == "What's the weather in San Francisco?"
    assert result[2]["role"] == "user"
    assert result[2]["content"] == [
        {"type": "input_text", "text": "What's the weather in San Francisco?"}
    ]

    # Check function call
    assert result[3]["type"] == "function_call"
    assert result[3]["name"] == "get_weather"
    assert result[3]["arguments"] == '{"location": "San Francisco"}'
    assert result[3]["call_id"] == "call_123"
    assert result[3]["id"] == "func_456"

    # Check function call output
    assert result[4]["type"] == "function_call_output"
    assert result[4]["output"] == '{"temperature": 72, "conditions": "sunny"}'
    assert result[4]["call_id"] == "call_123"

    assert result[5]["role"] == "assistant"
    assert result[5]["content"] == "The weather in San Francisco is 72°F and sunny."

    assert result[6]["role"] == "assistant"
    assert result[6]["content"] == [
        {
            "type": "output_text",
            "text": "The weather in San Francisco is 72°F and sunny.",
            "annotations": [],
        }
    ]

    # assert no mutation has occurred
    assert messages_copy == messages

@ -462,7 +462,7 @@ wheels = [

[[package]]
name = "langchain-core"
version = "0.3.43"
version = "0.3.45rc1"
source = { editable = "../../core" }
dependencies = [
    { name = "jsonpatch" },
@ -520,7 +520,7 @@ typing = [

[[package]]
name = "langchain-openai"
version = "0.3.8"
version = "0.3.9rc1"
source = { editable = "." }
dependencies = [
    { name = "langchain-core" },
@ -566,7 +566,7 @@ typing = [
[package.metadata]
requires-dist = [
    { name = "langchain-core", editable = "../../core" },
    { name = "openai", specifier = ">=1.58.1,<2.0.0" },
    { name = "openai", specifier = ">=1.66.0,<2.0.0" },
    { name = "tiktoken", specifier = ">=0.7,<1" },
]

@ -751,7 +751,7 @@ wheels = [

[[package]]
name = "openai"
version = "1.61.1"
version = "1.66.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "anyio" },
@ -763,9 +763,9 @@ dependencies = [
    { name = "tqdm" },
    { name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/d9/cf/61e71ce64cf0a38f029da0f9a5f10c9fa0e69a7a977b537126dac50adfea/openai-1.61.1.tar.gz", hash = "sha256:ce1851507218209961f89f3520e06726c0aa7d0512386f0f977e3ac3e4f2472e", size = 350784 }
sdist = { url = "https://files.pythonhosted.org/packages/84/c5/3c422ca3ccc81c063955e7c20739d7f8f37fea0af865c4a60c81e6225e14/openai-1.66.0.tar.gz", hash = "sha256:8a9e672bc6eadec60a962f0b40d7d1c09050010179c919ed65322e433e2d1025", size = 396819 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/9a/b6/2e2a011b2dc27a6711376808b4cd8c922c476ea0f1420b39892117fa8563/openai-1.61.1-py3-none-any.whl", hash = "sha256:72b0826240ce26026ac2cd17951691f046e5be82ad122d20a8e1b30ca18bd11e", size = 463126 },
    { url = "https://files.pythonhosted.org/packages/d7/f1/d52960dac9519c9de64593460826a0fe2e19159389ec97ecf3e931d2e6a3/openai-1.66.0-py3-none-any.whl", hash = "sha256:43e4a3c0c066cc5809be4e6aac456a3ebc4ec1848226ef9d1340859ac130d45a", size = 566389 },
]

[[package]]
25
uv.lock
@ -1,4 +1,5 @@
version = 1
revision = 1
requires-python = ">=3.9, <4.0"
resolution-markers = [
    "python_full_version >= '3.13' and platform_python_implementation == 'PyPy'",
@ -2152,7 +2153,7 @@ wheels = [

[[package]]
name = "langchain"
version = "0.3.19"
version = "0.3.20"
source = { editable = "libs/langchain" }
dependencies = [
    { name = "async-timeout", marker = "python_full_version < '3.11'" },
@ -2191,6 +2192,7 @@ requires-dist = [
    { name = "requests", specifier = ">=2,<3" },
    { name = "sqlalchemy", specifier = ">=1.4,<3" },
]
provides-extras = ["community", "anthropic", "openai", "cohere", "google-vertexai", "google-genai", "fireworks", "ollama", "together", "mistralai", "huggingface", "groq", "aws", "deepseek", "xai"]

[package.metadata.requires-dev]
codespell = [{ name = "codespell", specifier = ">=2.2.0,<3.0.0" }]
@ -2259,7 +2261,7 @@ typing = [

[[package]]
name = "langchain-anthropic"
version = "0.3.8"
version = "0.3.9"
source = { editable = "libs/partners/anthropic" }
dependencies = [
    { name = "anthropic" },
@ -2360,7 +2362,7 @@ typing = [

[[package]]
name = "langchain-community"
version = "0.3.18"
version = "0.3.19"
source = { editable = "libs/community" }
dependencies = [
    { name = "aiohttp" },
@ -2385,8 +2387,7 @@ requires-dist = [
    { name = "langchain", editable = "libs/langchain" },
    { name = "langchain-core", editable = "libs/core" },
    { name = "langsmith", specifier = ">=0.1.125,<0.4" },
    { name = "numpy", marker = "python_full_version < '3.12'", specifier = ">=1.26.4,<2" },
    { name = "numpy", marker = "python_full_version >= '3.12'", specifier = ">=1.26.2,<3" },
    { name = "numpy", specifier = ">=1.26.2,<3" },
    { name = "pydantic-settings", specifier = ">=2.4.0,<3.0.0" },
    { name = "pyyaml", specifier = ">=5.3" },
    { name = "requests", specifier = ">=2,<3" },
@ -2450,7 +2451,7 @@ typing = [

[[package]]
name = "langchain-core"
version = "0.3.40"
version = "0.3.43"
source = { editable = "libs/core" }
dependencies = [
    { name = "jsonpatch" },
@ -2573,7 +2574,7 @@ dependencies = [

[[package]]
name = "langchain-groq"
version = "0.2.4"
version = "0.2.5"
source = { editable = "libs/partners/groq" }
dependencies = [
    { name = "groq" },
@ -2732,7 +2733,7 @@ typing = []

[[package]]
name = "langchain-openai"
version = "0.3.7"
version = "0.3.8"
source = { editable = "libs/partners/openai" }
dependencies = [
    { name = "langchain-core" },
@ -2743,7 +2744,7 @@ dependencies = [
[package.metadata]
requires-dist = [
    { name = "langchain-core", editable = "libs/core" },
    { name = "openai", specifier = ">=1.58.1,<2.0.0" },
    { name = "openai", specifier = ">=1.66.0,<2.0.0" },
    { name = "tiktoken", specifier = ">=0.7,<1" },
]

@ -3630,7 +3631,7 @@ wheels = [

[[package]]
name = "openai"
version = "1.61.1"
version = "1.66.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "anyio" },
@ -3642,9 +3643,9 @@ dependencies = [
    { name = "tqdm" },
    { name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/d9/cf/61e71ce64cf0a38f029da0f9a5f10c9fa0e69a7a977b537126dac50adfea/openai-1.61.1.tar.gz", hash = "sha256:ce1851507218209961f89f3520e06726c0aa7d0512386f0f977e3ac3e4f2472e", size = 350784 }
sdist = { url = "https://files.pythonhosted.org/packages/d8/e1/b3e1fda1aa32d4f40d4de744e91de4de65c854c3e53c63342e4b5f9c5995/openai-1.66.2.tar.gz", hash = "sha256:9b3a843c25f81ee09b6469d483d9fba779d5c6ea41861180772f043481b0598d", size = 397041 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/9a/b6/2e2a011b2dc27a6711376808b4cd8c922c476ea0f1420b39892117fa8563/openai-1.61.1-py3-none-any.whl", hash = "sha256:72b0826240ce26026ac2cd17951691f046e5be82ad122d20a8e1b30ca18bd11e", size = 463126 },
    { url = "https://files.pythonhosted.org/packages/2c/6f/3315b3583ffe3e31c55b446cb22d2a7c235e65ca191674fffae62deb3c11/openai-1.66.2-py3-none-any.whl", hash = "sha256:75194057ee6bb8b732526387b6041327a05656d976fc21c064e21c8ac6b07999", size = 567268 },
]

[[package]]
Loading…
Reference in New Issue
Block a user