mirror of https://github.com/hwchase17/langchain.git (synced 2025-08-08 04:25:46 +00:00)

chore: [openai] bump sdk (#31958)

parent b5462b8979
commit 612ccf847a
@@ -16,7 +16,11 @@ ChatOpenAI v0.3 stores reasoning and tool outputs in AIMessage.additional_kwargs
             "summary": [{"type": "summary_text", "text": "Reasoning summary"}],
         },
         "tool_outputs": [
-            {"type": "web_search_call", "id": "websearch_123", "status": "completed"}
+            {
+                "type": "web_search_call",
+                "id": "websearch_123",
+                "status": "completed",
+            }
         ],
         "refusal": "I cannot assist with that.",
     },

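The hunks in this commit mostly re-wrap long lines inside docstring code examples, consistent with the docstring-code-format ruff setting touched at the end of the diff. As a companion to the hunk above, here is a minimal sketch of reading these v0.3-style fields back out of AIMessage.additional_kwargs. The message is built by hand so the sketch runs offline; the "reasoning" wrapper around the summary list is an assumption on my part, and only the keys visible in the hunk are taken from the diff.

    # Hand-built AIMessage mirroring the hunk above; with ChatOpenAI v0.3 these
    # keys are populated by the model. The "reasoning" wrapper key is assumed.
    from langchain_core.messages import AIMessage

    msg = AIMessage(
        content="",
        additional_kwargs={
            "reasoning": {
                "summary": [{"type": "summary_text", "text": "Reasoning summary"}],
            },
            "tool_outputs": [
                {
                    "type": "web_search_call",
                    "id": "websearch_123",
                    "status": "completed",
                }
            ],
            "refusal": "I cannot assist with that.",
        },
    )

    # Read the fields back defensively, since not every response sets them.
    summary = msg.additional_kwargs.get("reasoning", {}).get("summary", [])
    print([part["text"] for part in summary])
    for call in msg.additional_kwargs.get("tool_outputs", []):
        print(call["type"], call["status"])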
@@ -138,7 +138,11 @@ class AzureChatOpenAI(BaseChatOpenAI):

             AIMessage(
                 content="J'adore programmer.",
-                usage_metadata={"input_tokens": 28, "output_tokens": 6, "total_tokens": 34},
+                usage_metadata={
+                    "input_tokens": 28,
+                    "output_tokens": 6,
+                    "total_tokens": 34,
+                },
                 response_metadata={
                     "token_usage": {
                         "completion_tokens": 6,
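For context, a sketch of how the AIMessage in this hunk is typically produced and inspected. It assumes AzureChatOpenAI credentials are supplied via the usual environment variables, and "your-deployment" is a placeholder rather than a real deployment name.

    from langchain_openai import AzureChatOpenAI

    # Placeholder deployment/model; endpoint, API key and API version are read
    # from the environment (AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY, ...).
    llm = AzureChatOpenAI(
        azure_deployment="your-deployment", model="gpt-4o", temperature=0
    )
    ai_msg = llm.invoke("Translate 'I love programming.' into French.")

    print(ai_msg.content)         # e.g. "J'adore programmer."
    print(ai_msg.usage_metadata)  # input_tokens / output_tokens / total_tokens
    print(ai_msg.response_metadata["token_usage"]["completion_tokens"])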
@@ -184,8 +188,12 @@ class AzureChatOpenAI(BaseChatOpenAI):
             AIMessageChunk(content="ad", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
             AIMessageChunk(content="ore", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
             AIMessageChunk(content=" la", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
-            AIMessageChunk(content=" programm", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
-            AIMessageChunk(content="ation", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
+            AIMessageChunk(
+                content=" programm", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f"
+            )
+            AIMessageChunk(
+                content="ation", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f"
+            )
             AIMessageChunk(content=".", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
             AIMessageChunk(
                 content="",
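The chunks in this hunk come from streaming; below is a sketch of producing and merging them, with the same placeholder Azure setup as above. AIMessageChunk supports "+", so a running total can be accumulated while printing each delta.

    from langchain_openai import AzureChatOpenAI

    llm = AzureChatOpenAI(
        azure_deployment="your-deployment", model="gpt-4o", temperature=0
    )

    full = None
    for chunk in llm.stream("Translate 'I love programming.' into French."):
        print(chunk.content, end="", flush=True)   # "J", "'ad", "ore", " la", ...
        full = chunk if full is None else full + chunk  # chunks share one run id
    print()
    print(full.content)  # the concatenated translation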
@@ -294,7 +302,9 @@ class AzureChatOpenAI(BaseChatOpenAI):

                 setup: str = Field(description="The setup of the joke")
                 punchline: str = Field(description="The punchline to the joke")
-                rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")
+                rating: Optional[int] = Field(
+                    description="How funny the joke is, from 1 to 10"
+                )


             structured_llm = llm.with_structured_output(Joke)
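For reference, this is the schema the re-wrapped rating field belongs to, reconstructed from the surrounding docstring example and used with with_structured_output(). The deployment name is a placeholder.

    from typing import Optional

    from pydantic import BaseModel, Field

    from langchain_openai import AzureChatOpenAI


    class Joke(BaseModel):
        """Joke to tell user."""

        setup: str = Field(description="The setup of the joke")
        punchline: str = Field(description="The punchline to the joke")
        rating: Optional[int] = Field(
            description="How funny the joke is, from 1 to 10"
        )


    llm = AzureChatOpenAI(
        azure_deployment="your-deployment", model="gpt-4o", temperature=0
    )
    structured_llm = llm.with_structured_output(Joke)
    joke = structured_llm.invoke("Tell me a joke about cats")
    print(joke.setup, "/", joke.punchline, "/", joke.rating)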
@@ -930,7 +940,9 @@ class AzureChatOpenAI(BaseChatOpenAI):
                 )


-            llm = AzureChatOpenAI(azure_deployment="...", model="gpt-4o", temperature=0)
+            llm = AzureChatOpenAI(
+                azure_deployment="...", model="gpt-4o", temperature=0
+            )
             structured_llm = llm.with_structured_output(AnswerWithJustification)

             structured_llm.invoke(
@@ -961,7 +973,9 @@ class AzureChatOpenAI(BaseChatOpenAI):
                 )


-            llm = AzureChatOpenAI(azure_deployment="...", model="gpt-4o", temperature=0)
+            llm = AzureChatOpenAI(
+                azure_deployment="...", model="gpt-4o", temperature=0
+            )
             structured_llm = llm.with_structured_output(
                 AnswerWithJustification, method="function_calling"
             )
@@ -990,7 +1004,9 @@ class AzureChatOpenAI(BaseChatOpenAI):
                 justification: str


-            llm = AzureChatOpenAI(azure_deployment="...", model="gpt-4o", temperature=0)
+            llm = AzureChatOpenAI(
+                azure_deployment="...", model="gpt-4o", temperature=0
+            )
             structured_llm = llm.with_structured_output(
                 AnswerWithJustification, include_raw=True
             )
@@ -1022,7 +1038,9 @@ class AzureChatOpenAI(BaseChatOpenAI):
             ]


-            llm = AzureChatOpenAI(azure_deployment="...", model="gpt-4o", temperature=0)
+            llm = AzureChatOpenAI(
+                azure_deployment="...", model="gpt-4o", temperature=0
+            )
             structured_llm = llm.with_structured_output(AnswerWithJustification)

             structured_llm.invoke(

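The four hunks above all wrap the same AzureChatOpenAI constructor call inside different with_structured_output() examples. A consolidated sketch follows, with a hand-written AnswerWithJustification schema and a placeholder deployment name.

    from pydantic import BaseModel

    from langchain_openai import AzureChatOpenAI


    class AnswerWithJustification(BaseModel):
        """An answer to the user question, along with justification."""

        answer: str
        justification: str


    llm = AzureChatOpenAI(
        azure_deployment="your-deployment", model="gpt-4o", temperature=0
    )

    # Default: the chain returns an AnswerWithJustification instance.
    structured_llm = llm.with_structured_output(AnswerWithJustification)

    # Force tool/function calling instead of OpenAI's native structured outputs.
    fc_llm = llm.with_structured_output(
        AnswerWithJustification, method="function_calling"
    )

    # include_raw=True returns {"raw": AIMessage, "parsed": ..., "parsing_error": ...}.
    raw_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True)

    result = raw_llm.invoke("What weighs more, a pound of bricks or a pound of feathers?")
    print(result["parsed"], result["parsing_error"])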
@@ -2126,7 +2126,9 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]

             AIMessageChunk(content="", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
             AIMessageChunk(content="J", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
-            AIMessageChunk(content="'adore", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
+            AIMessageChunk(
+                content="'adore", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0"
+            )
             AIMessageChunk(content=" la", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
             AIMessageChunk(
                 content=" programmation", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0"
@@ -2182,7 +2184,11 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
                     "logprobs": None,
                 },
                 id="run-012cffe2-5d3d-424d-83b5-51c6d4a593d1-0",
-                usage_metadata={"input_tokens": 31, "output_tokens": 5, "total_tokens": 36},
+                usage_metadata={
+                    "input_tokens": 31,
+                    "output_tokens": 5,
+                    "total_tokens": 36,
+                },
             )

         .. dropdown:: Tool calling
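A sketch of reading the fields this hunk reformats on a ChatOpenAI response; OPENAI_API_KEY comes from the environment and the model name is a placeholder.

    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    msg = llm.invoke("Translate 'I love programming.' into French.")

    print(msg.usage_metadata)                 # input_tokens / output_tokens / total_tokens
    print(msg.id)                             # run id, as shown in the docstring example
    print(msg.response_metadata["logprobs"])  # None here; populated when requested, e.g. llm.bind(logprobs=True)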
@@ -2299,7 +2305,9 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
             tool = {"type": "web_search_preview"}
             llm_with_tools = llm.bind_tools([tool])

-            response = llm_with_tools.invoke("What was a positive news story from today?")
+            response = llm_with_tools.invoke(
+                "What was a positive news story from today?"
+            )
             response.content

         .. code-block:: python
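A sketch of the built-in web-search example this hunk wraps. It assumes OPENAI_API_KEY is set; the model name and the use_responses_api flag (how recent langchain-openai releases opt into the Responses API, which the web_search_preview tool targets) are assumptions on my part rather than taken from the diff.

    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="gpt-4o-mini", use_responses_api=True)  # flag assumed
    llm_with_tools = llm.bind_tools([{"type": "web_search_preview"}])

    response = llm_with_tools.invoke(
        "What was a positive news story from today?"
    )
    print(response.content)                                # answer text and citations
    print(response.additional_kwargs.get("tool_outputs"))  # web_search_call entries, as in the first hunk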
@@ -2346,7 +2354,8 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
         .. code-block:: python

             second_response = llm.invoke(
-                "What is my name?", previous_response_id=response.response_metadata["id"]
+                "What is my name?",
+                previous_response_id=response.response_metadata["id"],
             )
             second_response.text()

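A sketch of the multi-turn flow around the re-wrapped call: the second invoke passes previous_response_id so the Responses API can see the first exchange. Same assumptions as above (OPENAI_API_KEY in the environment, placeholder model name, use_responses_api flag).

    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="gpt-4o-mini", use_responses_api=True)  # flag assumed

    response = llm.invoke("Hi, I'm Bob.")
    second_response = llm.invoke(
        "What is my name?",
        previous_response_id=response.response_metadata["id"],
    )
    print(second_response.text())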
@@ -2424,7 +2433,9 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]

                 setup: str = Field(description="The setup of the joke")
                 punchline: str = Field(description="The punchline to the joke")
-                rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")
+                rating: Optional[int] = Field(
+                    description="How funny the joke is, from 1 to 10"
+                )


             structured_llm = llm.with_structured_output(Joke)

@@ -65,7 +65,7 @@ target-version = "py39"

 [tool.ruff.lint]
 select = ["E", "F", "I", "T201", "UP", "S"]
-ignore = [ "UP007", ]
+ignore = [ "UP007", "UP045" ]

 [tool.ruff.format]
 docstring-code-format = true

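On the pyproject.toml change: UP045 is, as I understand it, the half of the old UP007 check that ruff split out for Optional[...] annotations (UP007 now covering Union[...]). Since the docstring examples above keep typing.Optional for the py39 target, both rules are silenced. A short illustration of what the two ignored rules would otherwise flag:

    from typing import Optional, Union

    maybe_rating: Optional[int] = None  # UP045 would suggest `int | None`
    raw_value: Union[str, int] = "10"   # UP007 would suggest `str | int`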
File diff suppressed because it is too large.