chore: [openai] bump sdk (#31958)

This commit is contained in:
ccurme 2025-07-10 15:53:41 -04:00 committed by GitHub
parent b5462b8979
commit 612ccf847a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 1467 additions and 1433 deletions

View File

@@ -16,7 +16,11 @@ ChatOpenAI v0.3 stores reasoning and tool outputs in AIMessage.additional_kwargs
"summary": [{"type": "summary_text", "text": "Reasoning summary"}],
},
"tool_outputs": [
{"type": "web_search_call", "id": "websearch_123", "status": "completed"}
{
"type": "web_search_call",
"id": "websearch_123",
"status": "completed",
}
],
"refusal": "I cannot assist with that.",
},

View File

@@ -138,7 +138,11 @@ class AzureChatOpenAI(BaseChatOpenAI):
AIMessage(
content="J'adore programmer.",
usage_metadata={"input_tokens": 28, "output_tokens": 6, "total_tokens": 34},
usage_metadata={
"input_tokens": 28,
"output_tokens": 6,
"total_tokens": 34,
},
response_metadata={
"token_usage": {
"completion_tokens": 6,
@@ -184,8 +188,12 @@ class AzureChatOpenAI(BaseChatOpenAI):
AIMessageChunk(content="ad", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
AIMessageChunk(content="ore", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
AIMessageChunk(content=" la", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
AIMessageChunk(content=" programm", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
AIMessageChunk(content="ation", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
AIMessageChunk(
content=" programm", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f"
)
AIMessageChunk(
content="ation", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f"
)
AIMessageChunk(content=".", id="run-a6f294d3-0700-4f6a-abc2-c6ef1178c37f")
AIMessageChunk(
content="",
@@ -294,7 +302,9 @@ class AzureChatOpenAI(BaseChatOpenAI):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")
rating: Optional[int] = Field(
description="How funny the joke is, from 1 to 10"
)
structured_llm = llm.with_structured_output(Joke)
@@ -930,7 +940,9 @@ class AzureChatOpenAI(BaseChatOpenAI):
)
llm = AzureChatOpenAI(azure_deployment="...", model="gpt-4o", temperature=0)
llm = AzureChatOpenAI(
azure_deployment="...", model="gpt-4o", temperature=0
)
structured_llm = llm.with_structured_output(AnswerWithJustification)
structured_llm.invoke(
@@ -961,7 +973,9 @@ class AzureChatOpenAI(BaseChatOpenAI):
)
llm = AzureChatOpenAI(azure_deployment="...", model="gpt-4o", temperature=0)
llm = AzureChatOpenAI(
azure_deployment="...", model="gpt-4o", temperature=0
)
structured_llm = llm.with_structured_output(
AnswerWithJustification, method="function_calling"
)
@@ -990,7 +1004,9 @@ class AzureChatOpenAI(BaseChatOpenAI):
justification: str
llm = AzureChatOpenAI(azure_deployment="...", model="gpt-4o", temperature=0)
llm = AzureChatOpenAI(
azure_deployment="...", model="gpt-4o", temperature=0
)
structured_llm = llm.with_structured_output(
AnswerWithJustification, include_raw=True
)
@@ -1022,7 +1038,9 @@ class AzureChatOpenAI(BaseChatOpenAI):
]
llm = AzureChatOpenAI(azure_deployment="...", model="gpt-4o", temperature=0)
llm = AzureChatOpenAI(
azure_deployment="...", model="gpt-4o", temperature=0
)
structured_llm = llm.with_structured_output(AnswerWithJustification)
structured_llm.invoke(

View File

@@ -2126,7 +2126,9 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
AIMessageChunk(content="", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
AIMessageChunk(content="J", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
AIMessageChunk(content="'adore", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
AIMessageChunk(
content="'adore", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0"
)
AIMessageChunk(content=" la", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
AIMessageChunk(
content=" programmation", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0"
@@ -2182,7 +2184,11 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
"logprobs": None,
},
id="run-012cffe2-5d3d-424d-83b5-51c6d4a593d1-0",
usage_metadata={"input_tokens": 31, "output_tokens": 5, "total_tokens": 36},
usage_metadata={
"input_tokens": 31,
"output_tokens": 5,
"total_tokens": 36,
},
)
.. dropdown:: Tool calling
@@ -2299,7 +2305,9 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
tool = {"type": "web_search_preview"}
llm_with_tools = llm.bind_tools([tool])
response = llm_with_tools.invoke("What was a positive news story from today?")
response = llm_with_tools.invoke(
"What was a positive news story from today?"
)
response.content
.. code-block:: python
@@ -2346,7 +2354,8 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
.. code-block:: python
second_response = llm.invoke(
"What is my name?", previous_response_id=response.response_metadata["id"]
"What is my name?",
previous_response_id=response.response_metadata["id"],
)
second_response.text()
@@ -2424,7 +2433,9 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")
rating: Optional[int] = Field(
description="How funny the joke is, from 1 to 10"
)
structured_llm = llm.with_structured_output(Joke)

View File

@@ -65,7 +65,7 @@ target-version = "py39"
[tool.ruff.lint]
select = ["E", "F", "I", "T201", "UP", "S"]
ignore = [ "UP007", ]
ignore = [ "UP007", "UP045" ]
[tool.ruff.format]
docstring-code-format = true

File diff suppressed because it is too large Load Diff