fix(openai): add in output_text (#32450)

This property was deleted in `openai==1.99.2`.
ccurme 2025-08-07 16:23:56 -03:00 committed by GitHub
parent 754528d23f
commit 68c70da33e
2 changed files with 46 additions and 31 deletions
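
For context, a minimal sketch of the breakage this commit works around (model name is an arbitrary example; under openai==1.99.2 the attribute access raises, while earlier releases return the concatenated text):

from openai import OpenAI  # assumes openai==1.99.2

client = OpenAI()
response = client.responses.create(model="gpt-4o-mini", input="Hello")
# `Response.output_text` was removed in 1.99.2, so this raises AttributeError
# there; on earlier releases it returns the concatenated output text.
print(response.output_text)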


@@ -458,7 +458,7 @@ class BaseChatOpenAI(BaseChatModel):
alias="api_key", default_factory=secret_from_env("OPENAI_API_KEY", default=None)
)
openai_api_base: Optional[str] = Field(default=None, alias="base_url")
"""Base URL path for API requests, leave blank if not using a proxy or service
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator."""
openai_organization: Optional[str] = Field(default=None, alias="organization")
"""Automatically inferred from env var ``OPENAI_ORG_ID`` if not provided."""
@@ -489,7 +489,7 @@ class BaseChatOpenAI(BaseChatModel):
"""Whether to return logprobs."""
top_logprobs: Optional[int] = None
"""Number of most likely tokens to return at each token position, each with
an associated log probability. `logprobs` must be set to true
if this parameter is used."""
logit_bias: Optional[dict[int, int]] = None
"""Modify the likelihood of specified tokens appearing in the completion."""
@@ -507,7 +507,7 @@ class BaseChatOpenAI(BaseChatModel):
Reasoning models only, like OpenAI o1, o3, and o4-mini.
Currently supported values are low, medium, and high. Reducing reasoning effort
can result in faster responses and fewer tokens used on reasoning in a response.
.. versionadded:: 0.2.14
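
For illustration, enabling the field on a reasoning model (value and model name taken from the docstring above):

from langchain_openai import ChatOpenAI

# "low" trades reasoning depth for faster, cheaper responses.
llm = ChatOpenAI(model="o4-mini", reasoning_effort="low")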
@@ -529,26 +529,26 @@ class BaseChatOpenAI(BaseChatModel):
"""
tiktoken_model_name: Optional[str] = None
"""The model name to pass to tiktoken when using this class.
Tiktoken is used to count the number of tokens in documents to constrain
them to be under a certain limit. By default, when set to None, this will
be the same as the embedding model name. However, there are some cases
where you may want to use this Embedding class with a model name not
supported by tiktoken. This can include when using Azure embeddings or
when using one of the many model providers that expose an OpenAI-like
API but with different models. In those cases, in order to avoid erroring
"""The model name to pass to tiktoken when using this class.
Tiktoken is used to count the number of tokens in documents to constrain
them to be under a certain limit. By default, when set to None, this will
be the same as the embedding model name. However, there are some cases
where you may want to use this Embedding class with a model name not
supported by tiktoken. This can include when using Azure embeddings or
when using one of the many model providers that expose an OpenAI-like
API but with different models. In those cases, in order to avoid erroring
when tiktoken is called, you can specify a model name to use here."""
default_headers: Union[Mapping[str, str], None] = None
default_query: Union[Mapping[str, object], None] = None
# Configure a custom httpx client. See the
# [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
http_client: Union[Any, None] = Field(default=None, exclude=True)
"""Optional ``httpx.Client``. Only used for sync invocations. Must specify
"""Optional ``httpx.Client``. Only used for sync invocations. Must specify
``http_async_client`` as well if you'd like a custom client for async
invocations.
"""
http_async_client: Union[Any, None] = Field(default=None, exclude=True)
"""Optional ``httpx.AsyncClient``. Only used for async invocations. Must specify
"""Optional ``httpx.AsyncClient``. Only used for async invocations. Must specify
``http_client`` as well if you'd like a custom client for sync invocations."""
stop: Optional[Union[list[str], str]] = Field(default=None, alias="stop_sequences")
"""Default stop sequences."""
@@ -556,40 +556,40 @@ class BaseChatOpenAI(BaseChatModel):
"""Optional additional JSON properties to include in the request parameters when
making requests to OpenAI compatible APIs, such as vLLM, LM Studio, or other
providers.
This is the recommended way to pass custom parameters that are specific to your
OpenAI-compatible API provider but not part of the standard OpenAI API.
Examples:
- LM Studio TTL parameter: ``extra_body={"ttl": 300}``
- vLLM custom parameters: ``extra_body={"use_beam_search": True}``
- Any other provider-specific parameters
.. note::
Do NOT use ``model_kwargs`` for custom parameters that are not part of the
standard OpenAI API, as this will cause errors when making API calls. Use
``extra_body`` instead.
"""
include_response_headers: bool = False
"""Whether to include response headers in the output message ``response_metadata``.""" # noqa: E501
disabled_params: Optional[dict[str, Any]] = Field(default=None)
"""Parameters of the OpenAI client or chat.completions endpoint that should be
"""Parameters of the OpenAI client or chat.completions endpoint that should be
disabled for the given model.
Should be specified as ``{"param": None | ['val1', 'val2']}`` where the key is the
parameter and the value is either None, meaning that parameter should never be
used, or it's a list of disabled values for the parameter.
For example, older models may not support the ``'parallel_tool_calls'`` parameter at
all, in which case ``disabled_params={"parallel_tool_calls": None}`` can be passed
in.
If a parameter is disabled then it will not be used by default in any methods, e.g.
in :meth:`~langchain_openai.chat_models.base.ChatOpenAI.with_structured_output`.
However this does not prevent a user from directly passing in the parameter during
invocation.
"""
include: Optional[list[str]] = None
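
Putting the two fields above together, a hedged example for an OpenAI-compatible server (endpoint and model name are hypothetical; ``ttl`` comes from the LM Studio example in the docstring):

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="my-local-model",
    base_url="http://localhost:1234/v1",
    extra_body={"ttl": 300},                        # provider-specific field
    disabled_params={"parallel_tool_calls": None},  # never send this param
)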
@@ -3716,6 +3716,20 @@ def _construct_responses_api_input(messages: Sequence[BaseMessage]) -> list:
return input_
def _get_output_text(response: Response) -> str:
"""OpenAI SDK deleted response.output_text in 1.99.2"""
if hasattr(response, "output_text"):
return response.output_text
texts: list[str] = []
for output in response.output:
if output.type == "message":
for content in output.content:
if content.type == "output_text":
texts.append(content.text)
return "".join(texts)
def _construct_lc_result_from_responses_api(
response: Response,
schema: Optional[type[_BM]] = None,
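
A quick sketch of the fallback path in ``_get_output_text`` above, using a stand-in object rather than a real ``openai.types.responses.Response``:

from types import SimpleNamespace

# Mimics the Responses API shape without the deleted `output_text` property.
fake = SimpleNamespace(
    output=[
        SimpleNamespace(
            type="message",
            content=[SimpleNamespace(type="output_text", text="Hello!")],
        )
    ]
)
assert not hasattr(fake, "output_text")
assert _get_output_text(fake) == "Hello!"  # reconstructed by walking `output`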
@@ -3830,17 +3844,18 @@ def _construct_lc_result_from_responses_api(
# text_format=Foo,
# stream=True, # <-- errors
# )
output_text = _get_output_text(response)
if (
schema is not None
and "parsed" not in additional_kwargs
-and response.output_text # tool calls can generate empty output text
+and output_text # tool calls can generate empty output text
and response.text
and (text_config := response.text.model_dump())
and (format_ := text_config.get("format", {}))
and (format_.get("type") == "json_schema")
):
try:
-parsed_dict = json.loads(response.output_text)
+parsed_dict = json.loads(output_text)
if schema and _is_pydantic_class(schema):
parsed = schema(**parsed_dict)
else:
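
The parsing branch above is what structured output exercises on the Responses API; a hedged end-to-end sketch (assumes the ``use_responses_api`` flag and ``json_schema`` method behave as in current langchain-openai):

from pydantic import BaseModel
from langchain_openai import ChatOpenAI

class Movie(BaseModel):
    title: str
    year: int

llm = ChatOpenAI(model="gpt-4o-mini", use_responses_api=True)
structured = llm.with_structured_output(Movie, method="json_schema")
# The model's JSON reply arrives as output text and is parsed into Movie,
# which is exactly where the reconstructed `output_text` is needed.
result = structured.invoke("Name a famous 1999 sci-fi movie.")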


@@ -480,7 +480,7 @@ wheels = [
[[package]]
name = "langchain-core"
version = "0.3.72"
version = "0.3.73"
source = { editable = "../../core" }
dependencies = [
{ name = "jsonpatch" },
@@ -995,7 +995,7 @@ wheels = [
[[package]]
name = "openai"
version = "1.98.0"
version = "1.99.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
@@ -1007,9 +1007,9 @@ dependencies = [
{ name = "tqdm" },
{ name = "typing-extensions" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/d8/9d/52eadb15c92802711d6b6cf00df3a6d0d18b588f4c5ba5ff210c6419fc03/openai-1.98.0.tar.gz", hash = "sha256:3ee0fcc50ae95267fd22bd1ad095ba5402098f3df2162592e68109999f685427", size = 496695, upload-time = "2025-07-30T12:48:03.701Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/de/2c/8cd1684364551237a5e6db24ce25c25ff54efcf1805b39110ec713dc2972/openai-1.99.2.tar.gz", hash = "sha256:118075b48109aa237636607b1346cf03b37cb9d74b0414cb11095850a0a22c96", size = 504752, upload-time = "2025-08-07T17:16:14.668Z" }
wheels = [
-{ url = "https://files.pythonhosted.org/packages/a8/fe/f64631075b3d63a613c0d8ab761d5941631a470f6fa87eaaee1aa2b4ec0c/openai-1.98.0-py3-none-any.whl", hash = "sha256:b99b794ef92196829120e2df37647722104772d2a74d08305df9ced5f26eae34", size = 767713, upload-time = "2025-07-30T12:48:01.264Z" },
+{ url = "https://files.pythonhosted.org/packages/b7/06/f3338c1b8685dc1634fa5174dc5ba2d3eecc7887c9fc539bb5da6f75ebb3/openai-1.99.2-py3-none-any.whl", hash = "sha256:110d85b8ed400e1d7b02f8db8e245bd757bcde347cb6923155f42cd66a10aa0b", size = 785594, upload-time = "2025-08-07T17:16:13.083Z" },
]
[[package]]