From 7328ec38ac836e921fb2932c70187ecdb56fc032 Mon Sep 17 00:00:00 2001
From: "open-swe[bot]"
Date: Mon, 11 Aug 2025 20:40:10 +0000
Subject: [PATCH] Apply patch [skip ci]

---
 libs/partners/openai/langchain_openai/chat_models/base.py  | 4 ++--
 libs/partners/openai/langchain_openai/chat_models/batch.py | 6 ++++--
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index 2534548b0ce..ca8b1fd68e7 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -84,7 +84,7 @@ from langchain_core.runnables import (
     RunnableMap,
     RunnablePassthrough,
 )
-from langchain_core.runnables.config import run_in_executor
+from langchain_core.runnables.config import RunnableConfig, run_in_executor
 from langchain_core.tools import BaseTool
 from langchain_core.tools.base import _stringify
 from langchain_core.utils import get_pydantic_field_names
@@ -100,7 +100,7 @@ from langchain_core.utils.pydantic import (
 from langchain_core.utils.utils import _build_model_kwargs, from_env, secret_from_env
 from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
 from pydantic.v1 import BaseModel as BaseModelV1
-from typing_extensions import Self
+from typing_extensions import Self, override
 
 from langchain_openai.chat_models._client_utils import (
     _get_default_async_httpx_client,
diff --git a/libs/partners/openai/langchain_openai/chat_models/batch.py b/libs/partners/openai/langchain_openai/chat_models/batch.py
index fea23f87263..b1c269326f4 100644
--- a/libs/partners/openai/langchain_openai/chat_models/batch.py
+++ b/libs/partners/openai/langchain_openai/chat_models/batch.py
@@ -48,7 +48,8 @@ class OpenAIBatchClient:
     and result retrieval.
 
     This class provides a high-level interface to OpenAI's Batch API, which offers
-    50% cost savings compared to the standard API in exchange for asynchronous processing.
+    50% cost savings compared to the standard API in exchange for
+    asynchronous processing.
     """
 
     def __init__(self, client: openai.OpenAI):
@@ -182,7 +183,8 @@ class OpenAIBatchClient:
             # Check timeout
             if timeout and (time.time() - start_time) > timeout:
                 raise BatchError(
-                    f"Batch {batch_id} timed out after {timeout} seconds. Current status: {status}",
+                    f"Batch {batch_id} timed out after {timeout} seconds. "
+                    f"Current status: {status}",
                     batch_id=batch_id,
                     status=status,
                 )
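Note on the base.py hunk: it only adds two imports, `RunnableConfig` and `override`, so the code that consumes them is evidently introduced elsewhere and not shown in this patch. A minimal sketch of the pattern these two imports typically serve (the `_Base` and `_Child` class names below are hypothetical, not from the patch; only the two imports are real):

```python
from typing import Any, Optional

from langchain_core.runnables.config import RunnableConfig
from typing_extensions import override


class _Base:
    def _generate(self, prompt: str, config: Optional[RunnableConfig] = None) -> Any:
        raise NotImplementedError


class _Child(_Base):
    # @override asks the type checker to confirm a matching method exists on
    # the base class, so a renamed or re-signatured parent method is caught
    # at lint time instead of silently defining a new method.
    @override
    def _generate(self, prompt: str, config: Optional[RunnableConfig] = None) -> Any:
        # RunnableConfig is a TypedDict carrying per-call settings such as
        # tags, callbacks, and metadata; here it is just read defensively.
        return {"prompt": prompt, "tags": (config or {}).get("tags", [])}
```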
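Both batch.py hunks are pure line-length wraps with no behavior change. The second one sits inside what is evidently a polling loop that waits for a batch to reach a terminal state. A minimal sketch of that pattern against the OpenAI SDK (`client.batches.retrieve` is the real SDK call; `wait_for_batch`, `poll_interval`, and the `BatchError` constructor signature are inferred from the hunk, not confirmed by it):

```python
import time
from typing import Optional

import openai


class BatchError(Exception):
    """Error carrying batch id and status, mirroring the call site in the hunk."""

    def __init__(self, message: str, batch_id: str, status: str) -> None:
        super().__init__(message)
        self.batch_id = batch_id
        self.status = status


def wait_for_batch(
    client: openai.OpenAI,
    batch_id: str,
    timeout: Optional[float] = None,
    poll_interval: float = 10.0,
):
    """Poll a batch until it reaches a terminal state or `timeout` elapses."""
    start_time = time.time()
    while True:
        batch = client.batches.retrieve(batch_id)
        status = batch.status
        if status in ("completed", "failed", "expired", "cancelled"):
            return batch
        # Check timeout -- the branch whose message the hunk re-wraps.
        if timeout and (time.time() - start_time) > timeout:
            raise BatchError(
                f"Batch {batch_id} timed out after {timeout} seconds. "
                f"Current status: {status}",
                batch_id=batch_id,
                status=status,
            )
        time.sleep(poll_interval)
```

Usage would look like `wait_for_batch(openai.OpenAI(), "batch_abc123", timeout=3600)`; the Batch API trades this waiting for the 50% cost savings mentioned in the docstring hunk.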