From 0d53d49d255fc4b3d0cb16397b5883992eb16ac1 Mon Sep 11 00:00:00 2001
From: "open-swe[bot]"
Date: Mon, 11 Aug 2025 20:38:19 +0000
Subject: [PATCH] Apply patch [skip ci]

---
 .../partners/openai/langchain_openai/chat_models/base.py | 6 ++++--
 .../openai/langchain_openai/chat_models/batch.py          | 9 +++++----
 .../openai/tests/unit_tests/chat_models/test_batch.py     | 2 +-
 3 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index f996373bec3..2534548b0ce 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -2295,8 +2295,10 @@ class BaseChatOpenAI(BaseChatModel):
             inputs: List of inputs to process in batch.
             config: Configuration for the batch processing.
             return_exceptions: Whether to return exceptions instead of raising them.
-            use_batch_api: If True, use OpenAI's Batch API for cost savings with polling.
-                If False (default), use standard parallel processing for immediate results.
+            use_batch_api: If True, use OpenAI's Batch API for cost savings
+                with polling.
+                If False (default), use standard parallel processing
+                for immediate results.
             **kwargs: Additional parameters to pass to the underlying model.
 
         Returns:
diff --git a/libs/partners/openai/langchain_openai/chat_models/batch.py b/libs/partners/openai/langchain_openai/chat_models/batch.py
index 105f49e8d67..fea23f87263 100644
--- a/libs/partners/openai/langchain_openai/chat_models/batch.py
+++ b/libs/partners/openai/langchain_openai/chat_models/batch.py
@@ -253,7 +253,7 @@
             BatchError: If batch cancellation fails.
         """
         try:
-            batch = self.client.batches.cancel(batch_id)
+            _ = self.client.batches.cancel(batch_id)
             return self.retrieve_batch(batch_id)
         except openai.OpenAIError as e:
             raise BatchError(
@@ -267,7 +267,8 @@
 
 class OpenAIBatchProcessor:
     """
-    High-level processor for managing OpenAI Batch API lifecycle with LangChain integration.
+    High-level processor for managing OpenAI Batch API lifecycle with
+    LangChain integration.
 
     This class handles the complete batch processing workflow:
     1. Converts LangChain messages to OpenAI batch format
@@ -299,7 +300,7 @@
 
     def create_batch(
         self,
-        messages_list: list[List[BaseMessage]],
+        messages_list: list[list[BaseMessage]],
         description: Optional[str] = None,
         metadata: Optional[dict[str, str]] = None,
         **kwargs: Any,
@@ -435,7 +436,7 @@
 
     def process_batch(
         self,
-        messages_list: list[List[BaseMessage]],
+        messages_list: list[list[BaseMessage]],
         description: Optional[str] = None,
         metadata: Optional[dict[str, str]] = None,
         poll_interval: Optional[float] = None,
diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_batch.py b/libs/partners/openai/tests/unit_tests/chat_models/test_batch.py
index d80ed160a47..9ee16b522d6 100644
--- a/libs/partners/openai/tests/unit_tests/chat_models/test_batch.py
+++ b/libs/partners/openai/tests/unit_tests/chat_models/test_batch.py
@@ -600,7 +600,7 @@ class TestBatchIntegrationScenarios:
             mock_processor_class.return_value = mock_processor
 
             inputs = [[HumanMessage(content=f"Question {i}")] for i in range(num_requests)]
-            _ = self.llm.batch(inputs, use_batch_api=True)
+            results = self.llm.batch(inputs, use_batch_api=True)
 
             assert len(results) == num_requests
             for i, result in enumerate(results):
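
For reference, a minimal caller-side sketch of the use_batch_api flag documented and tested above (not part of the patch): it assumes ChatOpenAI from langchain_openai exposes the flag as described in the docstring change; the model name and prompts are illustrative only.

# Usage sketch under the assumptions stated above.
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini")  # model choice is illustrative
inputs = [[HumanMessage(content=f"Question {i}")] for i in range(3)]

# Default path: standard parallel processing, results return immediately.
fast_results = llm.batch(inputs)

# Batch API path: lower cost, but the call polls until the batch completes.
batch_results = llm.batch(inputs, use_batch_api=True)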