Apply patch [skip ci]
@@ -2295,8 +2295,10 @@ class BaseChatOpenAI(BaseChatModel):
             inputs: List of inputs to process in batch.
             config: Configuration for the batch processing.
             return_exceptions: Whether to return exceptions instead of raising them.
-            use_batch_api: If True, use OpenAI's Batch API for cost savings with polling.
-                If False (default), use standard parallel processing for immediate results.
+            use_batch_api: If True, use OpenAI's Batch API for cost savings
+                with polling.
+                If False (default), use standard parallel processing
+                for immediate results.
             **kwargs: Additional parameters to pass to the underlying model.
 
         Returns:
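For context, here is a minimal usage sketch of the use_batch_api flag documented above. The model name and inputs are illustrative only, and the exact cost/latency trade-off depends on OpenAI's Batch API pricing:

from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini")  # illustrative model choice
inputs = [[HumanMessage(content=f"Question {i}")] for i in range(3)]

# Default path: standard parallel processing, results return immediately.
immediate_results = llm.batch(inputs)

# Batch API path: submits an OpenAI batch job and polls until it completes,
# trading latency for a lower per-request cost.
deferred_results = llm.batch(inputs, use_batch_api=True)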
@@ -253,7 +253,7 @@ class OpenAIBatchClient:
             BatchError: If batch cancellation fails.
         """
         try:
-            batch = self.client.batches.cancel(batch_id)
+            _ = self.client.batches.cancel(batch_id)
             return self.retrieve_batch(batch_id)
         except openai.OpenAIError as e:
             raise BatchError(
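The change above discards the unused binding from batches.cancel and then re-fetches the batch so callers see its post-cancellation status. A minimal sketch of that cancel-then-retrieve pattern against the OpenAI SDK directly, assuming an existing batch id; this is not the patched method itself:

import openai

client = openai.OpenAI()  # reads OPENAI_API_KEY from the environment
batch_id = "batch_abc123"  # placeholder id

try:
    _ = client.batches.cancel(batch_id)  # request cancellation; ignore the returned snapshot
    batch = client.batches.retrieve(batch_id)  # re-fetch so status reflects the cancellation
    print(batch.status)  # e.g. "cancelling" or "cancelled"
except openai.OpenAIError as exc:
    raise RuntimeError(f"Failed to cancel batch {batch_id}") from exc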
@@ -267,7 +267,8 @@ class OpenAIBatchProcessor:
 
 class OpenAIBatchProcessor:
     """
-    High-level processor for managing OpenAI Batch API lifecycle with LangChain integration.
+    High-level processor for managing OpenAI Batch API lifecycle with
+    LangChain integration.
 
     This class handles the complete batch processing workflow:
     1. Converts LangChain messages to OpenAI batch format
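A hedged usage sketch of the processor described in that docstring. The import path, constructor arguments, and exact return type are assumptions; the patch only shows the class name and the create_batch/process_batch signatures:

from langchain_core.messages import HumanMessage

# Import path and constructor arguments are assumed for illustration.
processor = OpenAIBatchProcessor(client=OpenAIBatchClient(), model="gpt-4o-mini")

messages_list = [
    [HumanMessage(content="Summarize the OpenAI Batch API in one sentence.")],
    [HumanMessage(content="When is use_batch_api=True worth the extra latency?")],
]

# process_batch (see the later hunk) submits the job, polls until it finishes,
# and returns one result per input conversation.
results = processor.process_batch(messages_list, description="docs example")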
@@ -299,7 +300,7 @@ class OpenAIBatchProcessor:
 
     def create_batch(
         self,
-        messages_list: list[List[BaseMessage]],
+        messages_list: list[list[BaseMessage]],
         description: Optional[str] = None,
         metadata: Optional[dict[str, str]] = None,
         **kwargs: Any,
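The only change here (repeated in the process_batch hunk below) swaps the typing.List alias for the built-in generic per PEP 585; both spellings describe the same shape, as this small illustration shows:

from typing import List

from langchain_core.messages import BaseMessage, HumanMessage

# Pre-patch mixed spelling and post-patch builtin-generic spelling are interchangeable at runtime.
old_style: list[List[BaseMessage]] = [[HumanMessage(content="hi")]]
new_style: list[list[BaseMessage]] = [[HumanMessage(content="hi")]]
assert old_style == new_style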
@@ -435,7 +436,7 @@ class OpenAIBatchProcessor:
 
     def process_batch(
         self,
-        messages_list: list[List[BaseMessage]],
+        messages_list: list[list[BaseMessage]],
         description: Optional[str] = None,
         metadata: Optional[dict[str, str]] = None,
         poll_interval: Optional[float] = None,
@@ -600,7 +600,7 @@ class TestBatchIntegrationScenarios:
         mock_processor_class.return_value = mock_processor
 
         inputs = [[HumanMessage(content=f"Question {i}")] for i in range(num_requests)]
-        _ = self.llm.batch(inputs, use_batch_api=True)
+        results = self.llm.batch(inputs, use_batch_api=True)
 
         assert len(results) == num_requests
         for i, result in enumerate(results):
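The test fix keeps the return value of llm.batch so the assertions that follow have something to check. A hedged, self-contained sketch of the mocking pattern this test appears to rely on; the patch target, dummy credentials, and mocked return values are assumptions, not part of the diff:

from unittest.mock import MagicMock, patch

from langchain_core.messages import AIMessage, HumanMessage
from langchain_openai import ChatOpenAI

num_requests = 5
llm = ChatOpenAI(model="gpt-4o-mini", api_key="test-key")  # dummy key; no real API call is made

# The patch target below is an assumption about where the processor is resolved.
with patch("langchain_openai.chat_models.base.OpenAIBatchProcessor") as mock_processor_class:
    mock_processor = MagicMock()
    mock_processor.process_batch.return_value = [
        AIMessage(content=f"Answer {i}") for i in range(num_requests)
    ]
    mock_processor_class.return_value = mock_processor

    inputs = [[HumanMessage(content=f"Question {i}")] for i in range(num_requests)]
    results = llm.batch(inputs, use_batch_api=True)

assert len(results) == num_requests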