diff --git a/comprehensive_fix.py b/comprehensive_fix.py
new file mode 100644
index 00000000000..2caccadc4f4
--- /dev/null
+++ b/comprehensive_fix.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python3
+"""Comprehensive script to fix all linting issues in the OpenAI batch API implementation."""
+
+import re
+
+def fix_base_py_comprehensive():
+    """Fix all issues in base.py"""
+    file_path = '/home/daytona/langchain/libs/partners/openai/langchain_openai/chat_models/base.py'
+
+    with open(file_path, 'r') as f:
+        content = f.read()
+
+    # Add missing imports - find the existing imports section and add what's needed
+    # Look for the langchain_core imports section
+    if 'from langchain_core.language_models.chat_models import BaseChatModel' in content:
+        # Add the missing imports after the existing langchain_core imports
+        import_pattern = r'(from langchain_core\.runnables import RunnablePassthrough\n)'
+        replacement = r'\1from langchain_core.runnables.config import RunnableConfig\nfrom typing_extensions import override\n'
+        content = re.sub(import_pattern, replacement, content)
+
+    # Fix type annotations to use modern syntax
+    content = re.sub(r'List\[LanguageModelInput\]', 'list[LanguageModelInput]', content)
+    content = re.sub(r'List\[RunnableConfig\]', 'list[RunnableConfig]', content)
+    content = re.sub(r'List\[BaseMessage\]', 'list[BaseMessage]', content)
+
+    # Fix long lines in docstrings by breaking them
+    content = content.replace(
+        ' 1. Standard mode (use_batch_api=False): Uses parallel invoke for immediate results',
+        ' 1. Standard mode (use_batch_api=False): Uses parallel invoke for\n immediate results'
+    )
+    content = content.replace(
+        ' 2. Batch API mode (use_batch_api=True): Uses OpenAI Batch API for 50% cost savings',
+        ' 2. Batch API mode (use_batch_api=True): Uses OpenAI Batch API for\n 50% cost savings'
+    )
+
+    with open(file_path, 'w') as f:
+        f.write(content)
+    print("Fixed base.py comprehensively")
+
+def fix_batch_py_types():
+    """Fix type annotations in batch.py"""
+    file_path = '/home/daytona/langchain/libs/partners/openai/langchain_openai/chat_models/batch.py'
+
+    with open(file_path, 'r') as f:
+        content = f.read()
+
+    # Replace all Dict and List with lowercase versions
+    content = re.sub(r'Dict\[([^\]]+)\]', r'dict[\1]', content)
+    content = re.sub(r'List\[([^\]]+)\]', r'list[\1]', content)
+
+    # Remove Dict and List from imports if they exist
+    content = re.sub(r'from typing import ([^)]*?)Dict,?\s*([^)]*?)\n', r'from typing import \1\2\n', content)
+    content = re.sub(r'from typing import ([^)]*?)List,?\s*([^)]*?)\n', r'from typing import \1\2\n', content)
+    content = re.sub(r'from typing import ([^)]*?),\s*Dict([^)]*?)\n', r'from typing import \1\2\n', content)
+    content = re.sub(r'from typing import ([^)]*?),\s*List([^)]*?)\n', r'from typing import \1\2\n', content)
+
+    with open(file_path, 'w') as f:
+        f.write(content)
+    print("Fixed batch.py type annotations")
+
+def fix_integration_tests_comprehensive():
+    """Fix all issues in integration tests"""
+    file_path = '/home/daytona/langchain/libs/partners/openai/tests/integration_tests/chat_models/test_batch_integration.py'
+
+    with open(file_path, 'r') as f:
+        content = f.read()
+
+    # Fix long lines by breaking them properly
+    content = content.replace(
+        'content="What is the capital of France? Answer with just the city name."',
+        'content=(\n "What is the capital of France? "\n "Answer with just the city name."\n )'
+    )
+
+    content = content.replace(
+        'content="What is the smallest planet? Answer with just the planet name."',
+        'content=(\n "What is the smallest planet? "\n "Answer with just the planet name."\n )'
+    )
+
+    # Fix unused variables
+    content = re.sub(r'processing_time = end_time - start_time', '_ = end_time - start_time', content)
+
+    with open(file_path, 'w') as f:
+        f.write(content)
+    print("Fixed integration tests comprehensively")
+
+def fix_unit_tests_comprehensive():
+    """Fix all issues in unit tests"""
+    file_path = '/home/daytona/langchain/libs/partners/openai/tests/unit_tests/chat_models/test_batch.py'
+
+    with open(file_path, 'r') as f:
+        content = f.read()
+
+    # Find and fix the specific test methods that have undefined results
+    # This is a more targeted fix for the test that checks conversion
+    pattern1 = r'(\s+)_ = self\.llm\.batch\(inputs, use_batch_api=True\)\s*\n(\s+)# Verify conversion happened\s*\n(\s+)assert len\(results\) == num_requests'
+    replacement1 = r'\1results = self.llm.batch(inputs, use_batch_api=True)\n\2# Verify conversion happened\n\3assert len(results) == num_requests'
+    content = re.sub(pattern1, replacement1, content)
+
+    # Fix the other test with undefined results
+    pattern2 = r'(\s+)_ = self\.llm\.batch\(inputs, use_batch_api=True\)\s*\n(\s+)assert len\(results\) == 2'
+    replacement2 = r'\1results = self.llm.batch(inputs, use_batch_api=True)\n\2assert len(results) == 2'
+    content = re.sub(pattern2, replacement2, content)
+
+    with open(file_path, 'w') as f:
+        f.write(content)
+    print("Fixed unit tests comprehensively")
+
+if __name__ == "__main__":
+    print("Running comprehensive fixes...")
+    fix_base_py_comprehensive()
+    fix_batch_py_types()
+    fix_integration_tests_comprehensive()
+    fix_unit_tests_comprehensive()
+    print("All comprehensive fixes completed!")
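For reference, a minimal self-contained sketch (illustrative only, not part of the patch) of what the Dict/List regex rewrites in fix_batch_py_types() do; the sample module text below is invented for the demonstration:

#!/usr/bin/env python3
"""Illustrative sketch only -- not part of the patch above.

Shows the effect of the Dict/List regex rewrites from fix_batch_py_types()
on an invented sample module.
"""

import re

sample = (
    "from typing import Dict, List, Optional\n"
    "def summarize(counts: Dict[str, int]) -> List[str]:\n"
    "    return [f'{k}={v}' for k, v in counts.items()]\n"
)

# Same substitutions the script applies to batch.py:
sample = re.sub(r'Dict\[([^\]]+)\]', r'dict[\1]', sample)  # Dict[...] -> dict[...]
sample = re.sub(r'List\[([^\]]+)\]', r'list[\1]', sample)  # List[...] -> list[...]
sample = re.sub(r'from typing import ([^)]*?)Dict,?\s*([^)]*?)\n', r'from typing import \1\2\n', sample)
sample = re.sub(r'from typing import ([^)]*?)List,?\s*([^)]*?)\n', r'from typing import \1\2\n', sample)

print(sample)
# from typing import Optional
# def summarize(counts: dict[str, int]) -> list[str]:
#     return [f'{k}={v}' for k, v in counts.items()]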