Mirror of https://github.com/hwchase17/langchain.git, synced 2025-09-23 19:39:58 +00:00
infra: update mypy 1.10, ruff 0.5 (#23721)
```python
"""python scripts/update_mypy_ruff.py"""
import glob
import re
import subprocess
import tomllib
from pathlib import Path

import toml

ROOT_DIR = Path(__file__).parents[1]


def main():
    for path in glob.glob(str(ROOT_DIR / "libs/**/pyproject.toml"), recursive=True):
        print(path)
        with open(path, "rb") as f:
            pyproject = tomllib.load(f)
        try:
            pyproject["tool"]["poetry"]["group"]["typing"]["dependencies"]["mypy"] = (
                "^1.10"
            )
            pyproject["tool"]["poetry"]["group"]["lint"]["dependencies"]["ruff"] = (
                "^0.5"
            )
        except KeyError:
            # Package has no typing/lint dependency groups; skip it.
            continue
        with open(path, "w") as f:
            toml.dump(pyproject, f)

        cwd = "/".join(path.split("/")[:-1])

        # Re-lock, install the typing group, and run mypy to collect errors.
        completed = subprocess.run(
            "poetry lock --no-update; poetry install --with typing; "
            "poetry run mypy . --no-color",
            cwd=cwd,
            shell=True,
            capture_output=True,
            text=True,
        )
        logs = completed.stdout.split("\n")

        # Map (file, line number) -> error codes mypy reported on that line.
        to_ignore: dict[tuple[str, str], list[str]] = {}
        for log_line in logs:
            match = re.match(r"^(.*):(\d+): error:.*\[(.*)\]", log_line)
            if match:
                error_path, line_no, error_type = match.groups()
                to_ignore.setdefault((error_path, line_no), []).append(error_type)
        print(len(to_ignore))

        # Append a scoped `# type: ignore[...]` comment to each offending line.
        for (error_path, line_no), error_types in to_ignore.items():
            all_errors = ", ".join(error_types)
            full_path = f"{cwd}/{error_path}"
            try:
                with open(full_path, "r") as f:
                    file_lines = f.readlines()
            except FileNotFoundError:
                continue
            file_lines[int(line_no) - 1] = (
                file_lines[int(line_no) - 1][:-1] + f"  # type: ignore[{all_errors}]\n"
            )
            with open(full_path, "w") as f:
                f.write("".join(file_lines))

        # `ruff check` replaces the bare `ruff` invocation removed in ruff 0.5.
        subprocess.run(
            "poetry run ruff format .; poetry run ruff check --select I --fix .",
            cwd=cwd,
            shell=True,
            capture_output=True,
            text=True,
        )


if __name__ == "__main__":
    main()
```
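For reference, the error-parsing step hinges on the shape of mypy's plain-text output. A quick, self-contained check of the pattern; the file name and message below are invented for illustration:

```python
import re

# An invented sample line in mypy's plain-text output format.
sample = "tests/unit_tests/test_cpal.py:84: error: Argument 1 has incompatible type [arg-type]"

match = re.match(r"^(.*):(\d+): error:.*\[(.*)\]", sample)
assert match is not None
path, line_no, error_code = match.groups()
print(path, line_no, error_code)
# -> tests/unit_tests/test_cpal.py 84 arg-type
```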
```diff
@@ -81,7 +81,7 @@ class TestUnitCPALChain_MathWordProblems(unittest.TestCase):
         return prompt

         narrative = LLMMockData(
-            **{
+            **{  # type: ignore[arg-type, arg-type]
                 "question": (
                     "jan has three times the number of pets as marcia. "
                     "marcia has two more pets than cindy."
@@ -100,7 +100,7 @@ class TestUnitCPALChain_MathWordProblems(unittest.TestCase):
         )

         causal_model = LLMMockData(
-            **{
+            **{  # type: ignore[arg-type, arg-type]
                 "question": (
                     "jan has three times the number of pets as marcia. "
                     "marcia has two more pets than cindy."
@@ -137,7 +137,7 @@ class TestUnitCPALChain_MathWordProblems(unittest.TestCase):
         )

         intervention = LLMMockData(
-            **{
+            **{  # type: ignore[arg-type, arg-type]
                 "question": ("if cindy has ten pets"),
                 "completion": (
                     "{\n"
@@ -152,7 +152,7 @@ class TestUnitCPALChain_MathWordProblems(unittest.TestCase):
         )

         query = LLMMockData(
-            **{
+            **{  # type: ignore[arg-type, arg-type]
                 "question": ("how many pets does jan have? "),
                 "completion": (
                     "{\n"
```
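Two details in these hunks repay a closer look. The duplicate in `[arg-type, arg-type]` is an artifact of the script above: it appends one code per reported error without deduplicating, so a line that trips the same check twice lists it twice. And the brackets make each ignore scoped: mypy suppresses only the listed codes on that line and keeps reporting everything else. A minimal sketch of the scoping behavior, using an invented function:

```python
def wants_int(x: int) -> int:
    """Toy function used only to trigger mypy errors."""
    return x


wants_int("three")  # type: ignore[arg-type]  # suppressed: the code is listed
wants_int("three", extra=1)  # type: ignore[arg-type]  # [call-arg] is still reported
```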
The next hunks touch only module docstrings: the updated ruff formatter inserts a blank line between a module docstring and the first import, which appears to account for these one-line additions.

```diff
@@ -1,4 +1,5 @@
 """Test SQL Database Chain."""
+
 from langchain_community.llms.openai import OpenAI
 from langchain_community.utilities.sql_database import SQLDatabase
 from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine, insert
```
```diff
@@ -14,7 +14,7 @@ class TestAnthropicFunctions(unittest.TestCase):
     """

     def test_default_chat_anthropic(self) -> None:
-        base_model = AnthropicFunctions(model="claude-2")
+        base_model = AnthropicFunctions(model="claude-2")  # type: ignore[call-arg]
         self.assertIsInstance(base_model.model, ChatAnthropic)

         # bind functions
@@ -64,7 +64,7 @@ class TestAnthropicFunctions(unittest.TestCase):
       accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
     },
   });"""
-        llm = BedrockChat(
+        llm = BedrockChat(  # type: ignore[call-arg]
             model_id="anthropic.claude-v2",
             model_kwargs={"temperature": 0.1},
             region_name="us-east-1",
```
```diff
@@ -65,7 +65,7 @@ class TestOllamaFunctions(unittest.TestCase):
     def test_ollama_functions_tools(self) -> None:
         base_model = OllamaFunctions(model="phi3", format="json")
         model = base_model.bind_tools(
-            tools=[PubmedQueryRun(), DuckDuckGoSearchResults(max_results=2)]
+            tools=[PubmedQueryRun(), DuckDuckGoSearchResults(max_results=2)]  # type: ignore[call-arg]
         )
         res = model.invoke("What causes lung cancer?")
         self.assertIsInstance(res, AIMessage)
```
@@ -1,4 +1,5 @@
|
||||
"""Integration test for video captioning."""
|
||||
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
from langchain_experimental.video_captioning.base import VideoCaptioningChain
|
||||
@@ -11,7 +12,7 @@ def test_video_captioning_hard() -> None:
|
||||
-FXX%20USA%20%C2%ABPromo%20Noon%20-%204A%20Every%20Day%EF%BF%BD%EF
|
||||
%BF%BD%C2%BB%20November%202021%EF%BF%BD%EF%BF%BD-%281080p60%29.mp4
|
||||
"""
|
||||
chain = VideoCaptioningChain(
|
||||
chain = VideoCaptioningChain( # type: ignore[call-arg]
|
||||
llm=ChatOpenAI(
|
||||
model="gpt-4",
|
||||
max_tokens=4000,
|
||||
|