Mirror of https://github.com/hwchase17/langchain.git
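The maintenance script below (invoked, per its docstring, as `python scripts/update_mypy_ruff.py`) walks every `libs/**/pyproject.toml`, bumps the pinned `mypy` and `ruff` versions, re-runs mypy in each package, appends `# type: ignore[...]` comments to any lines that now fail, and finishes with a ruff format and import-sort pass.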
```python
"""python scripts/update_mypy_ruff.py"""

import glob
import re
import subprocess
import tomllib
from pathlib import Path

import toml

ROOT_DIR = Path(__file__).parents[1]

# Matches mypy error lines such as "pkg/mod.py:12: error: ...  [error-code]".
ERROR_PATTERN = re.compile(r"^(.*):(\d+): error:.*\[(.*)\]")


def main() -> None:
    for path in glob.glob(str(ROOT_DIR / "libs/**/pyproject.toml"), recursive=True):
        print(path)
        with open(path, "rb") as f:
            pyproject = tomllib.load(f)
        try:
            pyproject["tool"]["poetry"]["group"]["typing"]["dependencies"]["mypy"] = (
                "^1.10"
            )
            pyproject["tool"]["poetry"]["group"]["lint"]["dependencies"]["ruff"] = (
                "^0.5"
            )
        except KeyError:
            # Package does not declare these dependency groups; skip it.
            continue
        with open(path, "w") as f:
            toml.dump(pyproject, f)

        cwd = "/".join(path.split("/")[:-1])

        completed = subprocess.run(
            "poetry lock --no-update; poetry install --with typing; "
            "poetry run mypy . --no-color",
            cwd=cwd,
            shell=True,
            capture_output=True,
            text=True,
        )
        logs = completed.stdout.split("\n")

        # Group error codes by (file, line) so each line gets a single comment.
        to_ignore = {}
        for log_line in logs:
            match = ERROR_PATTERN.match(log_line)
            if match:
                err_path, line_no, error_type = match.groups()
                if (err_path, line_no) in to_ignore:
                    to_ignore[(err_path, line_no)].append(error_type)
                else:
                    to_ignore[(err_path, line_no)] = [error_type]
        print(len(to_ignore))

        # Append "# type: ignore[...]" to every line mypy now complains about.
        for (err_path, line_no), error_types in to_ignore.items():
            all_errors = ", ".join(error_types)
            full_path = f"{cwd}/{err_path}"
            try:
                with open(full_path, "r") as f:
                    file_lines = f.readlines()
            except FileNotFoundError:
                continue
            file_lines[int(line_no) - 1] = (
                file_lines[int(line_no) - 1][:-1] + f" # type: ignore[{all_errors}]\n"
            )
            with open(full_path, "w") as f:
                f.write("".join(file_lines))

        subprocess.run(
            "poetry run ruff format .; poetry run ruff --select I --fix .",
            cwd=cwd,
            shell=True,
            capture_output=True,
            text=True,
        )


if __name__ == "__main__":
    main()
```
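As a quick illustration of the error-grouping step, the script's regex can be run over a couple of hypothetical mypy lines (the file name and error codes below are invented for the example):

```python
import re

ERROR_PATTERN = re.compile(r"^(.*):(\d+): error:.*\[(.*)\]")

# Hypothetical mypy output; the file name and error codes are invented.
logs = [
    "chains/base.py:42: error: Incompatible return value type  [return-value]",
    "chains/base.py:42: error: Argument 1 has incompatible type  [arg-type]",
    "note: See https://mypy.readthedocs.io for details",  # no match; ignored
]

to_ignore = {}
for line in logs:
    match = ERROR_PATTERN.match(line)
    if match:
        err_path, line_no, error_type = match.groups()
        to_ignore.setdefault((err_path, line_no), []).append(error_type)

# Both errors on line 42 collapse into a single ignore comment.
for (err_path, line_no), error_types in to_ignore.items():
    print(f"{err_path}:{line_no}  # type: ignore[{', '.join(error_types)}]")
# chains/base.py:42  # type: ignore[return-value, arg-type]
```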
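The second file is a unit-test module for `LLMChain`, covering serialization round-trips, missing-input validation, calls with and without stop words, `predict`, and `predict_and_parse`: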
"""Test LLM chain."""
|
|
|
|
from tempfile import TemporaryDirectory
|
|
from typing import Dict, List, Union
|
|
from unittest.mock import patch
|
|
|
|
import pytest
|
|
from langchain.chains.llm import LLMChain
|
|
from langchain_core.output_parsers import BaseOutputParser
|
|
from langchain_core.prompts import PromptTemplate
|
|
|
|
from tests.unit_tests.llms.fake_llm import FakeLLM
|
|
|
|
|
|
class FakeOutputParser(BaseOutputParser):
|
|
"""Fake output parser class for testing."""
|
|
|
|
def parse(self, text: str) -> Union[str, List[str], Dict[str, str]]:
|
|
"""Parse by splitting."""
|
|
return text.split()
|
|
|
|
|
|
@pytest.fixture
|
|
def fake_llm_chain() -> LLMChain:
|
|
"""Fake LLM chain for testing purposes."""
|
|
prompt = PromptTemplate(input_variables=["bar"], template="This is a {bar}:")
|
|
return LLMChain(prompt=prompt, llm=FakeLLM(), output_key="text1")
|
|
|
|
|
|
@patch(
|
|
"langchain_community.llms.loading.get_type_to_cls_dict",
|
|
lambda: {"fake": lambda: FakeLLM},
|
|
)
|
|
def test_serialization(fake_llm_chain: LLMChain) -> None:
|
|
"""Test serialization."""
|
|
from langchain.chains.loading import load_chain
|
|
|
|
with TemporaryDirectory() as temp_dir:
|
|
file = temp_dir + "/llm.json"
|
|
fake_llm_chain.save(file)
|
|
loaded_chain = load_chain(file)
|
|
assert loaded_chain == fake_llm_chain
|
|
|
|
|
|
def test_missing_inputs(fake_llm_chain: LLMChain) -> None:
|
|
"""Test error is raised if inputs are missing."""
|
|
with pytest.raises(ValueError):
|
|
fake_llm_chain({"foo": "bar"})
|
|
|
|
|
|
def test_valid_call(fake_llm_chain: LLMChain) -> None:
|
|
"""Test valid call of LLM chain."""
|
|
output = fake_llm_chain({"bar": "baz"})
|
|
assert output == {"bar": "baz", "text1": "foo"}
|
|
|
|
# Test with stop words.
|
|
output = fake_llm_chain({"bar": "baz", "stop": ["foo"]})
|
|
# Response should be `bar` now.
|
|
assert output == {"bar": "baz", "stop": ["foo"], "text1": "bar"}
|
|
|
|
|
|
def test_predict_method(fake_llm_chain: LLMChain) -> None:
|
|
"""Test predict method works."""
|
|
output = fake_llm_chain.predict(bar="baz")
|
|
assert output == "foo"
|
|
|
|
|
|
def test_predict_and_parse() -> None:
|
|
"""Test parsing ability."""
|
|
prompt = PromptTemplate(
|
|
input_variables=["foo"], template="{foo}", output_parser=FakeOutputParser()
|
|
)
|
|
llm = FakeLLM(queries={"foo": "foo bar"})
|
|
chain = LLMChain(prompt=prompt, llm=llm)
|
|
output = chain.predict_and_parse(foo="foo")
|
|
assert output == ["foo", "bar"]
|
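The assertions above pin down the behavior these tests assume from `FakeLLM`: a canned-answer lookup keyed by the rendered prompt, `"foo"` as the default completion, and `"bar"` once stop words are supplied. A minimal stand-in consistent with that contract might look like the following sketch (an assumption for illustration, not the actual `tests/unit_tests/llms/fake_llm.py`; the class and method names here are hypothetical):

```python
from typing import Dict, List, Optional


class MinimalFakeLLM:
    """Hypothetical sketch of the behavior the tests above assume from FakeLLM."""

    def __init__(self, queries: Optional[Dict[str, str]] = None) -> None:
        self.queries = queries or {}

    def complete(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        # test_predict_and_parse: a canned answer wins if one was registered.
        if prompt in self.queries:
            return self.queries[prompt]
        # test_valid_call: "bar" when stop words are passed, "foo" otherwise.
        return "bar" if stop else "foo"


assert MinimalFakeLLM().complete("This is a baz:") == "foo"
assert MinimalFakeLLM().complete("This is a baz:", stop=["foo"]) == "bar"
assert MinimalFakeLLM(queries={"foo": "foo bar"}).complete("foo") == "foo bar"
```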