Mirror of https://github.com/hwchase17/langchain.git
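
The maintenance script below bumps the pinned `mypy` and `ruff` versions in every `pyproject.toml` under `libs/`, re-runs mypy in each package, appends `# type: ignore[...]` comments for the errors it reports, and finishes by reformatting with ruff.
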
```python
"""python scripts/update_mypy_ruff.py"""
import glob
import tomllib
from pathlib import Path
import toml
import subprocess
import re
ROOT_DIR = Path(__file__).parents[1]
def main():
for path in glob.glob(str(ROOT_DIR / "libs/**/pyproject.toml"), recursive=True):
print(path)
with open(path, "rb") as f:
pyproject = tomllib.load(f)
try:
pyproject["tool"]["poetry"]["group"]["typing"]["dependencies"]["mypy"] = (
"^1.10"
)
pyproject["tool"]["poetry"]["group"]["lint"]["dependencies"]["ruff"] = (
"^0.5"
)
except KeyError:
continue
with open(path, "w") as f:
toml.dump(pyproject, f)
cwd = "/".join(path.split("/")[:-1])
completed = subprocess.run(
"poetry lock --no-update; poetry install --with typing; poetry run mypy . --no-color",
cwd=cwd,
shell=True,
capture_output=True,
text=True,
)
logs = completed.stdout.split("\n")
to_ignore = {}
for l in logs:
if re.match("^(.*)\:(\d+)\: error:.*\[(.*)\]", l):
path, line_no, error_type = re.match(
"^(.*)\:(\d+)\: error:.*\[(.*)\]", l
).groups()
if (path, line_no) in to_ignore:
to_ignore[(path, line_no)].append(error_type)
else:
to_ignore[(path, line_no)] = [error_type]
print(len(to_ignore))
for (error_path, line_no), error_types in to_ignore.items():
all_errors = ", ".join(error_types)
full_path = f"{cwd}/{error_path}"
try:
with open(full_path, "r") as f:
file_lines = f.readlines()
except FileNotFoundError:
continue
file_lines[int(line_no) - 1] = (
file_lines[int(line_no) - 1][:-1] + f" # type: ignore[{all_errors}]\n"
)
with open(full_path, "w") as f:
f.write("".join(file_lines))
subprocess.run(
"poetry run ruff format .; poetry run ruff --select I --fix .",
cwd=cwd,
shell=True,
capture_output=True,
text=True,
)
if __name__ == "__main__":
main()
```
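
To make the error-grouping step concrete, here is a minimal, self-contained sketch of how the regex collects mypy error codes per file and line. The `sample_logs` entries are illustrative stand-ins for real mypy output, not captured from an actual run:

```python
import re

MYPY_ERROR_RE = re.compile(r"^(.*):(\d+): error:.*\[(.*)\]")

# Hypothetical mypy output: two errors on the same line, plus a note.
sample_logs = [
    "foo/bar.py:12: error: Incompatible return value type  [return-value]",
    'foo/bar.py:12: error: Argument 1 has incompatible type "int"  [arg-type]',
    "note: this line does not match the pattern and is skipped",
]

to_ignore: dict = {}
for log_line in sample_logs:
    match = MYPY_ERROR_RE.match(log_line)
    if match:
        error_path, line_no, error_type = match.groups()
        # Accumulate all error codes for the same (file, line) pair.
        to_ignore.setdefault((error_path, line_no), []).append(error_type)

print(to_ignore)
# {('foo/bar.py', '12'): ['return-value', 'arg-type']}
```

Grouping by `(file, line)` is what lets the script emit a single combined comment like `# type: ignore[return-value, arg-type]` instead of clobbering one suppression with the next.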
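The integration-test suite below, for the ChatYuan2 chat model in langchain-community, is representative of the files such a script touches: note the `# type: ignore[call-arg]` suppressions on each constructor call.
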
"""Test ChatYuan2 wrapper."""
|
|
|
|
from typing import List
|
|
|
|
import pytest
|
|
from langchain_core.callbacks import CallbackManager
|
|
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
|
|
from langchain_core.outputs import (
|
|
ChatGeneration,
|
|
LLMResult,
|
|
)
|
|
|
|
from langchain_community.chat_models.yuan2 import ChatYuan2
|
|
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
|
|
|
|
|
|
@pytest.mark.scheduled
|
|
def test_chat_yuan2() -> None:
|
|
"""Test ChatYuan2 wrapper."""
|
|
chat = ChatYuan2( # type: ignore[call-arg]
|
|
yuan2_api_key="EMPTY",
|
|
yuan2_api_base="http://127.0.0.1:8001/v1",
|
|
temperature=1.0,
|
|
model_name="yuan2",
|
|
max_retries=3,
|
|
streaming=False,
|
|
)
|
|
messages = [
|
|
HumanMessage(content="Hello"),
|
|
]
|
|
response = chat.invoke(messages)
|
|
assert isinstance(response, BaseMessage)
|
|
assert isinstance(response.content, str)
|
|
|
|
|
|
def test_chat_yuan2_system_message() -> None:
|
|
"""Test ChatYuan2 wrapper with system message."""
|
|
chat = ChatYuan2( # type: ignore[call-arg]
|
|
yuan2_api_key="EMPTY",
|
|
yuan2_api_base="http://127.0.0.1:8001/v1",
|
|
temperature=1.0,
|
|
model_name="yuan2",
|
|
max_retries=3,
|
|
streaming=False,
|
|
)
|
|
messages = [
|
|
SystemMessage(content="You are an AI assistant."),
|
|
HumanMessage(content="Hello"),
|
|
]
|
|
response = chat.invoke(messages)
|
|
assert isinstance(response, BaseMessage)
|
|
assert isinstance(response.content, str)
|
|
|
|
|
|
@pytest.mark.scheduled
|
|
def test_chat_yuan2_generate() -> None:
|
|
"""Test ChatYuan2 wrapper with generate."""
|
|
chat = ChatYuan2( # type: ignore[call-arg]
|
|
yuan2_api_key="EMPTY",
|
|
yuan2_api_base="http://127.0.0.1:8001/v1",
|
|
temperature=1.0,
|
|
model_name="yuan2",
|
|
max_retries=3,
|
|
streaming=False,
|
|
)
|
|
messages: List = [
|
|
HumanMessage(content="Hello"),
|
|
]
|
|
response = chat.generate([messages])
|
|
assert isinstance(response, LLMResult)
|
|
assert len(response.generations) == 1
|
|
assert response.llm_output
|
|
generation = response.generations[0]
|
|
for gen in generation:
|
|
assert isinstance(gen, ChatGeneration)
|
|
assert isinstance(gen.text, str)
|
|
assert gen.text == gen.message.content
|
|
|
|
|
|
@pytest.mark.scheduled
|
|
def test_chat_yuan2_streaming() -> None:
|
|
"""Test that streaming correctly invokes on_llm_new_token callback."""
|
|
callback_handler = FakeCallbackHandler()
|
|
callback_manager = CallbackManager([callback_handler])
|
|
|
|
chat = ChatYuan2( # type: ignore[call-arg]
|
|
yuan2_api_key="EMPTY",
|
|
yuan2_api_base="http://127.0.0.1:8001/v1",
|
|
temperature=1.0,
|
|
model_name="yuan2",
|
|
max_retries=3,
|
|
streaming=True,
|
|
callbacks=callback_manager,
|
|
)
|
|
messages = [
|
|
HumanMessage(content="Hello"),
|
|
]
|
|
response = chat.invoke(messages)
|
|
assert callback_handler.llm_streams > 0
|
|
assert isinstance(response, BaseMessage)
|
|
|
|
|
|
@pytest.mark.asyncio
|
|
async def test_async_chat_yuan2() -> None:
|
|
"""Test async generation."""
|
|
chat = ChatYuan2( # type: ignore[call-arg]
|
|
yuan2_api_key="EMPTY",
|
|
yuan2_api_base="http://127.0.0.1:8001/v1",
|
|
temperature=1.0,
|
|
model_name="yuan2",
|
|
max_retries=3,
|
|
streaming=False,
|
|
)
|
|
messages: List = [
|
|
HumanMessage(content="Hello"),
|
|
]
|
|
response = await chat.agenerate([messages])
|
|
assert isinstance(response, LLMResult)
|
|
assert len(response.generations) == 1
|
|
generations = response.generations[0]
|
|
for generation in generations:
|
|
assert isinstance(generation, ChatGeneration)
|
|
assert isinstance(generation.text, str)
|
|
assert generation.text == generation.message.content
|
|
|
|
|
|
@pytest.mark.asyncio
|
|
async def test_async_chat_yuan2_streaming() -> None:
|
|
"""Test that streaming correctly invokes on_llm_new_token callback."""
|
|
callback_handler = FakeCallbackHandler()
|
|
callback_manager = CallbackManager([callback_handler])
|
|
|
|
chat = ChatYuan2( # type: ignore[call-arg]
|
|
yuan2_api_key="EMPTY",
|
|
yuan2_api_base="http://127.0.0.1:8001/v1",
|
|
temperature=1.0,
|
|
model_name="yuan2",
|
|
max_retries=3,
|
|
streaming=True,
|
|
callbacks=callback_manager,
|
|
)
|
|
messages: List = [
|
|
HumanMessage(content="Hello"),
|
|
]
|
|
response = await chat.agenerate([messages])
|
|
assert callback_handler.llm_streams > 0
|
|
assert isinstance(response, LLMResult)
|
|
assert len(response.generations) == 1
|
|
generations = response.generations[0]
|
|
for generation in generations:
|
|
assert isinstance(generation, ChatGeneration)
|
|
assert isinstance(generation.text, str)
|
|
assert generation.text == generation.message.content
|
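
All of these tests assume a local, OpenAI-compatible Yuan2 server. Under that same assumption (endpoint `http://127.0.0.1:8001/v1` serving a model named `yuan2`, both taken from the test fixtures above), a pared-down smoke check outside pytest looks like this:

```python
from langchain_core.messages import HumanMessage

from langchain_community.chat_models.yuan2 import ChatYuan2

# Assumes a Yuan2 server is running locally; the endpoint and model
# name mirror the test fixtures above.
chat = ChatYuan2(  # type: ignore[call-arg]
    yuan2_api_key="EMPTY",
    yuan2_api_base="http://127.0.0.1:8001/v1",
    model_name="yuan2",
)

response = chat.invoke([HumanMessage(content="Hello")])
print(response.content)
```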