```python
"""python scripts/update_mypy_ruff.py"""

import glob
import re
import subprocess
import tomllib
from pathlib import Path

import toml

ROOT_DIR = Path(__file__).parents[1]

# Matches mypy output lines like "pkg/module.py:42: error: ... [error-code]"
MYPY_ERROR_RE = re.compile(r"^(.*):(\d+): error:.*\[(.*)\]")


def main() -> None:
    for path in glob.glob(str(ROOT_DIR / "libs/**/pyproject.toml"), recursive=True):
        print(path)
        with open(path, "rb") as f:
            pyproject = tomllib.load(f)
        try:
            pyproject["tool"]["poetry"]["group"]["typing"]["dependencies"]["mypy"] = (
                "^1.10"
            )
            pyproject["tool"]["poetry"]["group"]["lint"]["dependencies"]["ruff"] = (
                "^0.5"
            )
        except KeyError:
            # Skip packages that don't define the typing/lint dependency groups.
            continue
        with open(path, "w") as f:
            toml.dump(pyproject, f)

        cwd = str(Path(path).parent)
        completed = subprocess.run(
            "poetry lock --no-update; poetry install --with typing; "
            "poetry run mypy . --no-color",
            cwd=cwd,
            shell=True,
            capture_output=True,
            text=True,
        )
        logs = completed.stdout.split("\n")

        # Group the reported error codes by (file, line number).
        to_ignore: dict[tuple[str, str], list[str]] = {}
        for log in logs:
            match = MYPY_ERROR_RE.match(log)
            if match:
                error_path, line_no, error_type = match.groups()
                to_ignore.setdefault((error_path, line_no), []).append(error_type)
        print(len(to_ignore))

        # Append a "# type: ignore[...]" comment to every offending line.
        for (error_path, line_no), error_types in to_ignore.items():
            all_errors = ", ".join(error_types)
            full_path = f"{cwd}/{error_path}"
            try:
                with open(full_path, "r") as f:
                    file_lines = f.readlines()
            except FileNotFoundError:
                continue
            file_lines[int(line_no) - 1] = (
                file_lines[int(line_no) - 1][:-1] + f"  # type: ignore[{all_errors}]\n"
            )
            with open(full_path, "w") as f:
                f.write("".join(file_lines))

        # Reformat and re-sort imports now that the ignore comments are in place.
        subprocess.run(
            "poetry run ruff format .; poetry run ruff check --select I --fix .",
            cwd=cwd,
            shell=True,
            capture_output=True,
            text=True,
        )


if __name__ == "__main__":
    main()
```
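The rewrite the script applies is purely textual: it strips a line's trailing newline and appends a single `# type: ignore[...]` comment listing every error code mypy reported for that line. A minimal, self-contained demo of that manipulation, using a hypothetical source line and hypothetical error codes:

```python
# Mirrors the rewrite in main(): drop the trailing newline, append the codes.
line = "x: int = get_value()\n"  # hypothetical offending source line
error_types = ["assignment", "arg-type"]  # hypothetical codes parsed from mypy output
rewritten = line[:-1] + f"  # type: ignore[{', '.join(error_types)}]\n"
print(rewritten)  # -> x: int = get_value()  # type: ignore[assignment, arg-type]
```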
"""Test Bedrock chat model."""
|
|
|
|
from typing import Any, cast
|
|
|
|
import pytest
|
|
from langchain_core.callbacks import CallbackManager
|
|
from langchain_core.messages import (
|
|
AIMessageChunk,
|
|
BaseMessage,
|
|
HumanMessage,
|
|
SystemMessage,
|
|
)
|
|
from langchain_core.outputs import ChatGeneration, LLMResult
|
|
|
|
from langchain_community.chat_models import BedrockChat
|
|
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
|
|
|
|
|
|
@pytest.fixture
|
|
def chat() -> BedrockChat:
|
|
return BedrockChat(model_id="anthropic.claude-v2", model_kwargs={"temperature": 0}) # type: ignore[call-arg]
|
|
|
|
|
|
@pytest.mark.scheduled
|
|
def test_chat_bedrock(chat: BedrockChat) -> None:
|
|
"""Test BedrockChat wrapper."""
|
|
system = SystemMessage(content="You are a helpful assistant.")
|
|
human = HumanMessage(content="Hello")
|
|
response = chat.invoke([system, human])
|
|
assert isinstance(response, BaseMessage)
|
|
assert isinstance(response.content, str)
|
|
|
|
|
|
@pytest.mark.scheduled
|
|
def test_chat_bedrock_generate(chat: BedrockChat) -> None:
|
|
"""Test BedrockChat wrapper with generate."""
|
|
message = HumanMessage(content="Hello")
|
|
response = chat.generate([[message], [message]])
|
|
assert isinstance(response, LLMResult)
|
|
assert len(response.generations) == 2
|
|
for generations in response.generations:
|
|
for generation in generations:
|
|
assert isinstance(generation, ChatGeneration)
|
|
assert isinstance(generation.text, str)
|
|
assert generation.text == generation.message.content
|
|
|
|
|
|
@pytest.mark.scheduled
|
|
def test_chat_bedrock_generate_with_token_usage(chat: BedrockChat) -> None:
|
|
"""Test BedrockChat wrapper with generate."""
|
|
message = HumanMessage(content="Hello")
|
|
response = chat.generate([[message], [message]])
|
|
assert isinstance(response, LLMResult)
|
|
assert isinstance(response.llm_output, dict)
|
|
|
|
usage = response.llm_output["usage"]
|
|
assert usage["prompt_tokens"] == 20
|
|
assert usage["completion_tokens"] > 0
|
|
assert usage["total_tokens"] > 0
|
|
|
|
|
|
@pytest.mark.scheduled
|
|
def test_chat_bedrock_streaming() -> None:
|
|
"""Test that streaming correctly invokes on_llm_new_token callback."""
|
|
callback_handler = FakeCallbackHandler()
|
|
callback_manager = CallbackManager([callback_handler])
|
|
chat = BedrockChat( # type: ignore[call-arg]
|
|
model_id="anthropic.claude-v2",
|
|
streaming=True,
|
|
callback_manager=callback_manager,
|
|
verbose=True,
|
|
)
|
|
message = HumanMessage(content="Hello")
|
|
response = chat.invoke([message])
|
|
assert callback_handler.llm_streams > 0
|
|
assert isinstance(response, BaseMessage)
|
|
|
|
|
|
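A note on the assertion above: `llm_streams` comes from `FakeCallbackHandler`, a test helper imported from the repository's unit-test suite and not shown on this page. As a minimal sketch, assuming the helper simply counts `on_llm_new_token` callbacks, the behavior the test relies on could be reproduced like this (`TokenCountingHandler` is a hypothetical stand-in, not the actual helper):

```python
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler


class TokenCountingHandler(BaseCallbackHandler):
    """Hypothetical stand-in for FakeCallbackHandler's stream counting."""

    def __init__(self) -> None:
        self.llm_streams = 0

    def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
        # Fired once per streamed token, so the count stays at zero
        # unless the model actually streamed output.
        self.llm_streams += 1
```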
```python
@pytest.mark.scheduled
def test_chat_bedrock_streaming_generation_info() -> None:
    """Test that generation info is preserved when streaming."""

    class _FakeCallback(FakeCallbackHandler):
        saved_things: dict = {}

        def on_llm_end(
            self,
            *args: Any,
            **kwargs: Any,
        ) -> Any:
            # Save the generation so the test can inspect it after streaming.
            self.saved_things["generation"] = args[0]

    callback = _FakeCallback()
    callback_manager = CallbackManager([callback])
    chat = BedrockChat(  # type: ignore[call-arg]
        model_id="anthropic.claude-v2",
        callback_manager=callback_manager,
    )
    list(chat.stream("hi"))
    generation = callback.saved_things["generation"]
    # `Hello!` is two tokens; assert that this is what is returned.
    assert generation.generations[0][0].text == "Hello!"


@pytest.mark.scheduled
def test_bedrock_streaming(chat: BedrockChat) -> None:
    """Test streaming tokens from BedrockChat."""

    full = None
    for token in chat.stream("I'm Pickle Rick"):
        full = token if full is None else full + token  # type: ignore[operator]
        assert isinstance(token.content, str)
    assert isinstance(cast(AIMessageChunk, full).content, str)
```
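The accumulation in `test_bedrock_streaming` works because `AIMessageChunk` overloads `+` to merge chunks, concatenating their string content; that is why `full` ends up holding the complete streamed message. A quick illustration that needs no Bedrock call:

```python
from langchain_core.messages import AIMessageChunk

# Chunks merge under "+"; string content is concatenated.
full = AIMessageChunk(content="Hello,") + AIMessageChunk(content=" world!")
assert full.content == "Hello, world!"
```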
```python
@pytest.mark.scheduled
async def test_bedrock_astream(chat: BedrockChat) -> None:
    """Test async streaming tokens from BedrockChat."""

    async for token in chat.astream("I'm Pickle Rick"):
        assert isinstance(token.content, str)


@pytest.mark.scheduled
async def test_bedrock_abatch(chat: BedrockChat) -> None:
    """Test async batch responses from BedrockChat."""
    result = await chat.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    for token in result:
        assert isinstance(token.content, str)


@pytest.mark.scheduled
async def test_bedrock_abatch_tags(chat: BedrockChat) -> None:
    """Test async batch with tags from BedrockChat."""
    result = await chat.abatch(
        ["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
    )
    for token in result:
        assert isinstance(token.content, str)


@pytest.mark.scheduled
def test_bedrock_batch(chat: BedrockChat) -> None:
    """Test batch responses from BedrockChat."""
    result = chat.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    for token in result:
        assert isinstance(token.content, str)


@pytest.mark.scheduled
async def test_bedrock_ainvoke(chat: BedrockChat) -> None:
    """Test async invoke from BedrockChat."""
    result = await chat.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
    assert isinstance(result.content, str)


@pytest.mark.scheduled
def test_bedrock_invoke(chat: BedrockChat) -> None:
    """Test invoke from BedrockChat."""
    result = chat.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
    assert isinstance(result.content, str)
    assert all(k in result.response_metadata for k in ("usage", "model_id"))
    assert result.response_metadata["usage"]["prompt_tokens"] == 13
```