add anthropic scheduled tests and unit tests (#11188)

This commit is contained in:
Bagatur 2023-09-28 11:47:29 -07:00 committed by GitHub
parent fd96878c4b
commit 3508e582f1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 113 additions and 512 deletions

View File

@@ -52,6 +52,7 @@ jobs:
shell: bash
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
run: |
make scheduled_tests

File diff suppressed because it is too large Load Diff

View File

@@ -132,6 +132,7 @@ sqlite-vss = {version = "^0.1.2", optional = true}
anyio = "<4.0"
jsonpatch = "^1.33"
timescale-vector = {version = "^0.0.1", optional = true}
anthropic = {version = "^0.3.11", optional = true}
[tool.poetry.group.test.dependencies]
@@ -180,6 +181,7 @@ openai = "^0.27.4"
python-dotenv = "^1.0.0"
cassio = "^0.1.0"
tiktoken = "^0.3.2"
anthropic = "^0.3.11"
[tool.poetry.group.lint.dependencies]
ruff = "^0.0.249"
@@ -350,6 +352,7 @@ extended_testing = [
"dashvector",
"sqlite-vss",
"timescale-vector",
"anthropic",
]
[tool.ruff]

View File

@@ -6,13 +6,13 @@ import pytest
from langchain.callbacks.manager import CallbackManager
from langchain.chat_models.anthropic import (
ChatAnthropic,
convert_messages_to_prompt_anthropic,
)
from langchain.schema import ChatGeneration, LLMResult
from langchain.schema.messages import AIMessage, BaseMessage, HumanMessage
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
@pytest.mark.scheduled
def test_anthropic_call() -> None:
"""Test valid call to anthropic."""
chat = ChatAnthropic(model="test")
@@ -22,13 +22,7 @@ def test_anthropic_call() -> None:
assert isinstance(response.content, str)
def test_anthropic_initialization() -> None:
"""Test anthropic initialization."""
# Verify that chat anthropic can be initialized using a secret key provided
# as a parameter rather than an environment variable.
ChatAnthropic(model="test", anthropic_api_key="test")
@pytest.mark.scheduled
def test_anthropic_generate() -> None:
"""Test generate method of anthropic."""
chat = ChatAnthropic(model="test")
@@ -45,6 +39,7 @@ def test_anthropic_generate() -> None:
assert chat_messages == messages_copy
@pytest.mark.scheduled
def test_anthropic_streaming() -> None:
"""Test streaming tokens from anthropic."""
chat = ChatAnthropic(model="test", streaming=True)
@@ -54,6 +49,7 @@ def test_anthropic_streaming() -> None:
assert isinstance(response.content, str)
@pytest.mark.scheduled
def test_anthropic_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
@@ -69,6 +65,7 @@ def test_anthropic_streaming_callback() -> None:
assert callback_handler.llm_streams > 1
@pytest.mark.scheduled
@pytest.mark.asyncio
async def test_anthropic_async_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
@@ -90,29 +87,3 @@ async def test_anthropic_async_streaming_callback() -> None:
assert isinstance(response, ChatGeneration)
assert isinstance(response.text, str)
assert response.text == response.message.content
def test_formatting() -> None:
messages: List[BaseMessage] = [HumanMessage(content="Hello")]
result = convert_messages_to_prompt_anthropic(messages)
assert result == "\n\nHuman: Hello\n\nAssistant:"
messages = [HumanMessage(content="Hello"), AIMessage(content="Answer:")]
result = convert_messages_to_prompt_anthropic(messages)
assert result == "\n\nHuman: Hello\n\nAssistant: Answer:"
def test_anthropic_model_kwargs() -> None:
llm = ChatAnthropic(model_kwargs={"foo": "bar"})
assert llm.model_kwargs == {"foo": "bar"}
def test_anthropic_invalid_model_kwargs() -> None:
with pytest.raises(ValueError):
ChatAnthropic(model_kwargs={"max_tokens_to_sample": 5})
def test_anthropic_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = ChatAnthropic(foo="bar")
assert llm.model_kwargs == {"foo": "bar"}

View File

@@ -1,9 +1,12 @@
"""Test Anthropic Chat API wrapper."""
import os
from typing import List
import pytest
from langchain.chat_models import ChatAnthropic
from langchain.chat_models.anthropic import convert_messages_to_prompt_anthropic
from langchain.schema import AIMessage, BaseMessage, HumanMessage
os.environ["ANTHROPIC_API_KEY"] = "foo"
@@ -37,3 +40,21 @@ def test_anthropic_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = ChatAnthropic(foo="bar")
assert llm.model_kwargs == {"foo": "bar"}
@pytest.mark.requires("anthropic")
def test_anthropic_initialization() -> None:
"""Test anthropic initialization."""
# Verify that chat anthropic can be initialized using a secret key provided
# as a parameter rather than an environment variable.
ChatAnthropic(model="test", anthropic_api_key="test")
def test_formatting() -> None:
messages: List[BaseMessage] = [HumanMessage(content="Hello")]
result = convert_messages_to_prompt_anthropic(messages)
assert result == "\n\nHuman: Hello\n\nAssistant:"
messages = [HumanMessage(content="Hello"), AIMessage(content="Answer:")]
result = convert_messages_to_prompt_anthropic(messages)
assert result == "\n\nHuman: Hello\n\nAssistant: Answer:"