mirror of
https://github.com/hwchase17/langchain.git
synced 2025-09-13 13:36:15 +00:00
Add Anthropic ChatModel to langchain (#2293)
* Adds an Anthropic ChatModel * Factors out common code in our LLMModel and ChatModel * Supports streaming llm-tokens to the callbacks on a delta basis (until a future V2 API does that for us) * Some fixes
This commit is contained in:
81
tests/integration_tests/chat_models/test_anthropic.py
Normal file
81
tests/integration_tests/chat_models/test_anthropic.py
Normal file
@@ -0,0 +1,81 @@
|
||||
"""Test Anthropic API wrapper."""
|
||||
from typing import List
|
||||
|
||||
import pytest
|
||||
|
||||
from langchain.callbacks.base import CallbackManager
|
||||
from langchain.chat_models.anthropic import ChatAnthropic
|
||||
from langchain.schema import (
|
||||
AIMessage,
|
||||
BaseMessage,
|
||||
ChatGeneration,
|
||||
HumanMessage,
|
||||
LLMResult,
|
||||
)
|
||||
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
|
||||
|
||||
|
||||
def test_anthropic_call() -> None:
    """Test valid call to anthropic."""
    # A single human turn should come back as a string-content AI message.
    model = ChatAnthropic(model="bare-nano-0")
    reply = model([HumanMessage(content="Hello")])
    assert isinstance(reply, AIMessage)
    assert isinstance(reply.content, str)
|
||||
|
||||
|
||||
def test_anthropic_streaming() -> None:
    """Test streaming tokens from anthropic."""
    # With streaming enabled the call interface is unchanged: it still
    # returns one aggregated AI message.
    model = ChatAnthropic(model="bare-nano-0", streaming=True)
    reply = model([HumanMessage(content="Hello")])
    assert isinstance(reply, AIMessage)
    assert isinstance(reply.content, str)
|
||||
|
||||
|
||||
def test_anthropic_streaming_callback() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    handler = FakeCallbackHandler()
    chat = ChatAnthropic(
        streaming=True,
        callback_manager=CallbackManager([handler]),
        verbose=True,
    )
    # A longer generation so more than one token is streamed.
    chat([HumanMessage(content="Write me a sentence with 100 words.")])
    assert handler.llm_streams > 1
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_anthropic_async_streaming_callback() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    handler = FakeCallbackHandler()
    chat = ChatAnthropic(
        streaming=True,
        callback_manager=CallbackManager([handler]),
        verbose=True,
    )
    conversation: List[BaseMessage] = [
        HumanMessage(content="How many toes do dogs have?")
    ]
    result: LLMResult = await chat.agenerate([conversation])
    # Streaming must have fired the new-token callback more than once.
    assert handler.llm_streams > 1
    assert isinstance(result, LLMResult)
    # Each generation is a ChatGeneration whose text mirrors its message.
    for generation in result.generations[0]:
        assert isinstance(generation, ChatGeneration)
        assert isinstance(generation.text, str)
        assert generation.text == generation.message.content
|
||||
|
||||
|
||||
def test_formatting() -> None:
    """Test conversion of chat messages to the Anthropic prompt format."""
    chat = ChatAnthropic()

    # A lone human message gets the trailing Assistant turn appended.
    history: List[BaseMessage] = [HumanMessage(content="Hello")]
    prompt = chat._convert_messages_to_prompt(history)
    assert prompt == "\n\nHuman: Hello\n\nAssistant:"

    # An explicit AI message is folded into the Assistant turn.
    history = [HumanMessage(content="Hello"), AIMessage(content="Answer:")]
    prompt = chat._convert_messages_to_prompt(history)
    assert prompt == "\n\nHuman: Hello\n\nAssistant: Answer:"
|
@@ -32,7 +32,6 @@ def test_anthropic_streaming_callback() -> None:
|
||||
callback_handler = FakeCallbackHandler()
|
||||
callback_manager = CallbackManager([callback_handler])
|
||||
llm = Anthropic(
|
||||
model="claude-v1",
|
||||
streaming=True,
|
||||
callback_manager=callback_manager,
|
||||
verbose=True,
|
||||
@@ -55,7 +54,6 @@ async def test_anthropic_async_streaming_callback() -> None:
|
||||
callback_handler = FakeCallbackHandler()
|
||||
callback_manager = CallbackManager([callback_handler])
|
||||
llm = Anthropic(
|
||||
model="claude-v1",
|
||||
streaming=True,
|
||||
callback_manager=callback_manager,
|
||||
verbose=True,
|
||||
|
Reference in New Issue
Block a user