Mirror of https://github.com/hwchase17/langchain.git, synced 2025-06-27 08:58:48 +00:00

Note to self: Always run integration tests, even on "that last minute change you thought would be safe" :)

Co-authored-by: Mike Lambert <mike.lambert@anthropic.com>

This commit is contained in:
parent 13a0ed064b
commit ec59e9d886
docs/modules/models/chat/integrations/anthropic.ipynb (new file, 171 lines)
@@ -0,0 +1,171 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "bf733a38-db84-4363-89e2-de6735c37230",
   "metadata": {},
   "source": [
    "# Anthropic\n",
    "\n",
    "This notebook covers how to get started with Anthropic chat models."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "d4a7c55d-b235-4ca4-a579-c90cc9570da9",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "from langchain.chat_models import ChatAnthropic\n",
    "from langchain.prompts.chat import (\n",
    "    ChatPromptTemplate,\n",
    "    SystemMessagePromptTemplate,\n",
    "    AIMessagePromptTemplate,\n",
    "    HumanMessagePromptTemplate,\n",
    ")\n",
    "from langchain.schema import (\n",
    "    AIMessage,\n",
    "    HumanMessage,\n",
    "    SystemMessage\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "70cf04e8-423a-4ff6-8b09-f11fb711c817",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "chat = ChatAnthropic()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "8199ef8f-eb8b-4253-9ea0-6c24a013ca4c",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content=\" J'adore programmer.\", additional_kwargs={})"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "messages = [\n",
    "    HumanMessage(content=\"Translate this sentence from English to French. I love programming.\")\n",
    "]\n",
    "chat(messages)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c361ab1e-8c0c-4206-9e3c-9d1424a12b9c",
   "metadata": {},
   "source": [
    "## `ChatAnthropic` also supports async and streaming functionality:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "93a21c5c-6ef9-4688-be60-b2e1f94842fb",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "from langchain.callbacks.base import CallbackManager\n",
    "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "c5fac0e9-05a4-4fc1-a3b3-e5bbb24b971b",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "LLMResult(generations=[[ChatGeneration(text=\" J'aime programmer.\", generation_info=None, message=AIMessage(content=\" J'aime programmer.\", additional_kwargs={}))]], llm_output={})"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "await chat.agenerate([messages])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "025be980-e50d-4a68-93dc-c9c7b500ce34",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " J'aime la programmation."
     ]
    },
    {
     "data": {
      "text/plain": [
       "AIMessage(content=\" J'aime la programmation.\", additional_kwargs={})"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "chat = ChatAnthropic(streaming=True, verbose=True, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))\n",
    "chat(messages)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
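For reference, the notebook above condenses to a short plain-Python sketch. This is not part of the commit; it assumes the 0.0.x-era langchain API used throughout this diff and an ANTHROPIC_API_KEY set in the environment.

# Sketch of the notebook's flow as a script (assumes ANTHROPIC_API_KEY is set
# and the 2023-era langchain API shown in this commit).
import asyncio

from langchain.callbacks.base import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatAnthropic
from langchain.schema import HumanMessage

messages = [
    HumanMessage(
        content="Translate this sentence from English to French. I love programming."
    )
]

# Synchronous call: returns an AIMessage, e.g. AIMessage(content=" J'adore programmer.")
chat = ChatAnthropic()
print(chat(messages))

# Async generation, the script equivalent of the notebook's `await chat.agenerate(...)`.
print(asyncio.run(chat.agenerate([messages])))

# Streaming: the callback handler writes tokens to stdout as they arrive.
streaming_chat = ChatAnthropic(
    streaming=True,
    verbose=True,
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
streaming_chat(messages)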
langchain/chat_models/__init__.py

@@ -1,5 +1,6 @@
+from langchain.chat_models.anthropic import ChatAnthropic
 from langchain.chat_models.azure_openai import AzureChatOpenAI
 from langchain.chat_models.openai import ChatOpenAI
 from langchain.chat_models.promptlayer_openai import PromptLayerChatOpenAI
 
-__all__ = ["ChatOpenAI", "AzureChatOpenAI", "PromptLayerChatOpenAI"]
+__all__ = ["ChatOpenAI", "AzureChatOpenAI", "PromptLayerChatOpenAI", "ChatAnthropic"]
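This re-export is what makes the notebook's top-level import work; without it, users would have to reach into the submodule. A quick sanity check, not part of the commit:

# Both imports now resolve to the same class; the first is the documented path.
from langchain.chat_models import ChatAnthropic
from langchain.chat_models.anthropic import ChatAnthropic as _Direct

assert ChatAnthropic is _Direct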
langchain/chat_models/anthropic.py

@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import Any, Dict, List, Optional
 
 from pydantic import Extra
 
@@ -26,17 +26,7 @@ class ChatAnthropic(BaseChatModel, _AnthropicCommon):
         .. code-block:: python
 
             import anthropic
             from langchain.llms import Anthropic
-            model = Anthropic(model="<model_name>", anthropic_api_key="my-api-key")
-
-            # Simplest invocation, automatically wrapped with HUMAN_PROMPT
-            # and AI_PROMPT.
-            response = model("What are the biggest risks facing humanity?")
-
-            # Or if you want to use the chat mode, build a few-shot-prompt, or
-            # put words in the Assistant's mouth, use HUMAN_PROMPT and AI_PROMPT:
-            raw_prompt = "What are the biggest risks facing humanity?"
-            prompt = f"{anthropic.HUMAN_PROMPT} {prompt}{anthropic.AI_PROMPT}"
-            response = model(prompt)
+            model = ChatAnthropic(model="<model_name>", anthropic_api_key="my-api-key")
     """
 
     class Config:
@@ -98,7 +88,9 @@ class ChatAnthropic(BaseChatModel, _AnthropicCommon):
         self, messages: List[BaseMessage], stop: Optional[List[str]] = None
     ) -> ChatResult:
         prompt = self._convert_messages_to_prompt(messages)
-        params = {"prompt": prompt, "stop_sequences": stop, **self._default_params}
+        params: Dict[str, Any] = {"prompt": prompt, **self._default_params}
+        if stop:
+            params["stop_sequences"] = stop
 
         if self.streaming:
             completion = ""
@@ -120,7 +112,9 @@ class ChatAnthropic(BaseChatModel, _AnthropicCommon):
         self, messages: List[BaseMessage], stop: Optional[List[str]] = None
     ) -> ChatResult:
         prompt = self._convert_messages_to_prompt(messages)
-        params = {"prompt": prompt, "stop_sequences": stop, **self._default_params}
+        params: Dict[str, Any] = {"prompt": prompt, **self._default_params}
+        if stop:
+            params["stop_sequences"] = stop
 
         if self.streaming:
             completion = ""
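The `params` change in `_generate` and `_agenerate` is the behavioral fix in this commit: the old code always included a `stop_sequences` key, so callers who passed no stop words forwarded `stop_sequences=None` to the Anthropic client, which is presumably what the integration tests in the commit message caught. The new code adds the key only when `stop` is truthy. A standalone sketch of the guard (names here are illustrative, not the module's internals):

# Sketch of the guarded-parameter pattern used above; build_params stands in
# for the inline dict construction in _generate/_agenerate.
from typing import Any, Dict, List, Optional

def build_params(
    prompt: str, stop: Optional[List[str]], defaults: Dict[str, Any]
) -> Dict[str, Any]:
    params: Dict[str, Any] = {"prompt": prompt, **defaults}
    if stop:
        # Only include the key when there is something to send; the old code
        # effectively sent stop_sequences=None whenever stop was absent.
        params["stop_sequences"] = stop
    return params

assert "stop_sequences" not in build_params("Hi", None, {"max_tokens_to_sample": 256})
assert build_params("Hi", ["\n\nHuman:"], {})["stop_sequences"] == ["\n\nHuman:"]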
langchain/llms/anthropic.py

@@ -10,7 +10,7 @@ from langchain.utils import get_from_dict_or_env
 
 class _AnthropicCommon(BaseModel):
     client: Any = None  #: :meta private:
-    model: str = "claude-latest"
+    model: str = "claude-v1"
     """Model name to use."""
 
     max_tokens_to_sample: int = 256
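The default-model change in `_AnthropicCommon` is shared by the LLM and chat wrappers: "claude-latest" does not appear to have been a model name the Anthropic API actually served at the time, whereas "claude-v1" was, so a bare constructor call now points at a real model. Illustrative only, not part of the commit:

# After this commit, these two are equivalent (requires ANTHROPIC_API_KEY):
from langchain.chat_models import ChatAnthropic

default_chat = ChatAnthropic()                    # model="claude-v1" by default
explicit_chat = ChatAnthropic(model="claude-v1")  # same thing, spelled out
assert default_chat.model == explicit_chat.model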
poetry.lock (generated, 2 lines changed)

@@ -9035,4 +9035,4 @@ qdrant = ["qdrant-client"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<4.0"
-content-hash = "373f68ef16e7f3d5d9cde8b81c5f261096cc537ddca4f6a36711d7215b63f226"
+content-hash = "7e343fa8e31d8fcf1023cbda592f64c05e80015c4e0e23c1d387d2e9671ce995"
pyproject.toml

@@ -36,7 +36,7 @@ pinecone-text = {version = "^0.4.2", optional = true}
 weaviate-client = {version = "^3", optional = true}
 google-api-python-client = {version = "2.70.0", optional = true}
 wolframalpha = {version = "5.0.0", optional = true}
-anthropic = {version = "^0.2.4", optional = true}
+anthropic = {version = "^0.2.6", optional = true}
 qdrant-client = {version = "^1.1.2", optional = true, python = ">=3.8.1,<3.12"}
 dataclasses-json = "^0.5.7"
 tensorflow-text = {version = "^2.11.0", optional = true, python = "^3.10, <3.12"}
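The `anthropic` pin moves from `^0.2.4` to `^0.2.6`, keeping the optional dependency in step with the client version the new chat model was written against. The package stays optional, so users install it themselves (e.g. `pip install anthropic`) before constructing `ChatAnthropic`.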
tests/integration_tests/chat_models/test_anthropic.py

@@ -17,7 +17,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
 
 def test_anthropic_call() -> None:
     """Test valid call to anthropic."""
-    chat = ChatAnthropic(model="bare-nano-0")
+    chat = ChatAnthropic(model="test")
     message = HumanMessage(content="Hello")
     response = chat([message])
     assert isinstance(response, AIMessage)
@@ -26,7 +26,7 @@ def test_anthropic_call() -> None:
 
 def test_anthropic_streaming() -> None:
     """Test streaming tokens from anthropic."""
-    chat = ChatAnthropic(model="bare-nano-0", streaming=True)
+    chat = ChatAnthropic(model="test", streaming=True)
     message = HumanMessage(content="Hello")
     response = chat([message])
     assert isinstance(response, AIMessage)
@@ -38,11 +38,12 @@ def test_anthropic_streaming_callback() -> None:
     callback_handler = FakeCallbackHandler()
     callback_manager = CallbackManager([callback_handler])
     chat = ChatAnthropic(
+        model="test",
         streaming=True,
         callback_manager=callback_manager,
         verbose=True,
     )
-    message = HumanMessage(content="Write me a sentence with 100 words.")
+    message = HumanMessage(content="Write me a sentence with 10 words.")
     chat([message])
     assert callback_handler.llm_streams > 1
 
@@ -53,6 +54,7 @@ async def test_anthropic_async_streaming_callback() -> None:
     callback_handler = FakeCallbackHandler()
     callback_manager = CallbackManager([callback_handler])
     chat = ChatAnthropic(
+        model="test",
         streaming=True,
         callback_manager=callback_manager,
         verbose=True,
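These are integration tests that hit the live API, which is the point of the commit message's note to self. Assuming a pytest setup like langchain's and an ANTHROPIC_API_KEY in the environment, running just this file would look something like `pytest tests/integration_tests/chat_models/test_anthropic.py` (path inferred from the import in the hunk header above).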