diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py
index 0b7e6f516eb..310380a4199 100644
--- a/libs/partners/anthropic/langchain_anthropic/chat_models.py
+++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py
@@ -1123,6 +1123,11 @@ class ChatAnthropic(BaseChatModel):
     ) -> int:
         """Count tokens in a sequence of input messages.
 
+        Args:
+            messages: The message inputs to tokenize.
+            tools: If provided, sequence of dict, BaseModel, function, or BaseTools
+                to be converted to tool schemas.
+
         .. versionchanged:: 0.2.5
 
             Uses Anthropic's token counting API to count tokens in messages. See:
diff --git a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
index 59f50efa039..0aff4ac3f0e 100644
--- a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
+++ b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
@@ -508,24 +508,34 @@ def test_with_structured_output() -> None:
 
 
 def test_get_num_tokens_from_messages() -> None:
-    llm = ChatAnthropic(model="claude-3-5-haiku-20241022")  # type: ignore[call-arg]
+    llm = ChatAnthropic(model="claude-3-5-sonnet-20241022")  # type: ignore[call-arg]
 
     # Test simple case
     messages = [
-        SystemMessage(content="You are an assistant."),
-        HumanMessage(content="What is the weather in SF?"),
+        SystemMessage(content="You are a scientist"),
+        HumanMessage(content="Hello, Claude"),
     ]
     num_tokens = llm.get_num_tokens_from_messages(messages)
     assert num_tokens > 0
 
     # Test tool use
-    @tool
+    @tool(parse_docstring=True)
     def get_weather(location: str) -> str:
-        """Get weather report for a city"""
+        """Get the current weather in a given location
+
+        Args:
+            location: The city and state, e.g. San Francisco, CA
+        """
         return "Sunny"
 
     messages = [
-        HumanMessage(content="What is the weather in SF?"),
+        HumanMessage(content="What's the weather like in San Francisco?"),
+    ]
+    num_tokens = llm.get_num_tokens_from_messages(messages, tools=[get_weather])
+    assert num_tokens > 0
+
+    messages = [
+        HumanMessage(content="What's the weather like in San Francisco?"),
         AIMessage(
             content=[
                 {"text": "Let's see.", "type": "text"},