mirror of
https://github.com/hwchase17/langchain.git
synced 2025-06-01 04:29:09 +00:00
anthropic[patch]: fix response metadata type (#19683)
This commit is contained in:
parent
9c4b6dc979
commit
b15c7fdde6
@ -268,16 +268,15 @@ class ChatAnthropic(BaseChatModel):
|
||||
await run_manager.on_llm_new_token(text, chunk=chunk)
|
||||
yield chunk
|
||||
|
||||
def _format_output(self, data: Any, **kwargs: Any) -> ChatResult:
    """Convert a raw Anthropic ``Message`` into a LangChain ``ChatResult``.

    Args:
        data: The Anthropic SDK message object (a pydantic model exposing
            ``model_dump()``) returned by the messages API.
        **kwargs: Accepted for interface compatibility; currently unused.

    Returns:
        A ``ChatResult`` whose single generation carries the text of the first
        content block, and whose ``llm_output`` holds the response metadata as
        a plain ``dict``.
    """
    # Serialize once so that llm_output contains plain builtin types rather
    # than the raw SDK object (the pre-fix code passed ``data`` itself, which
    # leaked a non-dict pydantic model into response metadata).
    data_dict = data.model_dump()
    content = data_dict["content"]
    # Everything except the message payload fields becomes metadata.
    llm_output = {
        k: v for k, v in data_dict.items() if k not in ("content", "role", "type")
    }
    return ChatResult(
        generations=[ChatGeneration(message=AIMessage(content=content[0]["text"]))],
        llm_output=llm_output,
    )
|
||||
|
||||
def _generate(
|
||||
|
@ -3,6 +3,9 @@
|
||||
import os
|
||||
|
||||
import pytest
|
||||
from anthropic.types import ContentBlock, Message, Usage
|
||||
from langchain_core.messages import AIMessage
|
||||
from langchain_core.outputs import ChatGeneration, ChatResult
|
||||
|
||||
from langchain_anthropic import ChatAnthropic, ChatAnthropicMessages
|
||||
|
||||
@ -52,3 +55,31 @@ def test_anthropic_initialization() -> None:
|
||||
# Verify that chat anthropic can be initialized using a secret key provided
|
||||
# as a parameter rather than an environment variable.
|
||||
ChatAnthropic(model="test", anthropic_api_key="test")
|
||||
|
||||
|
||||
def test__format_output() -> None:
    """``_format_output`` must expose response metadata as a plain dict."""
    raw_message = Message(
        id="foo",
        content=[ContentBlock(type="text", text="bar")],
        model="baz",
        role="assistant",
        stop_reason=None,
        stop_sequence=None,
        usage=Usage(input_tokens=2, output_tokens=1),
        type="message",
    )
    # llm_output should carry every non-payload field, fully serialized.
    expected_metadata = {
        "id": "foo",
        "model": "baz",
        "stop_reason": None,
        "stop_sequence": None,
        "usage": {"input_tokens": 2, "output_tokens": 1},
    }
    expected = ChatResult(
        generations=[
            ChatGeneration(message=AIMessage("bar")),
        ],
        llm_output=expected_metadata,
    )
    chat_model = ChatAnthropic(model="test", anthropic_api_key="test")
    result = chat_model._format_output(raw_message)
    assert expected == result
|
||||
|
Loading…
Reference in New Issue
Block a user