anthropic[patch]: fix response metadata type (#19683)
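This patch changes `ChatAnthropic._format_output` to dump the raw `anthropic.types.Message` to a plain dict via `model_dump()` and keep only the metadata fields in `llm_output`, dropping `content`, `role`, and `type`, which are already represented on the generation. Previously the raw `Message` object itself was stored, which did not match the plain-dict shape expected for response metadata on a `ChatResult`. A unit test covering the new behavior is added.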
libs/partners/anthropic/langchain_anthropic/chat_models.py
@@ -268,16 +268,15 @@ class ChatAnthropic(BaseChatModel):
                 await run_manager.on_llm_new_token(text, chunk=chunk)
             yield chunk
 
-    def _format_output(
-        self,
-        data: Any,
-        **kwargs: Any,
-    ) -> ChatResult:
+    def _format_output(self, data: Any, **kwargs: Any) -> ChatResult:
+        data_dict = data.model_dump()
+        content = data_dict["content"]
+        llm_output = {
+            k: v for k, v in data_dict.items() if k not in ("content", "role", "type")
+        }
         return ChatResult(
-            generations=[
-                ChatGeneration(message=AIMessage(content=data.content[0].text))
-            ],
-            llm_output=data,
+            generations=[ChatGeneration(message=AIMessage(content=content[0]["text"]))],
+            llm_output=llm_output,
         )
 
     def _generate(
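As an illustration of the filtering the new `_format_output` performs, here is a minimal standalone sketch; `extract_llm_output` is a hypothetical helper name introduced only for this example, and the payload mirrors the `Message.model_dump()` shape used in the unit test below:

from typing import Any, Dict

def extract_llm_output(data_dict: Dict[str, Any]) -> Dict[str, Any]:
    # Keep only response metadata: "content" feeds the generation text,
    # while "role" and "type" are constant for Anthropic messages.
    return {
        k: v for k, v in data_dict.items() if k not in ("content", "role", "type")
    }

# Payload shaped like anthropic.types.Message.model_dump() output.
message_dict = {
    "id": "foo",
    "type": "message",
    "role": "assistant",
    "content": [{"type": "text", "text": "bar"}],
    "model": "baz",
    "stop_reason": None,
    "stop_sequence": None,
    "usage": {"input_tokens": 2, "output_tokens": 1},
}

print(extract_llm_output(message_dict))
# -> {'id': 'foo', 'model': 'baz', 'stop_reason': None,
#     'stop_sequence': None, 'usage': {'input_tokens': 2, 'output_tokens': 1}}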
libs/partners/anthropic/tests/unit_tests/test_chat_models.py
@@ -3,6 +3,9 @@
 import os
 
 import pytest
+from anthropic.types import ContentBlock, Message, Usage
+from langchain_core.messages import AIMessage
+from langchain_core.outputs import ChatGeneration, ChatResult
 
 from langchain_anthropic import ChatAnthropic, ChatAnthropicMessages
 
@@ -52,3 +55,31 @@ def test_anthropic_initialization() -> None:
     # Verify that chat anthropic can be initialized using a secret key provided
     # as a parameter rather than an environment variable.
     ChatAnthropic(model="test", anthropic_api_key="test")
+
+
+def test__format_output() -> None:
+    anthropic_msg = Message(
+        id="foo",
+        content=[ContentBlock(type="text", text="bar")],
+        model="baz",
+        role="assistant",
+        stop_reason=None,
+        stop_sequence=None,
+        usage=Usage(input_tokens=2, output_tokens=1),
+        type="message",
+    )
+    expected = ChatResult(
+        generations=[
+            ChatGeneration(message=AIMessage("bar")),
+        ],
+        llm_output={
+            "id": "foo",
+            "model": "baz",
+            "stop_reason": None,
+            "stop_sequence": None,
+            "usage": {"input_tokens": 2, "output_tokens": 1},
+        },
+    )
+    llm = ChatAnthropic(model="test", anthropic_api_key="test")
+    actual = llm._format_output(anthropic_msg)
+    assert expected == actual
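The test builds an `anthropic.types.Message` fixture directly, with no network call; these SDK response types are Pydantic models, which is what makes the `model_dump()` call in the patch possible. With `llm_output` now a plain dict, the `ChatResult` is serializable and directly comparable, so the equality assertion above holds, whereas the raw SDK object previously stored there did not match the expected dict type.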