mirror of https://github.com/hwchase17/langchain.git
synced 2025-08-27 13:31:53 +00:00
langchain_anthropic: add stop_reason in ChatAnthropic stream result (#23689)
`ChatAnthropic` can get `stop_reason` from the resulting `AIMessage` in `invoke` and `ainvoke`, but not in `stream` and `astream`. This differs from the behavior of `ChatOpenAI`. Since `stop_reason` is needed to determine the next action after an LLM call, it is useful to surface it from `stream` as well; this makes it easier to handle situations where only `stop_reason` is needed.

- Issue: NA
- Dependencies: NA
- Twitter handle: https://x.com/kiarina37
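Roughly, the caller-side effect looks like this (a minimal sketch; the model name is illustrative and a valid `ANTHROPIC_API_KEY` is assumed to be set):

```python
from langchain_anthropic import ChatAnthropic

# Illustrative model name; any Claude model supported by langchain_anthropic
# should behave the same way.
llm = ChatAnthropic(model="claude-3-haiku-20240307")

full = None
for chunk in llm.stream("Say hello"):
    # AIMessageChunk supports `+`, which merges content, usage_metadata,
    # and response_metadata across chunks.
    full = chunk if full is None else full + chunk

# After this change, the aggregated stream result carries the stop metadata,
# matching what invoke() and ainvoke() already return.
print(full.response_metadata["stop_reason"])    # e.g. "end_turn"
print(full.response_metadata["stop_sequence"])  # typically None
```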
This commit is contained in:
parent 27ce58f86e
commit dc396835ed
@@ -518,7 +518,7 @@ class ChatAnthropic(BaseChatModel):
     anthropic_api_url: Optional[str] = Field(None, alias="base_url")
     """Base URL for API requests. Only specify if using a proxy or service emulator.
 
     If a value isn't passed in and environment variable ANTHROPIC_BASE_URL is set, value
     will be read from there.
     """
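For context, the `base_url` alias shown in this hunk lets callers point the client at a proxy or service emulator; a minimal sketch with placeholder values (the URL and model name are not part of this change):

```python
from langchain_anthropic import ChatAnthropic

# The proxy URL and model name below are placeholders for illustration.
llm = ChatAnthropic(
    model="claude-3-haiku-20240307",
    base_url="https://my-anthropic-proxy.example.com",
)
```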
@@ -1139,6 +1139,10 @@ def _make_message_chunk_from_anthropic_event(
                 output_tokens=output_tokens,
                 total_tokens=output_tokens,
             ),
+            response_metadata={
+                "stop_reason": event.delta.stop_reason,
+                "stop_sequence": event.delta.stop_sequence,
+            },
         )
     else:
         pass
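The new `response_metadata` survives chunk aggregation because adding `AIMessageChunk`s merges their metadata dicts; a self-contained sketch of that merge (the field values are illustrative, not taken from a real run):

```python
from langchain_core.messages import AIMessageChunk

# A content chunk followed by the final message_delta chunk, roughly as the
# patched _make_message_chunk_from_anthropic_event would now emit them.
text = AIMessageChunk(content="Hello!")
tail = AIMessageChunk(
    content="",
    response_metadata={"stop_reason": "end_turn", "stop_sequence": None},
)

merged = text + tail
print(merged.response_metadata)
# {'stop_reason': 'end_turn', 'stop_sequence': None}
```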
@@ -57,6 +57,8 @@ def test_stream() -> None:
         full.usage_metadata["input_tokens"] + full.usage_metadata["output_tokens"]
         == full.usage_metadata["total_tokens"]
     )
+    assert "stop_reason" in full.response_metadata
+    assert "stop_sequence" in full.response_metadata
 
 
 async def test_astream() -> None:
@@ -91,6 +93,8 @@ async def test_astream() -> None:
         full.usage_metadata["input_tokens"] + full.usage_metadata["output_tokens"]
         == full.usage_metadata["total_tokens"]
     )
+    assert "stop_reason" in full.response_metadata
+    assert "stop_sequence" in full.response_metadata
 
     # test usage metadata can be excluded
     model = ChatAnthropic(model_name=MODEL_NAME, stream_usage=False)  # type: ignore[call-arg]
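The async path mirrors the sync one; a hedged sketch along the lines of test_astream (model name illustrative, `ANTHROPIC_API_KEY` assumed):

```python
import asyncio

from langchain_anthropic import ChatAnthropic


async def main() -> None:
    llm = ChatAnthropic(model="claude-3-haiku-20240307")
    full = None
    async for chunk in llm.astream("Say hello"):
        full = chunk if full is None else full + chunk
    assert full is not None
    print(full.response_metadata.get("stop_reason"))


asyncio.run(main())
```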