Mirror of https://github.com/hwchase17/langchain.git (synced 2025-08-29 06:23:20 +00:00)
take usage from end of stream

commit 3e262334ce
parent f5f6e869cd
@@ -1759,15 +1759,13 @@ def _make_message_chunk_from_anthropic_event(
     """
     message_chunk: Optional[AIMessageChunk] = None
     # See https://github.com/anthropics/anthropic-sdk-python/blob/main/src/anthropic/lib/streaming/_messages.py  # noqa: E501
-    if event.type == "message_start" and stream_usage:
-        usage_metadata = _create_usage_metadata(event.message.usage)
+    if event.type == "message_start":
         if hasattr(event.message, "model"):
             response_metadata = {"model_name": event.message.model}
         else:
             response_metadata = {}
         message_chunk = AIMessageChunk(
             content="" if coerce_content_to_string else [],
-            usage_metadata=usage_metadata,
             response_metadata=response_metadata,
         )
     elif (
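What the hunk above changes in behavior: the chunk emitted for `message_start` no longer carries `usage_metadata`; per the commit title, usage is instead taken from the end of the stream (in Anthropic's streaming protocol, final usage arrives on the `message_delta` event). A minimal sketch of observing this from the caller's side, assuming `langchain-anthropic` is installed and `ANTHROPIC_API_KEY` is set; the model name and prompt are illustrative assumptions, not taken from this diff:

# Sketch only: aggregate streamed chunks and read usage off the combined message.
from langchain_anthropic import ChatAnthropic

llm = ChatAnthropic(model="claude-3-5-sonnet-latest")  # assumed model name

aggregate = None
for chunk in llm.stream("Hello"):
    # After this change, usage_metadata is attached to the final chunk
    # (built from message_delta) rather than to the message_start chunk.
    aggregate = chunk if aggregate is None else aggregate + chunk

print(aggregate.usage_metadata)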
@@ -46,7 +46,7 @@ def test_stream() -> None:
         if token.usage_metadata is not None:
             if token.usage_metadata.get("input_tokens"):
                 chunks_with_input_token_counts += 1
-            elif token.usage_metadata.get("output_tokens"):
+            if token.usage_metadata.get("output_tokens"):
                 chunks_with_output_token_counts += 1
         chunks_with_model_name += int("model_name" in token.response_metadata)
     if chunks_with_input_token_counts != 1 or chunks_with_output_token_counts != 1:
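Why `elif` becomes `if` in the test: once usage arrives on a single end-of-stream chunk, that one chunk reports both input and output tokens, so both counters must be allowed to increment for the same chunk. A toy illustration with made-up values:

# With elif, a chunk whose usage has truthy input_tokens would never be
# counted toward output_tokens; two independent ifs count it for both.
usage = {"input_tokens": 10, "output_tokens": 25}

chunks_with_input_token_counts = 0
chunks_with_output_token_counts = 0
if usage.get("input_tokens"):
    chunks_with_input_token_counts += 1
if usage.get("output_tokens"):  # an elif here would skip this branch
    chunks_with_output_token_counts += 1

assert chunks_with_input_token_counts == chunks_with_output_token_counts == 1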
@@ -85,7 +85,7 @@ async def test_astream() -> None:
         if token.usage_metadata is not None:
             if token.usage_metadata.get("input_tokens"):
                 chunks_with_input_token_counts += 1
-            elif token.usage_metadata.get("output_tokens"):
+            if token.usage_metadata.get("output_tokens"):
                 chunks_with_output_token_counts += 1
     if chunks_with_input_token_counts != 1 or chunks_with_output_token_counts != 1:
         raise AssertionError(
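The async test gets the same `elif` to `if` fix. For completeness, a hedged async counterpart of the earlier streaming sketch, under the same assumptions:

# Sketch only: same aggregation pattern via astream.
import asyncio

from langchain_anthropic import ChatAnthropic


async def main() -> None:
    llm = ChatAnthropic(model="claude-3-5-sonnet-latest")  # assumed model name
    aggregate = None
    async for chunk in llm.astream("Hello"):
        aggregate = chunk if aggregate is None else aggregate + chunk
    print(aggregate.usage_metadata)


asyncio.run(main())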