Mirror of https://github.com/hwchase17/langchain.git, synced 2025-06-22 14:49:29 +00:00
langchain-groq: Add response metadata when streaming (#30379)
- **Description:** Add missing `model_name` and `system_fingerprint` metadata when streaming.

Co-authored-by: Chester Curme <chester.curme@gmail.com>
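For context, a minimal sketch of what this change surfaces to callers (assumes a valid `GROQ_API_KEY` in the environment; the model name and streamed values are illustrative, not part of this commit):

```python
from langchain_groq import ChatGroq

chat = ChatGroq(model="llama3-8b-8192")  # illustrative model name

full = None
for chunk in chat.stream("Welcome to the Groqetship!"):
    # Aggregate chunks; response_metadata dicts are merged on `+`.
    full = chunk if full is None else full + chunk

# With this change, the aggregated message now carries these keys
# when streaming, matching the non-streaming path.
print(full.response_metadata["model_name"])
print(full.response_metadata["system_fingerprint"])
```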
parent e2d9fe766f
commit df4448dfac
```diff
@@ -541,6 +541,9 @@ class ChatGroq(BaseChatModel):
             generation_info = {}
             if finish_reason := choice.get("finish_reason"):
                 generation_info["finish_reason"] = finish_reason
+                generation_info["model_name"] = self.model_name
+                if system_fingerprint := chunk.get("system_fingerprint"):
+                    generation_info["system_fingerprint"] = system_fingerprint
             logprobs = choice.get("logprobs")
             if logprobs:
                 generation_info["logprobs"] = logprobs
```
```diff
@@ -579,6 +582,9 @@ class ChatGroq(BaseChatModel):
             generation_info = {}
             if finish_reason := choice.get("finish_reason"):
                 generation_info["finish_reason"] = finish_reason
+                generation_info["model_name"] = self.model_name
+                if system_fingerprint := chunk.get("system_fingerprint"):
+                    generation_info["system_fingerprint"] = system_fingerprint
             logprobs = choice.get("logprobs")
             if logprobs:
                 generation_info["logprobs"] = logprobs
```
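The two hunks make the same change in both streaming code paths (sync and async), and the new keys are set only inside the `finish_reason` branch, i.e. on the final chunk of the stream. A hedged sketch of how that dict typically reaches the caller — the `ChatGenerationChunk` wrapping mirrors the usual `langchain_core` chat-model pattern, but is an assumption about code not shown in this diff, and all values are illustrative:

```python
from langchain_core.messages import AIMessageChunk
from langchain_core.outputs import ChatGenerationChunk

# Mimic the final streamed chunk: Groq sends finish_reason (and
# system_fingerprint) only on the last chunk, so the new keys land
# exactly once per stream.
generation_info = {}
finish_reason = "stop"  # illustrative value
if finish_reason:
    generation_info["finish_reason"] = finish_reason
    generation_info["model_name"] = "llama3-8b-8192"  # self.model_name in ChatGroq
    generation_info["system_fingerprint"] = "fp_abc123"  # illustrative value

chunk = ChatGenerationChunk(
    message=AIMessageChunk(content=""),
    generation_info=generation_info or None,
)
print(chunk.generation_info)
```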
```diff
@@ -98,16 +98,19 @@ async def test_astream() -> None:
 
     full: Optional[BaseMessageChunk] = None
     chunks_with_token_counts = 0
+    chunks_with_response_metadata = 0
     async for token in chat.astream("Welcome to the Groqetship!"):
         assert isinstance(token, AIMessageChunk)
         assert isinstance(token.content, str)
         full = token if full is None else full + token
         if token.usage_metadata is not None:
             chunks_with_token_counts += 1
-    if chunks_with_token_counts != 1:
+        if token.response_metadata:
+            chunks_with_response_metadata += 1
+    if chunks_with_token_counts != 1 or chunks_with_response_metadata != 1:
         raise AssertionError(
-            "Expected exactly one chunk with token counts. "
-            "AIMessageChunk aggregation adds counts. Check that "
+            "Expected exactly one chunk with token counts or metadata. "
+            "AIMessageChunk aggregation adds / appends these metadata. Check that "
             "this is behaving properly."
         )
     assert isinstance(full, AIMessageChunk)
```
```diff
@@ -118,6 +121,8 @@ async def test_astream() -> None:
         full.usage_metadata["input_tokens"] + full.usage_metadata["output_tokens"]
         == full.usage_metadata["total_tokens"]
     )
+    for expected_metadata in ["model_name", "system_fingerprint"]:
+        assert full.response_metadata[expected_metadata]
```
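The test's "exactly one chunk with response metadata" check relies on how `AIMessageChunk` aggregation behaves: adding two chunks merges their `response_metadata` dicts, so metadata emitted on a single streamed chunk appears once on the aggregate. A minimal illustration of that invariant (values are illustrative):

```python
from langchain_core.messages import AIMessageChunk

a = AIMessageChunk(content="Hello, ")
b = AIMessageChunk(
    content="world",
    response_metadata={
        "model_name": "llama3-8b-8192",     # illustrative values
        "system_fingerprint": "fp_abc123",
    },
)
full = a + b  # chunk addition concatenates content and merges response_metadata
assert full.content == "Hello, world"
assert full.response_metadata["model_name"] == "llama3-8b-8192"
```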