Mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-20 05:43:55 +00:00)
anthropic, mistral: return model_name in response metadata (#30048)
Took a "census" of models supported by init_chat_model-- of those that return model names in response metadata, these were the only two that had it keyed under `"model"` instead of `"model_name"`.
parent 9e6ffd1264
commit f8ed5007ea
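
For callers, the practical effect is that the model name can be read from `response_metadata` under a single key regardless of provider. A minimal usage sketch (not part of the commit), assuming `langchain-anthropic` is installed, an Anthropic API key is configured, and using a hypothetical model id:

from langchain.chat_models import init_chat_model

# Hypothetical model id; any model supported by init_chat_model behaves the same way.
llm = init_chat_model("claude-3-5-haiku-latest", model_provider="anthropic")
result = llm.invoke("Hello")

# After this change, Anthropic and Mistral also expose the name under "model_name".
print(result.response_metadata.get("model_name"))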
@@ -900,6 +900,8 @@ class ChatAnthropic(BaseChatModel):
         llm_output = {
             k: v for k, v in data_dict.items() if k not in ("content", "role", "type")
         }
+        if "model" in llm_output and "model_name" not in llm_output:
+            llm_output["model_name"] = llm_output["model"]
         if (
             len(content) == 1
             and content[0]["type"] == "text"
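
In the non-streaming path above, `llm_output` keeps the provider-native `"model"` key and backfills `"model_name"` from it, so existing consumers of `"model"` are unaffected. A minimal sketch of what a caller might observe (hypothetical model id, Anthropic credentials assumed):

from langchain_anthropic import ChatAnthropic

llm = ChatAnthropic(model="claude-3-5-haiku-latest")  # hypothetical model id
result = llm.invoke("Hello")

# Both keys should now be present, with "model_name" copied from "model".
assert result.response_metadata["model_name"] == result.response_metadata["model"]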
@@ -1445,9 +1447,14 @@ def _make_message_chunk_from_anthropic_event(
     # See https://github.com/anthropics/anthropic-sdk-python/blob/main/src/anthropic/lib/streaming/_messages.py # noqa: E501
     if event.type == "message_start" and stream_usage:
         usage_metadata = _create_usage_metadata(event.message.usage)
+        if hasattr(event.message, "model"):
+            response_metadata = {"model_name": event.message.model}
+        else:
+            response_metadata = {}
         message_chunk = AIMessageChunk(
             content="" if coerce_content_to_string else [],
             usage_metadata=usage_metadata,
+            response_metadata=response_metadata,
         )
     elif (
         event.type == "content_block_start"
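
On the streaming side, only the `message_start` event carries the model name, so a single chunk gets `model_name` in its `response_metadata`; aggregating chunks with `+` carries it onto the combined message. A minimal consumer-side sketch under the same assumptions as above:

from langchain_anthropic import ChatAnthropic

llm = ChatAnthropic(model="claude-3-5-haiku-latest")  # hypothetical model id

full = None
for chunk in llm.stream("Hello"):
    # Typically only the first chunk (built from message_start) carries the name.
    if "model_name" in chunk.response_metadata:
        print("model:", chunk.response_metadata["model_name"])
    full = chunk if full is None else full + chunk

# Chunk aggregation merges response_metadata, so the combined message keeps the key.
assert full is not None and "model_name" in full.response_metadata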
@@ -35,6 +35,7 @@ def test_stream() -> None:
     full: Optional[BaseMessageChunk] = None
     chunks_with_input_token_counts = 0
     chunks_with_output_token_counts = 0
+    chunks_with_model_name = 0
     for token in llm.stream("I'm Pickle Rick"):
         assert isinstance(token.content, str)
         full = token if full is None else full + token
@@ -44,12 +45,14 @@ def test_stream() -> None:
             chunks_with_input_token_counts += 1
         elif token.usage_metadata.get("output_tokens"):
             chunks_with_output_token_counts += 1
+        chunks_with_model_name += int("model_name" in token.response_metadata)
     if chunks_with_input_token_counts != 1 or chunks_with_output_token_counts != 1:
         raise AssertionError(
             "Expected exactly one chunk with input or output token counts. "
             "AIMessageChunk aggregation adds counts. Check that "
             "this is behaving properly."
         )
+    assert chunks_with_model_name == 1
     # check token usage is populated
     assert isinstance(full, AIMessageChunk)
     assert full.usage_metadata is not None
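
The test leans on `AIMessageChunk` aggregation: adding chunks sums token counts in `usage_metadata` and merges their `response_metadata` dicts, which is why exactly one chunk is expected to carry each piece of metadata. A standalone illustration of that merge behavior (no API calls; all values are made up):

from langchain_core.messages import AIMessageChunk

first = AIMessageChunk(
    content="",
    response_metadata={"model_name": "claude-3-5-haiku-latest"},  # hypothetical value
    usage_metadata={"input_tokens": 10, "output_tokens": 0, "total_tokens": 10},
)
rest = AIMessageChunk(
    content="Hi!",
    usage_metadata={"input_tokens": 0, "output_tokens": 3, "total_tokens": 3},
)

full = first + rest
assert full.response_metadata["model_name"] == "claude-3-5-haiku-latest"
assert full.usage_metadata is not None
assert full.usage_metadata["total_tokens"] == 13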
@@ -62,6 +65,7 @@ def test_stream() -> None:
     )
     assert "stop_reason" in full.response_metadata
     assert "stop_sequence" in full.response_metadata
+    assert "model_name" in full.response_metadata
 
 
 async def test_astream() -> None:
@@ -219,6 +223,7 @@ async def test_ainvoke() -> None:
 
     result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
     assert isinstance(result.content, str)
+    assert "model_name" in result.response_metadata
 
 
 def test_invoke() -> None:
@@ -579,7 +579,11 @@ class ChatMistralAI(BaseChatModel):
             )
             generations.append(gen)
 
-        llm_output = {"token_usage": token_usage, "model": self.model}
+        llm_output = {
+            "token_usage": token_usage,
+            "model_name": self.model,
+            "model": self.model,  # Backwards compatability
+        }
         return ChatResult(generations=generations, llm_output=llm_output)
 
     def _create_message_dicts(
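
For `ChatMistralAI`, the same caller-facing check as in the test below applies: the model name now appears under `"model_name"`, while the legacy `"model"` key is kept for backwards compatibility. A minimal sketch, assuming `langchain-mistralai` is installed, a Mistral API key is configured, and a hypothetical model id:

from langchain_mistralai import ChatMistralAI

llm = ChatMistralAI(model="mistral-large-latest")  # hypothetical model id
result = llm.invoke("Hello")

# The new key is the one init_chat_model consumers can rely on across providers.
print(result.response_metadata.get("model_name"))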
@@ -87,6 +87,7 @@ async def test_ainvoke() -> None:
 
     result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
    assert isinstance(result.content, str)
+    assert "model_name" in result.response_metadata
 
 
 def test_invoke() -> None: