Mirror of https://github.com/hwchase17/langchain.git (synced 2025-09-08 14:31:55 +00:00)
core[patch]: Add LLM output to message response_metadata (#19158)
This makes it easier to expose token usage information. CC @baskaryan

Co-authored-by: Bagatur <baskaryan@gmail.com>
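A rough usage sketch of what this enables (the provider, model name, and metadata keys below are illustrative, not taken from this commit):

    # Rough sketch: after this patch, whatever the provider returns in llm_output
    # (e.g. token_usage and model_name for OpenAI-style models) is merged into the
    # AIMessage's response_metadata for single-generation results.
    from langchain_openai import ChatOpenAI  # illustrative provider choice

    llm = ChatOpenAI(model="gpt-3.5-turbo")
    msg = llm.invoke("Hello!")

    print(msg.response_metadata.get("token_usage"))
    print(msg.response_metadata.get("model_name"))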
@@ -615,6 +615,11 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             generation.message.response_metadata = _gen_info_and_msg_metadata(
                 generation
             )
+        if len(result.generations) == 1 and result.llm_output is not None:
+            result.generations[0].message.response_metadata = {
+                **result.llm_output,
+                **result.generations[0].message.response_metadata,
+            }
         if check_cache and llm_cache:
             llm_cache.update(prompt, llm_string, result.generations)
         return result
@@ -651,6 +656,11 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             generation.message.response_metadata = _gen_info_and_msg_metadata(
                 generation
             )
+        if len(result.generations) == 1 and result.llm_output is not None:
+            result.generations[0].message.response_metadata = {
+                **result.llm_output,
+                **result.generations[0].message.response_metadata,
+            }
         if check_cache and llm_cache:
             await llm_cache.aupdate(prompt, llm_string, result.generations)
         return result
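A purely illustrative note on the merge order used in both hunks: because the message's existing response_metadata is spread last, its keys win over llm_output on conflict.

    # Illustrative values only; mirrors the dict-merge order in the diff above.
    llm_output = {"model_name": "gpt-3.5-turbo", "token_usage": {"total_tokens": 42}}
    message_metadata = {"finish_reason": "stop", "model_name": "gpt-3.5-turbo-0125"}

    merged = {**llm_output, **message_metadata}
    assert merged["model_name"] == "gpt-3.5-turbo-0125"  # message metadata takes precedence
    assert merged["token_usage"]["total_tokens"] == 42    # other llm_output keys are kept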