mirror of https://github.com/hwchase17/langchain.git, synced 2025-06-24 07:35:18 +00:00
Record system fingerprint chat openai (#12960)
parent 8e0cb2eb84
commit 4f7dff9d66
@@ -342,6 +342,7 @@ class ChatOpenAI(BaseChatModel):

     def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
         overall_token_usage: dict = {}
+        system_fingerprint = None
         for output in llm_outputs:
             if output is None:
                 # Happens in streaming
@@ -352,7 +353,12 @@ class ChatOpenAI(BaseChatModel):
                     overall_token_usage[k] += v
                 else:
                     overall_token_usage[k] = v
-        return {"token_usage": overall_token_usage, "model_name": self.model_name}
+            if system_fingerprint is None:
+                system_fingerprint = output.get("system_fingerprint")
+        combined = {"token_usage": overall_token_usage, "model_name": self.model_name}
+        if system_fingerprint:
+            combined["system_fingerprint"] = system_fingerprint
+        return combined

     def _stream(
         self,
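
A minimal standalone sketch of the combining logic in the hunk above: token usage is summed across batched calls, and the first non-None fingerprint wins. The free function, the hardcoded model name, and the two payload dicts are illustrative stand-ins, not real API responses.

from typing import List, Optional

def combine_llm_outputs(llm_outputs: List[Optional[dict]]) -> dict:
    """Sum token usage across batched calls; keep the first fingerprint seen."""
    overall_token_usage: dict = {}
    system_fingerprint = None
    for output in llm_outputs:
        if output is None:
            # Happens in streaming, where no llm_output is recorded.
            continue
        for k, v in output.get("token_usage", {}).items():
            overall_token_usage[k] = overall_token_usage.get(k, 0) + v
        if system_fingerprint is None:
            system_fingerprint = output.get("system_fingerprint")
    combined = {"token_usage": overall_token_usage, "model_name": "gpt-3.5-turbo"}
    if system_fingerprint:
        combined["system_fingerprint"] = system_fingerprint
    return combined

# Illustrative payloads: two batched responses sharing one (made-up) fingerprint.
outputs = [
    {"token_usage": {"total_tokens": 10}, "system_fingerprint": "fp_abc123"},
    {"token_usage": {"total_tokens": 7}, "system_fingerprint": "fp_abc123"},
]
print(combine_llm_outputs(outputs))
# {'token_usage': {'total_tokens': 17}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_abc123'}
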
@@ -430,7 +436,11 @@ class ChatOpenAI(BaseChatModel):
             )
             generations.append(gen)
         token_usage = response.get("usage", {})
-        llm_output = {"token_usage": token_usage, "model_name": self.model_name}
+        llm_output = {
+            "token_usage": token_usage,
+            "model_name": self.model_name,
+            "system_fingerprint": response.get("system_fingerprint", ""),
+        }
         return ChatResult(generations=generations, llm_output=llm_output)

     async def _astream(
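
With this change, callers can read the fingerprint off the result of a normal generate call. A sketch of the caller-side view, assuming a valid OPENAI_API_KEY and a langchain version that includes this commit; the model name and prompt are placeholders.

from langchain.chat_models import ChatOpenAI
from langchain.schema.messages import HumanMessage

chat = ChatOpenAI(model_name="gpt-3.5-turbo")
result = chat.generate([[HumanMessage(content="Hello")]])
# llm_output now carries the fingerprint reported by the API; per the diff it
# defaults to "" when the response omits system_fingerprint.
print(result.llm_output.get("system_fingerprint"))
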
@@ -58,6 +58,8 @@ def test_chat_openai_generate() -> None:
     response = chat.generate([[message], [message]])
     assert isinstance(response, LLMResult)
     assert len(response.generations) == 2
+    assert response.llm_output
+    assert "system_fingerprint" in response.llm_output
     for generations in response.generations:
         assert len(generations) == 2
         for generation in generations:
@@ -163,6 +165,8 @@ async def test_async_chat_openai() -> None:
     response = await chat.agenerate([[message], [message]])
     assert isinstance(response, LLMResult)
     assert len(response.generations) == 2
+    assert response.llm_output
+    assert "system_fingerprint" in response.llm_output
     for generations in response.generations:
         assert len(generations) == 2
         for generation in generations:
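
The async hunk mirrors the sync one: the same key is recorded through agenerate. A sketch of exercising that path outside the test suite, under the same assumptions as the earlier example (valid OPENAI_API_KEY, placeholder model and prompt).

import asyncio

from langchain.chat_models import ChatOpenAI
from langchain.schema.messages import HumanMessage

async def main() -> None:
    chat = ChatOpenAI(model_name="gpt-3.5-turbo", max_tokens=10)
    result = await chat.agenerate([[HumanMessage(content="Hello")]])
    # Same assertions the new test makes: llm_output is populated and
    # includes the fingerprint key on the async path too.
    assert result.llm_output
    assert "system_fingerprint" in result.llm_output

asyncio.run(main())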