From 4cf67084d316f223c8d0d5719e6ef881aea58511 Mon Sep 17 00:00:00 2001
From: ccurme
Date: Wed, 17 Jul 2024 12:59:26 -0400
Subject: [PATCH] openai[patch]: fix key collision and _astream (#24345)

Fixes small issues introduced in
https://github.com/langchain-ai/langchain/pull/24150 (unreleased).
---
 .../partners/openai/langchain_openai/chat_models/base.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index 71359ab7d5d..7c947d67dae 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -631,8 +631,11 @@ class BaseChatOpenAI(BaseChatModel):
                     "output_tokens": token_usage.get("completion_tokens", 0),
                     "total_tokens": token_usage.get("total_tokens", 0),
                 }
-            generation_info = dict(
-                finish_reason=res.get("finish_reason"), **(generation_info or {})
+            generation_info = generation_info or {}
+            generation_info["finish_reason"] = (
+                res.get("finish_reason")
+                if res.get("finish_reason") is not None
+                else generation_info.get("finish_reason")
             )
             if "logprobs" in res:
                 generation_info["logprobs"] = res["logprobs"]
@@ -660,7 +663,7 @@ class BaseChatOpenAI(BaseChatModel):
             response = raw_response.parse()
             base_generation_info = {"headers": dict(raw_response.headers)}
         else:
-            response = self.async_client.create(**payload)
+            response = await self.async_client.create(**payload)
             base_generation_info = {}
         async with response:
             is_first_chunk = True
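
The first hunk fixes the key collision named in the subject: Python's dict()
constructor rejects a keyword argument that also appears in an unpacked
mapping, so the old pattern raised a TypeError whenever the incoming
generation_info already carried a "finish_reason" key. A minimal,
self-contained sketch of the failure and of the patched pattern (assumed
values; not LangChain code):

    def merge_broken(res, generation_info):
        # Pre-patch pattern: dict() receives "finish_reason" twice when the
        # unpacked mapping already contains that key, raising TypeError.
        return dict(
            finish_reason=res.get("finish_reason"), **(generation_info or {})
        )

    def merge_fixed(res, generation_info):
        # Post-patch pattern: mutate in place, preferring the fresh value
        # and falling back to the existing one when the response omits it.
        generation_info = generation_info or {}
        generation_info["finish_reason"] = (
            res.get("finish_reason")
            if res.get("finish_reason") is not None
            else generation_info.get("finish_reason")
        )
        return generation_info

    res = {"finish_reason": "stop"}
    info = {"finish_reason": None}  # assumed: seeded before this call

    try:
        merge_broken(res, info)
    except TypeError as exc:
        # dict() got multiple values for keyword argument 'finish_reason'
        print(exc)

    print(merge_fixed(res, info))  # {'finish_reason': 'stop'}

Note that the patched pattern also keeps a previously recorded finish_reason
when the current response omits one, rather than overwriting it with None.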
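
The second hunk restores a missing await in _astream: self.async_client.create
is a coroutine function, so without await the name response is bound to a
coroutine object rather than a stream, and the subsequent "async with response"
fails because coroutines do not implement the async context-manager protocol.
A sketch with a stand-in client (FakeAsyncClient and FakeStream are
hypothetical; not the openai SDK):

    import asyncio

    class FakeStream:
        # Stands in for the streaming response; supports "async with".
        async def __aenter__(self):
            return self

        async def __aexit__(self, *exc):
            return False

    class FakeAsyncClient:
        async def create(self, **payload):
            return FakeStream()

    async def main():
        client = FakeAsyncClient()

        coro = client.create(model="...")  # pre-patch: no await
        try:
            async with coro:  # coroutine lacks __aenter__/__aexit__
                pass
        except (TypeError, AttributeError) as exc:
            print(exc)
            coro.close()  # silence the "never awaited" warning

        response = await client.create(model="...")  # post-patch
        async with response:  # works: FakeStream is an async context manager
            print("streaming")

    asyncio.run(main())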