From 3d1095218c91570f08da7d812b589367fdb627d5 Mon Sep 17 00:00:00 2001
From: Kim Minjong
Date: Tue, 22 Aug 2023 02:56:42 +0700
Subject: [PATCH] Update ChatOpenAI._astream to respect finish_reason (#9431)

Currently, ChatOpenAI._astream does not propagate finish_reason into
generation_info. Change it to do so.
---
 libs/langchain/langchain/chat_models/openai.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/libs/langchain/langchain/chat_models/openai.py b/libs/langchain/langchain/chat_models/openai.py
index ea9da5131e5..c2c7880edc6 100644
--- a/libs/langchain/langchain/chat_models/openai.py
+++ b/libs/langchain/langchain/chat_models/openai.py
@@ -381,10 +381,16 @@ class ChatOpenAI(BaseChatModel):
         ):
             if len(chunk["choices"]) == 0:
                 continue
-            delta = chunk["choices"][0]["delta"]
-            chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
+            choice = chunk["choices"][0]
+            chunk = _convert_delta_to_message_chunk(
+                choice["delta"], default_chunk_class
+            )
+            finish_reason = choice.get("finish_reason")
+            generation_info = (
+                dict(finish_reason=finish_reason) if finish_reason is not None else None
+            )
             default_chunk_class = chunk.__class__
-            yield ChatGenerationChunk(message=chunk)
+            yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
             if run_manager:
                 await run_manager.on_llm_new_token(chunk.content)
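
Not part of the patch: a minimal sketch of how the new behaviour could be observed
by driving the private _astream method directly. The prompt, model settings, and
direct use of _astream here are illustrative assumptions, not part of this change.

    # Illustrative only: stream chunks from ChatOpenAI._astream and inspect
    # generation_info. With this patch, the final chunk of the OpenAI stream
    # should carry finish_reason (e.g. "stop" or "length").
    import asyncio

    from langchain.chat_models import ChatOpenAI
    from langchain.schema import HumanMessage


    async def main() -> None:
        chat = ChatOpenAI(temperature=0)  # assumes OPENAI_API_KEY is set
        async for chunk in chat._astream([HumanMessage(content="Say hi")]):
            # chunk is a ChatGenerationChunk; generation_info stays None for
            # intermediate deltas and holds {"finish_reason": ...} at the end.
            if chunk.generation_info is not None:
                print(chunk.generation_info["finish_reason"])


    asyncio.run(main())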