From d0ff0db69829bb2324e4cf067b10a98bce5acf64 Mon Sep 17 00:00:00 2001
From: Kim Minjong
Date: Thu, 24 Aug 2023 14:58:14 +0900
Subject: [PATCH] Update ChatOpenAI._stream to respect finish_reason (#9672)

Currently, ChatOpenAI._stream does not propagate finish_reason into
generation_info. Change it so that it does.

Same patch as https://github.com/langchain-ai/langchain/pull/9431, but applied
to _stream as well.
---
 libs/langchain/langchain/chat_models/openai.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/libs/langchain/langchain/chat_models/openai.py b/libs/langchain/langchain/chat_models/openai.py
index c2c7880edc6..7cb1947cf88 100644
--- a/libs/langchain/langchain/chat_models/openai.py
+++ b/libs/langchain/langchain/chat_models/openai.py
@@ -307,10 +307,16 @@ class ChatOpenAI(BaseChatModel):
         ):
             if len(chunk["choices"]) == 0:
                 continue
-            delta = chunk["choices"][0]["delta"]
-            chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
+            choice = chunk["choices"][0]
+            chunk = _convert_delta_to_message_chunk(
+                choice["delta"], default_chunk_class
+            )
+            finish_reason = choice.get("finish_reason")
+            generation_info = (
+                dict(finish_reason=finish_reason) if finish_reason is not None else None
+            )
             default_chunk_class = chunk.__class__
-            yield ChatGenerationChunk(message=chunk)
+            yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
             if run_manager:
                 run_manager.on_llm_new_token(chunk.content)
 
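
Note (not part of the patch): a minimal sketch of how the new generation_info
could be observed by consuming ChatOpenAI._stream directly. _stream is a
private method and the model settings and prompt below are placeholders, so
treat this as an illustration under those assumptions rather than a supported
usage pattern:

    from langchain.chat_models import ChatOpenAI
    from langchain.schema import HumanMessage

    chat = ChatOpenAI(streaming=True, temperature=0)
    for gen_chunk in chat._stream([HumanMessage(content="Say hi")]):
        # Most chunks carry generation_info=None; with this patch the final
        # chunk carries {"finish_reason": "stop"} (or "length",
        # "function_call", ...), mirroring the OpenAI streaming response.
        print(gen_chunk.message.content, end="", flush=True)
        if gen_chunk.generation_info is not None:
            print("\nfinish_reason:", gen_chunk.generation_info["finish_reason"])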