Update ChatOpenAI._stream to respect finish_reason (#9672)

Currently, ChatOpenAI._stream does not propagate finish_reason into
generation_info. Change it so that it does.

Same patch as https://github.com/langchain-ai/langchain/pull/9431, but
applied to _stream as well.
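
For context, a streamed chunk from the OpenAI chat completions API has
roughly the following shape (an abridged sketch; the values are
illustrative). finish_reason is null on intermediate chunks and set on
the final one, which is what the patched code picks up:

    # Abridged sketch of one streamed OpenAI chat completion chunk.
    chunk = {
        "choices": [
            {
                "delta": {"content": "Hello"},   # incremental message text
                "finish_reason": None,  # e.g. "stop" or "length" on the last chunk
            }
        ],
    }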
Kim Minjong, 2023-08-24 14:58:14 +09:00 (committed by GitHub)
parent 5990651070
commit d0ff0db698

@@ -307,10 +307,16 @@ class ChatOpenAI(BaseChatModel):
         ):
             if len(chunk["choices"]) == 0:
                 continue
-            delta = chunk["choices"][0]["delta"]
-            chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
+            choice = chunk["choices"][0]
+            chunk = _convert_delta_to_message_chunk(
+                choice["delta"], default_chunk_class
+            )
+            finish_reason = choice.get("finish_reason")
+            generation_info = (
+                dict(finish_reason=finish_reason) if finish_reason is not None else None
+            )
             default_chunk_class = chunk.__class__
-            yield ChatGenerationChunk(message=chunk)
+            yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
             if run_manager:
                 run_manager.on_llm_new_token(chunk.content)
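
After this change, a caller iterating the stream can observe the finish
reason on the final chunk. A minimal sketch of the new behavior (not
part of the patch; it calls the private _stream method directly purely
for illustration, and assumes OPENAI_API_KEY is set in the environment):

    from langchain.chat_models import ChatOpenAI
    from langchain.schema import HumanMessage

    chat = ChatOpenAI(streaming=True)
    for gen_chunk in chat._stream([HumanMessage(content="Say hi")]):
        print(gen_chunk.message.content, end="", flush=True)
        # generation_info is None for intermediate chunks; with this patch,
        # the final chunk carries e.g. {"finish_reason": "stop"}.
        if gen_chunk.generation_info is not None:
            print()
            print(gen_chunk.generation_info)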