Update ChatOpenAI._stream to respect finish_reason (#9672)
Currently, ChatOpenAI._stream does not propagate finish_reason into generation_info; this change makes it do so. It is the same patch as https://github.com/langchain-ai/langchain/pull/9431, but applied to _stream as well.
This commit is contained in:
parent 5990651070
commit d0ff0db698
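Before the diff, a quick sketch of what the patched method yields once the API reports a finish_reason. This is illustrative only: the import paths assume the langchain package layout around the time of this commit, and the payload values are made up.

from langchain.schema.messages import AIMessageChunk
from langchain.schema.output import ChatGenerationChunk

# Intermediate deltas yield chunks whose generation_info is None; the
# final delta carries finish_reason, which now surfaces like this:
final_chunk = ChatGenerationChunk(
    message=AIMessageChunk(content=""),
    generation_info={"finish_reason": "stop"},
)
print(final_chunk.generation_info)  # {'finish_reason': 'stop'}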
@@ -307,10 +307,16 @@ class ChatOpenAI(BaseChatModel):
         ):
             if len(chunk["choices"]) == 0:
                 continue
-            delta = chunk["choices"][0]["delta"]
-            chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
+            choice = chunk["choices"][0]
+            chunk = _convert_delta_to_message_chunk(
+                choice["delta"], default_chunk_class
+            )
+            finish_reason = choice.get("finish_reason")
+            generation_info = (
+                dict(finish_reason=finish_reason) if finish_reason is not None else None
+            )
             default_chunk_class = chunk.__class__
-            yield ChatGenerationChunk(message=chunk)
+            yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
             if run_manager:
                 run_manager.on_llm_new_token(chunk.content)
 
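To see the behavioral difference end to end, here is a small self-contained sketch of the pattern this hunk introduces. The chunk dicts are mocks in the shape of OpenAI's chat streaming payloads, not captured output; in a real stream only the final chunk carries a non-null finish_reason.

from typing import Optional

def generation_info_for(choice: dict) -> Optional[dict]:
    # Mirrors the patched logic: attach finish_reason only when present.
    finish_reason = choice.get("finish_reason")
    return dict(finish_reason=finish_reason) if finish_reason is not None else None

# Mocked streaming chunks, shaped like the OpenAI chat completions API.
chunks = [
    {"choices": [{"delta": {"content": "Hel"}, "finish_reason": None}]},
    {"choices": [{"delta": {"content": "lo"}, "finish_reason": None}]},
    {"choices": [{"delta": {}, "finish_reason": "stop"}]},
]

for chunk in chunks:
    choice = chunk["choices"][0]
    print(repr(choice["delta"].get("content", "")), generation_info_for(choice))
# 'Hel' None
# 'lo' None
# '' {'finish_reason': 'stop'}

Before this patch, every yielded ChatGenerationChunk had no generation_info, so callers could not tell why the stream ended (length limit, stop sequence, content filter, and so on); now the final chunk carries that signal.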