From d1c6ad77693fe2f3a15506301b9d51510ffba634 Mon Sep 17 00:00:00 2001
From: Nuno Campos
Date: Thu, 2 Nov 2023 16:33:44 +0000
Subject: [PATCH] Fix on_llm_new_token(chunk=) for some chat models (#12784)

It was passing in message instead of generation
---
 libs/langchain/langchain/chat_models/fireworks.py | 10 ++++++----
 libs/langchain/langchain/chat_models/konko.py     |  5 +++--
 libs/langchain/langchain/chat_models/openai.py    | 10 ++++++----
 libs/langchain/langchain/chat_models/tongyi.py    |  5 +++--
 4 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/libs/langchain/langchain/chat_models/fireworks.py b/libs/langchain/langchain/chat_models/fireworks.py
index 1bc35ca42ee..36a7d582369 100644
--- a/libs/langchain/langchain/chat_models/fireworks.py
+++ b/libs/langchain/langchain/chat_models/fireworks.py
@@ -210,9 +210,10 @@ class ChatFireworks(BaseChatModel):
                 dict(finish_reason=finish_reason) if finish_reason is not None else None
             )
             default_chunk_class = chunk.__class__
-            yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
+            chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
+            yield chunk
             if run_manager:
-                run_manager.on_llm_new_token(chunk.content, chunk=chunk)
+                run_manager.on_llm_new_token(chunk.text, chunk=chunk)
 
     async def _astream(
         self,
@@ -239,9 +240,10 @@ class ChatFireworks(BaseChatModel):
                 dict(finish_reason=finish_reason) if finish_reason is not None else None
             )
             default_chunk_class = chunk.__class__
-            yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
+            chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
+            yield chunk
             if run_manager:
-                await run_manager.on_llm_new_token(token=chunk.content, chunk=chunk)
+                await run_manager.on_llm_new_token(token=chunk.text, chunk=chunk)
 
 
 def conditional_decorator(
diff --git a/libs/langchain/langchain/chat_models/konko.py b/libs/langchain/langchain/chat_models/konko.py
index ab46a4c2d9a..aeb14c187ac 100644
--- a/libs/langchain/langchain/chat_models/konko.py
+++ b/libs/langchain/langchain/chat_models/konko.py
@@ -212,9 +212,10 @@ class ChatKonko(ChatOpenAI):
                 dict(finish_reason=finish_reason) if finish_reason is not None else None
             )
             default_chunk_class = chunk.__class__
-            yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
+            chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
+            yield chunk
             if run_manager:
-                run_manager.on_llm_new_token(chunk.content, chunk=chunk)
+                run_manager.on_llm_new_token(chunk.text, chunk=chunk)
 
     def _generate(
         self,
diff --git a/libs/langchain/langchain/chat_models/openai.py b/libs/langchain/langchain/chat_models/openai.py
index d2993c13baf..7f96a1befbd 100644
--- a/libs/langchain/langchain/chat_models/openai.py
+++ b/libs/langchain/langchain/chat_models/openai.py
@@ -341,9 +341,10 @@ class ChatOpenAI(BaseChatModel):
                 dict(finish_reason=finish_reason) if finish_reason is not None else None
             )
             default_chunk_class = chunk.__class__
-            yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
+            chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
+            yield chunk
             if run_manager:
-                run_manager.on_llm_new_token(chunk.content, chunk=chunk)
+                run_manager.on_llm_new_token(chunk.text, chunk=chunk)
 
     def _generate(
         self,
@@ -415,9 +416,10 @@ class ChatOpenAI(BaseChatModel):
                 dict(finish_reason=finish_reason) if finish_reason is not None else None
             )
             default_chunk_class = chunk.__class__
-            yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
+            chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
+            yield chunk
             if run_manager:
-                await run_manager.on_llm_new_token(token=chunk.content, chunk=chunk)
+                await run_manager.on_llm_new_token(token=chunk.text, chunk=chunk)
 
     async def _agenerate(
         self,
diff --git a/libs/langchain/langchain/chat_models/tongyi.py b/libs/langchain/langchain/chat_models/tongyi.py
index c3c7242353d..9176a1ae20b 100644
--- a/libs/langchain/langchain/chat_models/tongyi.py
+++ b/libs/langchain/langchain/chat_models/tongyi.py
@@ -360,9 +360,10 @@ class ChatTongyi(BaseChatModel):
                 dict(finish_reason=finish_reason) if finish_reason is not None else None
             )
             default_chunk_class = chunk.__class__
-            yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
+            chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
+            yield chunk
             if run_manager:
-                run_manager.on_llm_new_token(chunk.content, chunk=chunk)
+                run_manager.on_llm_new_token(chunk.text, chunk=chunk)
             length = len(choice["message"]["content"])
 
     def _create_message_dicts(
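
Illustration (not part of the patch): after this change the chunk= argument passed to on_llm_new_token is the same ChatGenerationChunk the model yields, rather than the raw message chunk, and the token is the generation's text. Below is a minimal sketch of a callback handler that consumes it, assuming the callbacks API of the langchain release this patch targets (BaseCallbackHandler and the chunk keyword argument); the handler name is hypothetical.

from typing import Any, Optional, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema.output import ChatGenerationChunk, GenerationChunk


class PrintTokenHandler(BaseCallbackHandler):
    """Hypothetical handler: print each streamed token as it arrives."""

    def on_llm_new_token(
        self,
        token: str,
        *,
        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
        **kwargs: Any,
    ) -> None:
        # With this patch applied, chunk is the ChatGenerationChunk that the
        # chat model also yields, so chunk.text agrees with the token argument.
        if chunk is not None:
            print(chunk.text, end="", flush=True)
        else:
            print(token, end="", flush=True)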