From 8eb0bdead343b2c2d1feb6eef82015f7468e0a7f Mon Sep 17 00:00:00 2001 From: alexqiao Date: Thu, 1 Aug 2024 21:19:55 +0800 Subject: [PATCH] community[patch]: Invoke callback prior to yielding token (#24917) **Description: Invoke callback prior to yielding token in stream method for chat_models.** **Issue**: https://github.com/langchain-ai/langchain/issues/16913 #16913 --- libs/community/langchain_community/chat_models/friendli.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/community/langchain_community/chat_models/friendli.py b/libs/community/langchain_community/chat_models/friendli.py index 593c51ea9e0..a860ebf98bc 100644 --- a/libs/community/langchain_community/chat_models/friendli.py +++ b/libs/community/langchain_community/chat_models/friendli.py @@ -134,9 +134,9 @@ class ChatFriendli(BaseChatModel, BaseFriendli): for chunk in stream: delta = chunk.choices[0].delta.content if delta: - yield ChatGenerationChunk(message=AIMessageChunk(content=delta)) if run_manager: run_manager.on_llm_new_token(delta) + yield ChatGenerationChunk(message=AIMessageChunk(content=delta)) async def _astream( self, @@ -152,9 +152,9 @@ class ChatFriendli(BaseChatModel, BaseFriendli): async for chunk in stream: delta = chunk.choices[0].delta.content if delta: - yield ChatGenerationChunk(message=AIMessageChunk(content=delta)) if run_manager: await run_manager.on_llm_new_token(delta) + yield ChatGenerationChunk(message=AIMessageChunk(content=delta)) def _generate( self,