From 67375e96e08377735ba35e0d014b6618d11ceb9f Mon Sep 17 00:00:00 2001 From: William De Vena <60664495+williamdevena@users.noreply.github.com> Date: Sun, 3 Mar 2024 23:14:22 +0100 Subject: [PATCH] community[patch]: Invoke callback prior to yielding token (#18448) ## PR title community[patch]: Invoke callback prior to yielding token ## PR message - Description: Invoke callback prior to yielding token in _stream and _astream methods in llms/tongyi. - Issue: https://github.com/langchain-ai/langchain/issues/16913 - Dependencies: None --- libs/community/langchain_community/llms/tongyi.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/community/langchain_community/llms/tongyi.py b/libs/community/langchain_community/llms/tongyi.py index a11cf9c5153..3734e2f3a69 100644 --- a/libs/community/langchain_community/llms/tongyi.py +++ b/libs/community/langchain_community/llms/tongyi.py @@ -285,13 +285,13 @@ class Tongyi(BaseLLM): ) for stream_resp in stream_generate_with_retry(self, prompt=prompt, **params): chunk = GenerationChunk(**self._generation_from_qwen_resp(stream_resp)) - yield chunk if run_manager: run_manager.on_llm_new_token( chunk.text, chunk=chunk, verbose=self.verbose, ) + yield chunk async def _astream( self, @@ -307,13 +307,13 @@ self, prompt=prompt, **params ): chunk = GenerationChunk(**self._generation_from_qwen_resp(stream_resp)) - yield chunk if run_manager: await run_manager.on_llm_new_token( chunk.text, chunk=chunk, verbose=self.verbose, ) + yield chunk def _invocation_params(self, stop: Any, **kwargs: Any) -> Dict[str, Any]: params = {