From 280a914920990801be28534ac5bcc61bf1007b85 Mon Sep 17 00:00:00 2001
From: Yudhajit Sinha
Date: Wed, 20 Mar 2024 20:26:09 +0530
Subject: [PATCH] community[patch]: Invoke callback prior to yielding token
 (ollama) (#18629)

## PR title
community[patch]: Invoke callback prior to yielding token

## PR message
- Description: Invoke callback prior to yielding token in _stream_ & _astream_ methods in llms/ollama.
- Issue: #16913
- Dependencies: None
---
 libs/community/langchain_community/llms/ollama.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libs/community/langchain_community/llms/ollama.py b/libs/community/langchain_community/llms/ollama.py
index c4747a4ebb7..5c357ae9676 100644
--- a/libs/community/langchain_community/llms/ollama.py
+++ b/libs/community/langchain_community/llms/ollama.py
@@ -475,12 +475,12 @@ class Ollama(BaseLLM, _OllamaCommon):
         for stream_resp in self._create_generate_stream(prompt, stop, **kwargs):
             if stream_resp:
                 chunk = _stream_response_to_generation_chunk(stream_resp)
-                yield chunk
                 if run_manager:
                     run_manager.on_llm_new_token(
                         chunk.text,
                         verbose=self.verbose,
                     )
+                yield chunk
 
     async def _astream(
         self,
@@ -492,9 +492,9 @@ class Ollama(BaseLLM, _OllamaCommon):
         async for stream_resp in self._acreate_generate_stream(prompt, stop, **kwargs):
             if stream_resp:
                 chunk = _stream_response_to_generation_chunk(stream_resp)
-                yield chunk
                 if run_manager:
                     await run_manager.on_llm_new_token(
                         chunk.text,
                         verbose=self.verbose,
                     )
+                yield chunk
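
Below is a minimal sketch (not LangChain code) of why the callback must fire before the yield. The names `fake_ollama_stream`, `generate_chunks`, and the `seen.append` handler are hypothetical stand-ins for the Ollama response stream, the patched `_stream` method, and a callback handler; it assumes only standard-library Python.

```python
# Minimal sketch of the ordering this patch enforces (hypothetical names).
from typing import Callable, Iterator


def fake_ollama_stream() -> Iterator[str]:
    """Stand-in for the Ollama HTTP streaming response."""
    yield from ["Hello", ", ", "world", "!"]


def generate_chunks(on_llm_new_token: Callable[[str], None]) -> Iterator[str]:
    """Mirrors the patched _stream: invoke the callback, then yield."""
    for token in fake_ollama_stream():
        # Callback runs first, so handlers see every token that is produced,
        # even if the consumer of this generator stops iterating early.
        on_llm_new_token(token)
        yield token


if __name__ == "__main__":
    seen: list[str] = []
    stream = generate_chunks(seen.append)

    # Consumer abandons the stream after the first token.
    next(stream)
    stream.close()

    # The handler still saw that token because the callback ran pre-yield.
    # With the old post-yield ordering, close() raises GeneratorExit at the
    # suspended yield, so the callback for the last yielded token never fires.
    print(seen)  # ['Hello']
```

The design point: yielding suspends the generator, so any code placed after the yield only runs if the consumer resumes iteration. Firing `on_llm_new_token` before the yield guarantees handlers (tracing, token counting, streaming UIs) are never behind the tokens the caller has already received.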