From d0e101e4e01c662dae153bd0ab0b5ef355e7a986 Mon Sep 17 00:00:00 2001
From: Fei Wang
Date: Thu, 18 Jan 2024 01:42:41 +0800
Subject: [PATCH] community[patch]: fix ollama astream (#16070)

Update ollama.py
---
 libs/community/langchain_community/llms/ollama.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/community/langchain_community/llms/ollama.py b/libs/community/langchain_community/llms/ollama.py
index 29a724b07e9..db8d6617048 100644
--- a/libs/community/langchain_community/llms/ollama.py
+++ b/libs/community/langchain_community/llms/ollama.py
@@ -468,7 +468,7 @@ class Ollama(BaseLLM, _OllamaCommon):
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> AsyncIterator[GenerationChunk]:
-        async for stream_resp in self._acreate_stream(prompt, stop, **kwargs):
+        async for stream_resp in self._acreate_generate_stream(prompt, stop, **kwargs):
             if stream_resp:
                 chunk = _stream_response_to_generation_chunk(stream_resp)
                 yield chunk
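
Note for reviewers: a minimal sketch to exercise the fixed async path. It
assumes a local Ollama server on the default base URL with a pulled model;
the model name "llama2" is an assumption for illustration, not part of the
patch.

import asyncio

from langchain_community.llms import Ollama

async def main() -> None:
    # Any locally pulled model works here; "llama2" is just an example.
    llm = Ollama(model="llama2")
    # With this patch, Ollama._astream delegates to
    # _acreate_generate_stream, mirroring the synchronous _stream path,
    # so astream() yields incremental text chunks.
    async for chunk in llm.astream("Why is the sky blue?"):
        print(chunk, end="", flush=True)

asyncio.run(main())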