mirror of
https://github.com/hwchase17/langchain.git
synced 2025-08-23 11:32:10 +00:00
community[patch]: Invoke callback prior to yielding token (ollama) (#18629)

## PR title
community[patch]: Invoke callback prior to yielding token

## PR message
- Description: Invoke the callback prior to yielding the token in the `_stream` and `_astream` methods in `llms/ollama`.
- Issue: #16913
- Dependencies: None
This commit is contained in:
parent
9dfce56b31
commit
280a914920
@@ -475,12 +475,12 @@ class Ollama(BaseLLM, _OllamaCommon):
         for stream_resp in self._create_generate_stream(prompt, stop, **kwargs):
             if stream_resp:
                 chunk = _stream_response_to_generation_chunk(stream_resp)
-                yield chunk
                 if run_manager:
                     run_manager.on_llm_new_token(
                         chunk.text,
                         verbose=self.verbose,
                     )
+                yield chunk

     async def _astream(
         self,
||||||
async def _astream(
|
async def _astream(
|
||||||
self,
|
self,
|
||||||
@@ -492,9 +492,9 @@ class Ollama(BaseLLM, _OllamaCommon):
         async for stream_resp in self._acreate_generate_stream(prompt, stop, **kwargs):
             if stream_resp:
                 chunk = _stream_response_to_generation_chunk(stream_resp)
-                yield chunk
                 if run_manager:
                     await run_manager.on_llm_new_token(
                         chunk.text,
                         verbose=self.verbose,
                     )
+                yield chunk
Loading…
Reference in New Issue
Block a user