mirror of https://github.com/hwchase17/langchain.git
synced 2025-06-21 14:18:52 +00:00
[partner]: ollama llm fix (#24790)
This commit is contained in:
parent 4bb1a11e02
commit 78d97b49d9
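In the streaming paths patched below, OllamaLLM was reading the chat-style stream_resp["message"]["content"] key, but the generate endpoint it calls returns each token under the top-level "response" key, so streamed text came back empty. A minimal sketch of the generate endpoint's streamed shape (not the library's code; the local URL and model tag are illustrative assumptions):

import json
import urllib.request

# Hypothetical local Ollama endpoint and model tag, for illustration only.
req = urllib.request.Request(
    "http://localhost:11434/api/generate",
    data=json.dumps({"model": "llama3", "prompt": "Hi", "stream": True}).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    for line in resp:
        part = json.loads(line)
        # /api/generate puts token text under "response"; only the chat
        # endpoint (/api/chat) nests it under "message" -> "content".
        print(part.get("response", ""), end="", flush=True)
        if part.get("done"):
            break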
@@ -304,11 +304,7 @@ class OllamaLLM(BaseLLM):
         for stream_resp in self._create_generate_stream(prompt, stop, **kwargs):
             if not isinstance(stream_resp, str):
                 chunk = GenerationChunk(
-                    text=(
-                        stream_resp["message"]["content"]
-                        if "message" in stream_resp
-                        else ""
-                    ),
+                    text=(stream_resp.get("response", "")),
                     generation_info=(
                         dict(stream_resp) if stream_resp.get("done") is True else None
                     ),
@@ -330,11 +326,7 @@ class OllamaLLM(BaseLLM):
         async for stream_resp in self._acreate_generate_stream(prompt, stop, **kwargs):
             if not isinstance(stream_resp, str):
                 chunk = GenerationChunk(
-                    text=(
-                        stream_resp["message"]["content"]
-                        if "message" in stream_resp
-                        else ""
-                    ),
+                    text=(stream_resp.get("response", "")),
                     generation_info=(
                         dict(stream_resp) if stream_resp.get("done") is True else None
                     ),
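With both the sync and async paths reading the "response" key, streaming yields non-empty chunks again. A short usage sketch (assumes a locally running Ollama server and that the named model tag has been pulled):

from langchain_ollama import OllamaLLM

# Hypothetical model tag; any model available to the local server works.
llm = OllamaLLM(model="llama3")
for chunk in llm.stream("Why is the sky blue?"):
    print(chunk, end="", flush=True)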