Mirror of https://github.com/hwchase17/langchain.git, synced 2025-06-21 14:18:52 +00:00
ollama[patch]: fix generation info (#30863)
https://github.com/langchain-ai/langchain/pull/30778 (not yet released) broke all invocation modes of `ChatOllama`: the intent was to remove `"message"` from `generation_info`, but since `dict.pop` returns the popped value, `generation_info` instead became `stream_resp["message"]`, resulting in validation errors.
parent cf2697ec53
commit 47ded80b64
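The root cause is easy to reproduce in isolation: `dict.pop` returns the popped *value*, not the remaining dict. A minimal sketch, assuming a hypothetical `stream_resp` payload shaped like an Ollama streaming response:

```python
# The sample payload below is an assumption for illustration.
stream_resp = {
    "model": "llama3",
    "done": True,
    "message": {"role": "assistant", "content": "hi"},
}

# Old (broken): pop returns the popped value, i.e. the message itself.
broken = dict(stream_resp).pop("message", None)
assert broken == {"role": "assistant", "content": "hi"}

# Fixed: copy the dict first, then discard the "message" key.
generation_info = dict(stream_resp)
_ = generation_info.pop("message", None)
assert generation_info == {"model": "llama3", "done": True}
```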
```diff
@@ -719,6 +719,11 @@ class ChatOllama(BaseChatModel):
         is_thinking = False
         for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
             if not isinstance(stream_resp, str):
+                if stream_resp.get("done") is True:
+                    generation_info = dict(stream_resp)
+                    _ = generation_info.pop("message", None)
+                else:
+                    generation_info = None
                 chunk = ChatGenerationChunk(
                     message=AIMessageChunk(
                         content=(
```
```diff
@@ -732,11 +737,7 @@ class ChatOllama(BaseChatModel):
                         ),
                         tool_calls=_get_tool_calls_from_response(stream_resp),
                     ),
-                    generation_info=(
-                        dict(stream_resp).pop("message", None)
-                        if stream_resp.get("done") is True
-                        else None
-                    ),
+                    generation_info=generation_info,
                 )
                 if chunk.generation_info and (
                     model := chunk.generation_info.get("model")
```
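Taken together, the two hunks above make `_stream` compute `generation_info` once, before the chunk is built. A standalone sketch of the fixed per-chunk logic, with the thinking and tool-call handling from the real method elided and an assumed `stream_resp` shape:

```python
from langchain_core.messages import AIMessageChunk
from langchain_core.outputs import ChatGenerationChunk

def build_chunk(stream_resp: dict) -> ChatGenerationChunk:
    # Only the final ("done") response carries metadata; strip the raw
    # message out so generation_info holds metadata alone.
    if stream_resp.get("done") is True:
        generation_info = dict(stream_resp)
        _ = generation_info.pop("message", None)
    else:
        generation_info = None
    return ChatGenerationChunk(
        message=AIMessageChunk(
            content=stream_resp.get("message", {}).get("content", "")
        ),
        generation_info=generation_info,
    )

final = build_chunk(
    {"model": "llama3", "done": True, "message": {"role": "assistant", "content": ""}}
)
assert final.generation_info == {"model": "llama3", "done": True}
```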
```diff
@@ -773,6 +774,11 @@ class ChatOllama(BaseChatModel):
         is_thinking = False
         async for stream_resp in self._acreate_chat_stream(messages, stop, **kwargs):
             if not isinstance(stream_resp, str):
+                if stream_resp.get("done") is True:
+                    generation_info = dict(stream_resp)
+                    _ = generation_info.pop("message", None)
+                else:
+                    generation_info = None
                 chunk = ChatGenerationChunk(
                     message=AIMessageChunk(
                         content=(
```
```diff
@@ -786,11 +792,7 @@ class ChatOllama(BaseChatModel):
                         ),
                         tool_calls=_get_tool_calls_from_response(stream_resp),
                     ),
-                    generation_info=(
-                        dict(stream_resp).pop("message", None)
-                        if stream_resp.get("done") is True
-                        else None
-                    ),
+                    generation_info=generation_info,
                 )
                 if chunk.generation_info and (
                     model := chunk.generation_info.get("model")
```
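The second pair of hunks applies the identical fix to the async `_astream` path. How it surfaces to callers, as a hedged usage sketch (assumes a running local Ollama server; the model name is an assumption):

```python
from langchain_ollama import ChatOllama

llm = ChatOllama(model="llama3")

# Under the broken code path, invocations raised validation errors; with
# the fix, chunks stream normally and only the final one carries the
# generation metadata (model, durations, done flag, ...).
for chunk in llm.stream("Say hi"):
    print(chunk.content, end="", flush=True)

# The same metadata is exposed on a non-streaming call.
msg = llm.invoke("Say hi")
print(msg.response_metadata.get("model"))
```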