core: Move json parsing in base chat model / output parser to bg thread (#24031)

- add version of AIMessageChunk.__add__ that can add many chunks,
instead of only 2
- In agenerate_from_stream merge and parse chunks in bg thread
- In output parser base classes, do more work in bg threads where
appropriate

---------

Co-authored-by: William FH <13333726+hinthornw@users.noreply.github.com>
This commit is contained in:
Nuno Campos
2024-07-09 20:26:36 +01:00
committed by GitHub
parent 73966e693c
commit 160fc7f246
6 changed files with 191 additions and 164 deletions

View File

@@ -1,6 +1,6 @@
from __future__ import annotations
from typing import Any, Dict, List, Literal
from typing import Any, Dict, List, Literal, Union
from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.outputs.generation import Generation
@@ -88,7 +88,9 @@ class ChatGenerationChunk(ChatGeneration):
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "output"]
def __add__(self, other: ChatGenerationChunk) -> ChatGenerationChunk:
def __add__(
self, other: Union[ChatGenerationChunk, List[ChatGenerationChunk]]
) -> ChatGenerationChunk:
if isinstance(other, ChatGenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
@@ -98,6 +100,17 @@ class ChatGenerationChunk(ChatGeneration):
message=self.message + other.message,
generation_info=generation_info or None,
)
elif isinstance(other, list) and all(
isinstance(x, ChatGenerationChunk) for x in other
):
generation_info = merge_dicts(
self.generation_info or {},
*[chunk.generation_info for chunk in other if chunk.generation_info],
)
return ChatGenerationChunk(
message=self.message + [chunk.message for chunk in other],
generation_info=generation_info or None,
)
else:
raise TypeError(
f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"