core[patch]: docstring update (#16813)

- added missing docstrings
- formatted docstrings to a consistent form
Leonid Ganeline 2024-02-09 12:47:41 -08:00 committed by GitHub
parent e10030e241
commit ae66bcbc10
33 changed files with 162 additions and 131 deletions

View File

@ -22,7 +22,7 @@ AnyMessage = Union[
def get_buffer_string(
messages: Sequence[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI"
) -> str:
"""Convert sequence of Messages to strings and concatenate them into one string.
"""Convert a sequence of Messages to strings and concatenate them into one string.
Args:
messages: Messages to be converted to strings.
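For reference, a minimal usage sketch of this helper (imports from langchain_core.messages, the module this hunk touches):

.. code-block:: python

    from langchain_core.messages import AIMessage, HumanMessage, get_buffer_string

    messages = [HumanMessage(content="What is 2 + 2?"), AIMessage(content="4")]
    print(get_buffer_string(messages))
    # Human: What is 2 + 2?
    # AI: 4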
@ -111,6 +111,14 @@ def messages_from_dict(messages: Sequence[dict]) -> List[BaseMessage]:
def message_chunk_to_message(chunk: BaseMessageChunk) -> BaseMessage:
"""Convert a message chunk to a message.
Args:
chunk: Message chunk to convert.
Returns:
Message.
"""
if not isinstance(chunk, BaseMessageChunk):
return chunk
# chunk classes always have the equivalent non-chunk class as their first parent
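A short sketch of the conversion using the public messages API:

.. code-block:: python

    from langchain_core.messages import AIMessage, AIMessageChunk, message_chunk_to_message

    chunk = AIMessageChunk(content="Hello, ") + AIMessageChunk(content="world")
    message = message_chunk_to_message(chunk)
    assert isinstance(message, AIMessage)     # collapses to the non-chunk parent class
    assert message.content == "Hello, world"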

View File

@ -8,7 +8,7 @@ from langchain_core.messages.base import (
class AIMessage(BaseMessage):
"""A Message from an AI."""
"""Message from an AI."""
example: bool = False
"""Whether this Message is being passed in to the model as part of an example
@ -27,7 +27,7 @@ AIMessage.update_forward_refs()
class AIMessageChunk(AIMessage, BaseMessageChunk):
"""A Message chunk from an AI."""
"""Message chunk from an AI."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the

View File

@ -12,7 +12,7 @@ if TYPE_CHECKING:
class BaseMessage(Serializable):
"""The base abstract Message class.
"""Base abstract Message class.
Messages are the inputs and outputs of ChatModels.
"""
@ -96,7 +96,7 @@ def merge_content(
class BaseMessageChunk(BaseMessage):
"""A Message chunk, which can be concatenated with other Message chunks."""
"""Message chunk, which can be concatenated with other Message chunks."""
@classmethod
def get_lc_namespace(cls) -> List[str]:
@ -195,6 +195,15 @@ def messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]:
def get_msg_title_repr(title: str, *, bold: bool = False) -> str:
"""Get a title representation for a message.
Args:
title: The title.
bold: Whether to bold the title.
Returns:
The title representation.
"""
padded = " " + title + " "
sep_len = (80 - len(padded)) // 2
sep = "=" * sep_len
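A quick sketch of what the helper produces (exact padding follows the 80-column layout computed above):

.. code-block:: python

    from langchain_core.messages.base import get_msg_title_repr

    print(get_msg_title_repr("Ai Message"))
    # prints the title centered in a row of "=" padding, roughly 80 columns wide;
    # pass bold=True to emphasize the title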

View File

@ -8,7 +8,7 @@ from langchain_core.messages.base import (
class ChatMessage(BaseMessage):
"""A Message that can be assigned an arbitrary speaker (i.e. role)."""
"""Message that can be assigned an arbitrary speaker (i.e. role)."""
role: str
"""The speaker / role of the Message."""
@ -25,7 +25,7 @@ ChatMessage.update_forward_refs()
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
"""A Chat Message chunk."""
"""Chat Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the

View File

@ -8,7 +8,7 @@ from langchain_core.messages.base import (
class FunctionMessage(BaseMessage):
"""A Message for passing the result of executing a function back to a model."""
"""Message for passing the result of executing a function back to a model."""
name: str
"""The name of the function that was executed."""
@ -25,7 +25,7 @@ FunctionMessage.update_forward_refs()
class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
"""A Function Message chunk."""
"""Function Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the

View File

@ -4,7 +4,7 @@ from langchain_core.messages.base import BaseMessage, BaseMessageChunk
class HumanMessage(BaseMessage):
"""A Message from a human."""
"""Message from a human."""
example: bool = False
"""Whether this Message is being passed in to the model as part of an example
@ -23,7 +23,7 @@ HumanMessage.update_forward_refs()
class HumanMessageChunk(HumanMessage, BaseMessageChunk):
"""A Human Message chunk."""
"""Human Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the

View File

@ -4,7 +4,7 @@ from langchain_core.messages.base import BaseMessage, BaseMessageChunk
class SystemMessage(BaseMessage):
"""A Message for priming AI behavior, usually passed in as the first of a sequence
"""Message for priming AI behavior, usually passed in as the first of a sequence
of input messages.
"""
@ -20,7 +20,7 @@ SystemMessage.update_forward_refs()
class SystemMessageChunk(SystemMessage, BaseMessageChunk):
"""A System Message chunk."""
"""System Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the

View File

@ -8,7 +8,7 @@ from langchain_core.messages.base import (
class ToolMessage(BaseMessage):
"""A Message for passing the result of executing a tool back to a model."""
"""Message for passing the result of executing a tool back to a model."""
tool_call_id: str
"""Tool call that this message is responding to."""
@ -25,7 +25,7 @@ ToolMessage.update_forward_refs()
class ToolMessageChunk(ToolMessage, BaseMessageChunk):
"""A Tool Message chunk."""
"""Tool Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the

View File

@ -35,7 +35,7 @@ class ChatGeneration(Generation):
class ChatGenerationChunk(ChatGeneration):
"""A ChatGeneration chunk, which can be concatenated with other
"""ChatGeneration chunk, which can be concatenated with other
ChatGeneration chunks.
Attributes:

View File

@ -32,7 +32,7 @@ class Generation(Serializable):
class GenerationChunk(Generation):
"""A Generation chunk, which can be concatenated with other Generation chunks."""
"""Generation chunk, which can be concatenated with other Generation chunks."""
@classmethod
def get_lc_namespace(cls) -> List[str]:

View File

@ -556,7 +556,7 @@ MessageLikeRepresentation = Union[
class ChatPromptTemplate(BaseChatPromptTemplate):
"""A prompt template for chat models.
"""Prompt template for chat models.
Use to create flexible templated prompts for chat models.
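A minimal sketch of the flexible templating described here:

.. code-block:: python

    from langchain_core.prompts import ChatPromptTemplate

    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful assistant named {name}."),
            ("human", "{question}"),
        ]
    )
    messages = prompt.format_messages(name="Bob", question="What does LCEL stand for?")
    # -> [SystemMessage(...), HumanMessage(...)], ready to pass to a chat model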

View File

@ -11,7 +11,7 @@ def _get_inputs(inputs: dict, input_variables: List[str]) -> dict:
class PipelinePromptTemplate(BasePromptTemplate):
"""A prompt template for composing multiple prompt templates together.
"""Prompt template for composing multiple prompt templates together.
This can be useful when you want to reuse parts of prompts.
A PipelinePrompt consists of two main parts:
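A small sketch of composing prompts this way, using the final_prompt and pipeline_prompts fields:

.. code-block:: python

    from langchain_core.prompts import PromptTemplate
    from langchain_core.prompts.pipeline import PipelinePromptTemplate

    full_prompt = PromptTemplate.from_template("{introduction}\n\n{question}")
    introduction = PromptTemplate.from_template("You are impersonating {person}.")
    question = PromptTemplate.from_template("Q: {input}\nA:")

    pipeline = PipelinePromptTemplate(
        final_prompt=full_prompt,
        pipeline_prompts=[("introduction", introduction), ("question", question)],
    )
    print(pipeline.format(person="Ada Lovelace", input="What is an analytical engine?"))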

View File

@ -1632,7 +1632,7 @@ class Runnable(Generic[Input, Output], ABC):
class RunnableSerializable(Serializable, Runnable[Input, Output]):
"""A Runnable that can be serialized to JSON."""
"""Runnable that can be serialized to JSON."""
name: Optional[str] = None
"""The name of the runnable. Used for debugging and tracing."""
@ -1752,7 +1752,7 @@ def _seq_output_schema(
class RunnableSequence(RunnableSerializable[Input, Output]):
"""A sequence of runnables, where the output of each is the input of the next.
"""Sequence of Runnables, where the output of each is the input of the next.
RunnableSequence is the most important composition operator in LangChain as it is
used in virtually every chain.
@ -1764,7 +1764,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
The default implementations of `batch` and `abatch` utilize threadpools and
asyncio gather and will be faster than naive invocation of invoke or ainvoke
for IO bound runnables.
for IO bound Runnables.
Batching is implemented by invoking the batch method on each component of the
RunnableSequence in order.
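For example, a two-step sequence built with the | operator (a minimal sketch):

.. code-block:: python

    from langchain_core.runnables import RunnableLambda

    # `|` composes Runnables into a RunnableSequence
    chain = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)

    chain.invoke(1)         # -> 4
    chain.batch([1, 2, 3])  # -> [4, 6, 8], batched across a threadpool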
@ -2451,11 +2451,11 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
class RunnableParallel(RunnableSerializable[Input, Dict[str, Any]]):
"""A runnable that runs a mapping of runnables in parallel, and returns a mapping
"""Runnable that runs a mapping of Runnables in parallel, and returns a mapping
of their outputs.
RunnableParallel is one of the two main composition primitives for the LCEL,
alongside RunnableSequence. It invokes runnables concurrently, providing the same
alongside RunnableSequence. It invokes Runnables concurrently, providing the same
input to each.
A RunnableParallel can be instantiated directly or by using a dict literal within a
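A minimal sketch of both construction styles:

.. code-block:: python

    from langchain_core.runnables import RunnableLambda, RunnableParallel

    mapper = RunnableParallel(
        doubled=RunnableLambda(lambda x: x * 2),
        squared=RunnableLambda(lambda x: x ** 2),
    )
    mapper.invoke(3)  # -> {"doubled": 6, "squared": 9}

    # a dict literal inside a sequence is coerced to a RunnableParallel
    chain = RunnableLambda(lambda x: x + 1) | {"doubled": RunnableLambda(lambda x: x * 2)}
    chain.invoke(2)  # -> {"doubled": 6}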
@ -2882,7 +2882,7 @@ RunnableMap = RunnableParallel
class RunnableGenerator(Runnable[Input, Output]):
"""A runnable that runs a generator function.
"""Runnable that runs a generator function.
RunnableGenerators can be instantiated directly or by using a generator within
a sequence.
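A small sketch of wrapping a generator function directly:

.. code-block:: python

    from typing import Iterator

    from langchain_core.runnables import RunnableGenerator

    def streaming_upper(chunks: Iterator[str]) -> Iterator[str]:
        for chunk in chunks:
            yield chunk.upper()

    runnable = RunnableGenerator(streaming_upper)
    list(runnable.stream("abc"))  # -> ["ABC"]
    runnable.invoke("abc")        # -> "ABC" (streamed chunks are concatenated)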
@ -3730,7 +3730,7 @@ class RunnableLambda(Runnable[Input, Output]):
class RunnableEachBase(RunnableSerializable[List[Input], List[Output]]):
"""A runnable that delegates calls to another runnable
"""Runnable that delegates calls to another Runnable
with each element of the input sequence.
Use only if creating a new RunnableEach subclass with different __init__ args.
@ -3838,13 +3838,13 @@ class RunnableEachBase(RunnableSerializable[List[Input], List[Output]]):
class RunnableEach(RunnableEachBase[Input, Output]):
"""A runnable that delegates calls to another runnable
"""Runnable that delegates calls to another Runnable
with each element of the input sequence.
It allows you to call multiple inputs with the bounded Runnable.
RunnableEach makes it easy to run multiple inputs for the runnable.
In the below example, we associate and run three three inputs
In the below example, we associate and run three inputs
with a Runnable:
.. code-block:: python
@ -3910,7 +3910,7 @@ class RunnableEach(RunnableEachBase[Input, Output]):
class RunnableBindingBase(RunnableSerializable[Input, Output]):
"""A runnable that delegates calls to another runnable with a set of kwargs.
"""Runnable that delegates calls to another Runnable with a set of kwargs.
Use only if creating a new RunnableBinding subclass with different __init__ args.
@ -4189,7 +4189,7 @@ RunnableBindingBase.update_forward_refs(RunnableConfig=RunnableConfig)
class RunnableBinding(RunnableBindingBase[Input, Output]):
"""Wrap a runnable with additional functionality.
"""Wrap a Runnable with additional functionality.
A RunnableBinding can be thought of as a "runnable decorator" that
preserves the essential features of Runnable; i.e., batching, streaming,

View File

@ -38,13 +38,13 @@ from langchain_core.runnables.utils import (
class RunnableBranch(RunnableSerializable[Input, Output]):
"""A Runnable that selects which branch to run based on a condition.
"""Runnable that selects which branch to run based on a condition.
The runnable is initialized with a list of (condition, runnable) pairs and
The Runnable is initialized with a list of (condition, Runnable) pairs and
a default branch.
When operating on an input, the first condition that evaluates to True is
selected, and the corresponding runnable is run on the input.
selected, and the corresponding Runnable is run on the input.
If no condition evaluates to True, the default branch is run on the input.
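A minimal sketch of the (condition, Runnable) pairs plus a default branch:

.. code-block:: python

    from langchain_core.runnables import RunnableBranch, RunnableLambda

    branch = RunnableBranch(
        (lambda x: isinstance(x, str), RunnableLambda(lambda x: x.upper())),
        (lambda x: isinstance(x, int), RunnableLambda(lambda x: x + 1)),
        RunnableLambda(lambda x: str(x)),  # default branch
    )
    branch.invoke("hello")  # -> "HELLO"
    branch.invoke(41)       # -> 42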

View File

@ -42,7 +42,7 @@ from langchain_core.runnables.utils import (
class DynamicRunnable(RunnableSerializable[Input, Output]):
"""A Serializable Runnable that can be dynamically configured."""
"""Serializable Runnable that can be dynamically configured."""
default: RunnableSerializable[Input, Output]
@ -220,7 +220,7 @@ class DynamicRunnable(RunnableSerializable[Input, Output]):
class RunnableConfigurableFields(DynamicRunnable[Input, Output]):
"""A Runnable that can be dynamically configured."""
"""Runnable that can be dynamically configured."""
fields: Dict[str, AnyConfigurableField]
@ -297,7 +297,7 @@ class RunnableConfigurableFields(DynamicRunnable[Input, Output]):
# Before Python 3.11 native StrEnum is not available
class StrEnum(str, enum.Enum):
"""A string enum."""
"""String enum."""
pass
@ -313,10 +313,10 @@ _enums_for_spec_lock = threading.Lock()
class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
"""A Runnable that can be dynamically configured.
"""Runnable that can be dynamically configured.
A RunnableConfigurableAlternatives should be initiated using the
`configurable_alternatives` method of a runnable or can be
`configurable_alternatives` method of a Runnable or can be
initiated directly as well.
Here is an example of using a RunnableConfigurableAlternatives that uses
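Separate from the example the docstring goes on to give, here is a minimal self-contained sketch using prompt templates as the alternatives, so it runs without any model provider:

.. code-block:: python

    from langchain_core.prompts import PromptTemplate
    from langchain_core.runnables import ConfigurableField

    prompt = PromptTemplate.from_template(
        "Tell me a joke about {topic}"
    ).configurable_alternatives(
        ConfigurableField(id="style"),
        default_key="joke",
        poem=PromptTemplate.from_template("Write a short poem about {topic}"),
    )

    prompt.invoke({"topic": "cats"})                                              # default ("joke")
    prompt.with_config(configurable={"style": "poem"}).invoke({"topic": "cats"})  # the "poem" alternative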

View File

@ -39,20 +39,20 @@ if TYPE_CHECKING:
class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
"""A Runnable that can fallback to other Runnables if it fails.
"""Runnable that can fallback to other Runnables if it fails.
External APIs (e.g., APIs for a language model) may at times experience
degraded performance or even downtime.
In these cases, it can be useful to have a fallback runnable that can be
used in place of the original runnable (e.g., fallback to another LLM provider).
In these cases, it can be useful to have a fallback Runnable that can be
used in place of the original Runnable (e.g., fallback to another LLM provider).
Fallbacks can be defined at the level of a single runnable, or at the level
of a chain of runnables. Fallbacks are tried in order until one succeeds or
Fallbacks can be defined at the level of a single Runnable, or at the level
of a chain of Runnables. Fallbacks are tried in order until one succeeds or
all fail.
While you can instantiate a ``RunnableWithFallbacks`` directly, it is usually
more convenient to use the ``with_fallbacks`` method on a runnable.
more convenient to use the ``with_fallbacks`` method on a Runnable.
Example:
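A minimal, self-contained sketch of the fallback pattern, with lambdas standing in for real LLM providers:

.. code-block:: python

    from langchain_core.runnables import RunnableLambda

    def unreliable(text: str) -> str:
        raise ValueError("primary runnable failed")

    chain = RunnableLambda(unreliable).with_fallbacks(
        [RunnableLambda(lambda text: f"fallback handled: {text}")]
    )
    chain.invoke("hello")  # -> "fallback handled: hello"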

View File

@ -12,17 +12,23 @@ if TYPE_CHECKING:
class Edge(NamedTuple):
"""Edge in a graph."""
source: str
target: str
class Node(NamedTuple):
"""Node in a graph."""
id: str
data: Union[Type[BaseModel], RunnableType]
@dataclass
class Graph:
"""Graph of nodes and edges."""
nodes: Dict[str, Node] = field(default_factory=dict)
edges: List[Edge] = field(default_factory=list)
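A Graph of this shape is what Runnable.get_graph() returns; a quick sketch, assuming that method is available in this version:

.. code-block:: python

    from langchain_core.runnables import RunnableLambda

    chain = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)
    graph = chain.get_graph()
    print(len(graph.nodes), len(graph.edges))  # input/output schema nodes plus the two lambda steps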

View File

@ -35,21 +35,21 @@ GetSessionHistoryCallable = Callable[..., BaseChatMessageHistory]
class RunnableWithMessageHistory(RunnableBindingBase):
"""A runnable that manages chat message history for another runnable.
"""Runnable that manages chat message history for another Runnable.
A chat message history is a sequence of messages that represent a conversation.
RunnableWithMessageHistory wraps another runnable and manages the chat message
RunnableWithMessageHistory wraps another Runnable and manages the chat message
history for it; it is responsible for reading and updating the chat message
history.
The formats supports for the inputs and outputs of the wrapped runnable
The formats supports for the inputs and outputs of the wrapped Runnable
are described below.
RunnableWithMessageHistory must always be called with a config that contains
the appropriate parameters for the chat message history factory.
By default the runnable is expected to take a single configuration parameter
By default the Runnable is expected to take a single configuration parameter
called `session_id` which is a string. This parameter is used to create a new
or look up an existing chat message history that matches the given session_id.
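A self-contained sketch of the session_id flow, with a toy in-memory history and an echo lambda standing in for a chat model (the "question" and "history" key names are choices made for this example, not requirements):

.. code-block:: python

    from typing import Dict, List

    from langchain_core.chat_history import BaseChatMessageHistory
    from langchain_core.messages import AIMessage, BaseMessage
    from langchain_core.runnables import RunnableLambda
    from langchain_core.runnables.history import RunnableWithMessageHistory


    class InMemoryHistory(BaseChatMessageHistory):
        """Toy chat history kept in a plain list (illustrative only)."""

        def __init__(self) -> None:
            self.messages: List[BaseMessage] = []

        def add_message(self, message: BaseMessage) -> None:
            self.messages.append(message)

        def clear(self) -> None:
            self.messages = []


    store: Dict[str, InMemoryHistory] = {}


    def get_session_history(session_id: str) -> InMemoryHistory:
        return store.setdefault(session_id, InMemoryHistory())


    # Stand-in "model" that echoes the question, so the sketch runs without an LLM.
    echo = RunnableLambda(
        lambda x: AIMessage(
            content=f"You said: {x['question']!r} ({len(x['history'])} prior messages)"
        )
    )

    with_history = RunnableWithMessageHistory(
        echo,
        get_session_history,
        input_messages_key="question",
        history_messages_key="history",
    )
    with_history.invoke({"question": "hi"}, config={"configurable": {"session_id": "abc"}})
    with_history.invoke({"question": "again"}, config={"configurable": {"session_id": "abc"}})
    # The second call sees the first Human/AI exchange as prior messages for session "abc".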

View File

@ -48,23 +48,23 @@ if TYPE_CHECKING:
def identity(x: Other) -> Other:
"""An identity function"""
"""Identity function"""
return x
async def aidentity(x: Other) -> Other:
"""An async identity function"""
"""Async identity function"""
return x
class RunnablePassthrough(RunnableSerializable[Other, Other]):
"""A runnable to passthrough inputs unchanged or with additional keys.
"""Runnable to passthrough inputs unchanged or with additional keys.
This runnable behaves almost like the identity function, except that it
can be configured to add additional keys to the output, if the input is a
dict.
The examples below demonstrate this runnable works using a few simple
The examples below demonstrate this Runnable works using a few simple
chains. The chains rely on simple lambdas to make the examples easy to execute
and experiment with.
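In the same spirit, a tiny sketch:

.. code-block:: python

    from langchain_core.runnables import RunnableLambda, RunnablePassthrough

    RunnablePassthrough().invoke("unchanged")  # -> "unchanged"

    chain = RunnablePassthrough.assign(doubled=RunnableLambda(lambda d: d["num"] * 2))
    chain.invoke({"num": 3})  # -> {"num": 3, "doubled": 6}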
@ -572,7 +572,7 @@ class RunnableAssign(RunnableSerializable[Dict[str, Any], Dict[str, Any]]):
class RunnablePick(RunnableSerializable[Dict[str, Any], Dict[str, Any]]):
"""
A runnable that picks keys from Dict[str, Any] inputs.
Runnable that picks keys from Dict[str, Any] inputs.
"""
keys: Union[str, List[str]]
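A quick sketch (imported from langchain_core.runnables.passthrough, the module this hunk touches):

.. code-block:: python

    from langchain_core.runnables.passthrough import RunnablePick

    RunnablePick("name").invoke({"name": "Ada", "age": 36})
    # -> "Ada"
    RunnablePick(["name", "age"]).invoke({"name": "Ada", "id": 7, "age": 36})
    # -> {"name": "Ada", "age": 36}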

View File

@ -37,7 +37,7 @@ U = TypeVar("U")
class RunnableRetry(RunnableBindingBase[Input, Output]):
"""Retry a Runnable if it fails.
A RunnableRetry helps can be used to add retry logic to any object
RunnableRetry can be used to add retry logic to any object
that subclasses the base Runnable.
Such retries are especially useful for network calls that may fail
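The usual entry point is Runnable.with_retry(), which wraps the Runnable in a RunnableRetry; a minimal sketch:

.. code-block:: python

    from langchain_core.runnables import RunnableLambda

    attempts = {"count": 0}

    def flaky(x: int) -> int:
        attempts["count"] += 1
        if attempts["count"] < 3:
            raise ValueError("transient failure")
        return x * 2

    chain = RunnableLambda(flaky).with_retry(
        retry_if_exception_type=(ValueError,),
        stop_after_attempt=3,
        wait_exponential_jitter=False,
    )
    chain.invoke(5)  # -> 10, succeeding on the third attempt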

View File

@ -34,7 +34,7 @@ from langchain_core.runnables.utils import (
class RouterInput(TypedDict):
"""A Router input.
"""Router input.
Attributes:
key: The key to route on.
@ -47,8 +47,8 @@ class RouterInput(TypedDict):
class RouterRunnable(RunnableSerializable[RouterInput, Output]):
"""
A runnable that routes to a set of runnables based on Input['key'].
Returns the output of the selected runnable.
Runnable that routes to a set of Runnables based on Input['key'].
Returns the output of the selected Runnable.
"""
runnables: Mapping[str, Runnable[Any, Output]]
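A minimal routing sketch:

.. code-block:: python

    from langchain_core.runnables import RouterRunnable, RunnableLambda

    router = RouterRunnable(
        {
            "add_one": RunnableLambda(lambda x: x + 1),
            "square": RunnableLambda(lambda x: x ** 2),
        }
    )
    router.invoke({"key": "square", "input": 4})   # -> 16
    router.invoke({"key": "add_one", "input": 4})  # -> 5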

View File

@ -12,32 +12,32 @@ class EventData(TypedDict, total=False):
input: Any
"""The input passed to the runnable that generated the event.
Inputs will sometimes be available at the *START* of the runnable, and
sometimes at the *END* of the runnable.
Inputs will sometimes be available at the *START* of the Runnable, and
sometimes at the *END* of the Runnable.
If a runnable is able to stream its inputs, then its input by definition
won't be known until the *END* of the runnable when it has finished streaming
If a Runnable is able to stream its inputs, then its input by definition
won't be known until the *END* of the Runnable when it has finished streaming
its inputs.
"""
output: Any
"""The output of the runnable that generated the event.
"""The output of the Runnable that generated the event.
Outputs will only be available at the *END* of the runnable.
Outputs will only be available at the *END* of the Runnable.
For most runnables, this field can be inferred from the `chunk` field,
though there might be some exceptions for special cased runnables (e.g., like
For most Runnables, this field can be inferred from the `chunk` field,
though there might be some exceptions for special cased Runnables (e.g., like
chat models), which may return more information.
"""
chunk: Any
"""A streaming chunk from the output that generated the event.
chunks support addition in general, and adding them up should result
in the output of the runnable that generated the event.
in the output of the Runnable that generated the event.
"""
class StreamEvent(TypedDict):
"""A streaming event.
"""Streaming event.
Schema of a streaming event which is produced from the astream_events method.
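A small sketch of consuming these events (the v1 event schema is assumed here):

.. code-block:: python

    import asyncio

    from langchain_core.runnables import RunnableLambda

    async def main() -> None:
        chain = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)
        async for event in chain.astream_events(1, version="v1"):
            # e.g. on_chain_start / on_chain_stream / on_chain_end, with EventData under "data"
            print(event["event"], event["name"], event["data"])

    asyncio.run(main())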

View File

@ -46,7 +46,15 @@ async def gated_coro(semaphore: asyncio.Semaphore, coro: Coroutine) -> Any:
async def gather_with_concurrency(n: Union[int, None], *coros: Coroutine) -> list:
"""Gather coroutines with a limit on the number of concurrent coroutines."""
"""Gather coroutines with a limit on the number of concurrent coroutines.
Args:
n: The number of coroutines to run concurrently.
coros: The coroutines to run.
Returns:
The results of the coroutines.
"""
if n is None:
return await asyncio.gather(*coros)
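For example (a minimal sketch; the helper lives in langchain_core.runnables.utils alongside the definitions below):

.. code-block:: python

    import asyncio

    from langchain_core.runnables.utils import gather_with_concurrency

    async def work(i: int) -> int:
        await asyncio.sleep(0.01)
        return i * i

    async def main() -> None:
        # at most two coroutines run concurrently
        print(await gather_with_concurrency(2, *(work(i) for i in range(5))))
        # -> [0, 1, 4, 9, 16]

    asyncio.run(main())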
@ -344,7 +352,7 @@ async def aadd(addables: AsyncIterable[Addable]) -> Optional[Addable]:
class ConfigurableField(NamedTuple):
"""A field that can be configured by the user."""
"""Field that can be configured by the user."""
id: str
@ -358,7 +366,7 @@ class ConfigurableField(NamedTuple):
class ConfigurableFieldSingleOption(NamedTuple):
"""A field that can be configured by the user with a default value."""
"""Field that can be configured by the user with a default value."""
id: str
options: Mapping[str, Any]
@ -373,7 +381,7 @@ class ConfigurableFieldSingleOption(NamedTuple):
class ConfigurableFieldMultiOption(NamedTuple):
"""A field that can be configured by the user with multiple default values."""
"""Field that can be configured by the user with multiple default values."""
id: str
options: Mapping[str, Any]
@ -393,7 +401,7 @@ AnyConfigurableField = Union[
class ConfigurableFieldSpec(NamedTuple):
"""A field that can be configured by the user. It is a specification of a field."""
"""Field that can be configured by the user. It is a specification of a field."""
id: str
annotation: Any

View File

@ -99,10 +99,10 @@ def create_schema_from_function(
class ToolException(Exception):
"""An optional exception that tool throws when execution error occurs.
"""Optional exception that tool throws when execution error occurs.
When this exception is thrown, the agent will not stop working,
but will handle the exception according to the handle_tool_error
but it will handle the exception according to the handle_tool_error
variable of the tool, and the processing result will be returned
to the agent as observation, and printed in red on the console.
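    A minimal sketch of the handle_tool_error flow:

    .. code-block:: python

        from langchain_core.tools import StructuredTool, ToolException

        def find_user(name: str) -> str:
            raise ToolException(f"No user named {name!r} was found.")

        tool = StructuredTool.from_function(
            func=find_user,
            name="find_user",
            description="Look up a user by name.",
            handle_tool_error=True,  # return the error text as the observation instead of raising
        )
        tool.run({"name": "ada"})  # -> "No user named 'ada' was found."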
"""

View File

@ -31,7 +31,7 @@ def wait_for_all_evaluators() -> None:
class EvaluatorCallbackHandler(BaseTracer):
"""A tracer that runs a run evaluator whenever a run is persisted.
"""Tracer that runs a run evaluator whenever a run is persisted.
Parameters
----------

View File

@ -63,7 +63,7 @@ def _get_executor() -> ThreadPoolExecutor:
class LangChainTracer(BaseTracer):
"""An implementation of the SharedTracer that POSTS to the langchain endpoint."""
"""Implementation of the SharedTracer that POSTS to the LangChain endpoint."""
def __init__(
self,

View File

@ -37,7 +37,7 @@ def _get_endpoint() -> str:
@deprecated("0.1.0", alternative="LangChainTracer", removal="0.2.0")
class LangChainTracerV1(BaseTracer):
"""An implementation of the SharedTracer that POSTS to the langchain endpoint."""
"""Implementation of the SharedTracer that POSTS to the langchain endpoint."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize the LangChain tracer."""

View File

@ -88,7 +88,7 @@ class RunState(TypedDict):
class RunLogPatch:
"""A patch to the run log."""
"""Patch to the run log."""
ops: List[Dict[str, Any]]
"""List of jsonpatch operations, which describe how to create the run state
@ -121,7 +121,7 @@ class RunLogPatch:
class RunLog(RunLogPatch):
"""A run log."""
"""Run log."""
state: RunState
"""Current state of the log, obtained from applying all ops in sequence."""
@ -159,7 +159,7 @@ T = TypeVar("T")
class LogStreamCallbackHandler(BaseTracer):
"""A tracer that streams run logs to a stream."""
"""Tracer that streams run logs to a stream."""
def __init__(
self,

View File

@ -12,7 +12,7 @@ Listener = Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
class RootListenersTracer(BaseTracer):
"""A tracer that calls listeners on run start, end, and error."""
"""Tracer that calls listeners on run start, end, and error."""
def __init__(
self,

View File

@ -9,7 +9,7 @@ from langchain_core.tracers.schemas import Run
class RunCollectorCallbackHandler(BaseTracer):
"""
A tracer that collects all nested runs in a list.
Tracer that collects all nested runs in a list.
This tracer is useful for inspection and evaluation purposes.
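A quick inspection sketch:

.. code-block:: python

    from langchain_core.runnables import RunnableLambda
    from langchain_core.tracers.run_collector import RunCollectorCallbackHandler

    collector = RunCollectorCallbackHandler()
    RunnableLambda(lambda x: x + 1).invoke(1, config={"callbacks": [collector]})
    print(len(collector.traced_runs))  # -> 1 collected root run, with inputs, outputs, and timings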

View File

@ -4,7 +4,7 @@ from typing import Any, List, Mapping, Sequence
class StrictFormatter(Formatter):
"""A subclass of formatter that checks for extra keys."""
"""Formatter that checks for extra keys."""
def vformat(
self, format_string: str, args: Sequence, kwargs: Mapping[str, Any]
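A short sketch of the stricter behaviour (the extra-key check is expected to raise; KeyError in the implementation as written):

.. code-block:: python

    from langchain_core.utils.formatting import StrictFormatter

    formatter = StrictFormatter()
    formatter.format("Hello {name}", name="Ada")  # -> "Hello Ada"

    try:
        formatter.format("Hello {name}", name="Ada", extra="unused")
    except KeyError as err:
        print("extra keys are rejected:", err)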

View File

@ -1686,7 +1686,7 @@
]),
'definitions': dict({
'AIMessage': dict({
'description': 'A Message from an AI.',
'description': 'Message from an AI.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -1734,7 +1734,7 @@
'type': 'object',
}),
'ChatMessage': dict({
'description': 'A Message that can be assigned an arbitrary speaker (i.e. role).',
'description': 'Message that can be assigned an arbitrary speaker (i.e. role).',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -1829,7 +1829,7 @@
'type': 'object',
}),
'FunctionMessage': dict({
'description': 'A Message for passing the result of executing a function back to a model.',
'description': 'Message for passing the result of executing a function back to a model.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -1877,7 +1877,7 @@
'type': 'object',
}),
'HumanMessage': dict({
'description': 'A Message from a human.',
'description': 'Message from a human.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -1948,7 +1948,7 @@
}),
'SystemMessage': dict({
'description': '''
A Message for priming AI behavior, usually passed in as the first of a sequence
Message for priming AI behavior, usually passed in as the first of a sequence
of input messages.
''',
'properties': dict({
@ -1993,7 +1993,7 @@
'type': 'object',
}),
'ToolMessage': dict({
'description': 'A Message for passing the result of executing a tool back to a model.',
'description': 'Message for passing the result of executing a tool back to a model.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -2084,7 +2084,7 @@
]),
'definitions': dict({
'AIMessage': dict({
'description': 'A Message from an AI.',
'description': 'Message from an AI.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -2132,7 +2132,7 @@
'type': 'object',
}),
'ChatMessage': dict({
'description': 'A Message that can be assigned an arbitrary speaker (i.e. role).',
'description': 'Message that can be assigned an arbitrary speaker (i.e. role).',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -2227,7 +2227,7 @@
'type': 'object',
}),
'FunctionMessage': dict({
'description': 'A Message for passing the result of executing a function back to a model.',
'description': 'Message for passing the result of executing a function back to a model.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -2275,7 +2275,7 @@
'type': 'object',
}),
'HumanMessage': dict({
'description': 'A Message from a human.',
'description': 'Message from a human.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -2346,7 +2346,7 @@
}),
'SystemMessage': dict({
'description': '''
A Message for priming AI behavior, usually passed in as the first of a sequence
Message for priming AI behavior, usually passed in as the first of a sequence
of input messages.
''',
'properties': dict({
@ -2391,7 +2391,7 @@
'type': 'object',
}),
'ToolMessage': dict({
'description': 'A Message for passing the result of executing a tool back to a model.',
'description': 'Message for passing the result of executing a tool back to a model.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -2466,7 +2466,7 @@
]),
'definitions': dict({
'AIMessage': dict({
'description': 'A Message from an AI.',
'description': 'Message from an AI.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -2514,7 +2514,7 @@
'type': 'object',
}),
'ChatMessage': dict({
'description': 'A Message that can be assigned an arbitrary speaker (i.e. role).',
'description': 'Message that can be assigned an arbitrary speaker (i.e. role).',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -2562,7 +2562,7 @@
'type': 'object',
}),
'FunctionMessage': dict({
'description': 'A Message for passing the result of executing a function back to a model.',
'description': 'Message for passing the result of executing a function back to a model.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -2610,7 +2610,7 @@
'type': 'object',
}),
'HumanMessage': dict({
'description': 'A Message from a human.',
'description': 'Message from a human.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -2659,7 +2659,7 @@
}),
'SystemMessage': dict({
'description': '''
A Message for priming AI behavior, usually passed in as the first of a sequence
Message for priming AI behavior, usually passed in as the first of a sequence
of input messages.
''',
'properties': dict({
@ -2704,7 +2704,7 @@
'type': 'object',
}),
'ToolMessage': dict({
'description': 'A Message for passing the result of executing a tool back to a model.',
'description': 'Message for passing the result of executing a tool back to a model.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -2767,7 +2767,7 @@
]),
'definitions': dict({
'AIMessage': dict({
'description': 'A Message from an AI.',
'description': 'Message from an AI.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -2815,7 +2815,7 @@
'type': 'object',
}),
'ChatMessage': dict({
'description': 'A Message that can be assigned an arbitrary speaker (i.e. role).',
'description': 'Message that can be assigned an arbitrary speaker (i.e. role).',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -2910,7 +2910,7 @@
'type': 'object',
}),
'FunctionMessage': dict({
'description': 'A Message for passing the result of executing a function back to a model.',
'description': 'Message for passing the result of executing a function back to a model.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -2958,7 +2958,7 @@
'type': 'object',
}),
'HumanMessage': dict({
'description': 'A Message from a human.',
'description': 'Message from a human.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -3029,7 +3029,7 @@
}),
'SystemMessage': dict({
'description': '''
A Message for priming AI behavior, usually passed in as the first of a sequence
Message for priming AI behavior, usually passed in as the first of a sequence
of input messages.
''',
'properties': dict({
@ -3074,7 +3074,7 @@
'type': 'object',
}),
'ToolMessage': dict({
'description': 'A Message for passing the result of executing a tool back to a model.',
'description': 'Message for passing the result of executing a tool back to a model.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -3137,7 +3137,7 @@
]),
'definitions': dict({
'AIMessage': dict({
'description': 'A Message from an AI.',
'description': 'Message from an AI.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -3185,7 +3185,7 @@
'type': 'object',
}),
'ChatMessage': dict({
'description': 'A Message that can be assigned an arbitrary speaker (i.e. role).',
'description': 'Message that can be assigned an arbitrary speaker (i.e. role).',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -3280,7 +3280,7 @@
'type': 'object',
}),
'FunctionMessage': dict({
'description': 'A Message for passing the result of executing a function back to a model.',
'description': 'Message for passing the result of executing a function back to a model.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -3328,7 +3328,7 @@
'type': 'object',
}),
'HumanMessage': dict({
'description': 'A Message from a human.',
'description': 'Message from a human.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -3399,7 +3399,7 @@
}),
'SystemMessage': dict({
'description': '''
A Message for priming AI behavior, usually passed in as the first of a sequence
Message for priming AI behavior, usually passed in as the first of a sequence
of input messages.
''',
'properties': dict({
@ -3444,7 +3444,7 @@
'type': 'object',
}),
'ToolMessage': dict({
'description': 'A Message for passing the result of executing a tool back to a model.',
'description': 'Message for passing the result of executing a tool back to a model.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -3499,7 +3499,7 @@
dict({
'definitions': dict({
'AIMessage': dict({
'description': 'A Message from an AI.',
'description': 'Message from an AI.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -3547,7 +3547,7 @@
'type': 'object',
}),
'ChatMessage': dict({
'description': 'A Message that can be assigned an arbitrary speaker (i.e. role).',
'description': 'Message that can be assigned an arbitrary speaker (i.e. role).',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -3642,7 +3642,7 @@
'type': 'object',
}),
'FunctionMessage': dict({
'description': 'A Message for passing the result of executing a function back to a model.',
'description': 'Message for passing the result of executing a function back to a model.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -3690,7 +3690,7 @@
'type': 'object',
}),
'HumanMessage': dict({
'description': 'A Message from a human.',
'description': 'Message from a human.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -3772,7 +3772,7 @@
}),
'SystemMessage': dict({
'description': '''
A Message for priming AI behavior, usually passed in as the first of a sequence
Message for priming AI behavior, usually passed in as the first of a sequence
of input messages.
''',
'properties': dict({
@ -3817,7 +3817,7 @@
'type': 'object',
}),
'ToolMessage': dict({
'description': 'A Message for passing the result of executing a tool back to a model.',
'description': 'Message for passing the result of executing a tool back to a model.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -3899,7 +3899,7 @@
]),
'definitions': dict({
'AIMessage': dict({
'description': 'A Message from an AI.',
'description': 'Message from an AI.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -3947,7 +3947,7 @@
'type': 'object',
}),
'ChatMessage': dict({
'description': 'A Message that can be assigned an arbitrary speaker (i.e. role).',
'description': 'Message that can be assigned an arbitrary speaker (i.e. role).',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -3995,7 +3995,7 @@
'type': 'object',
}),
'FunctionMessage': dict({
'description': 'A Message for passing the result of executing a function back to a model.',
'description': 'Message for passing the result of executing a function back to a model.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -4043,7 +4043,7 @@
'type': 'object',
}),
'HumanMessage': dict({
'description': 'A Message from a human.',
'description': 'Message from a human.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -4092,7 +4092,7 @@
}),
'SystemMessage': dict({
'description': '''
A Message for priming AI behavior, usually passed in as the first of a sequence
Message for priming AI behavior, usually passed in as the first of a sequence
of input messages.
''',
'properties': dict({
@ -4137,7 +4137,7 @@
'type': 'object',
}),
'ToolMessage': dict({
'description': 'A Message for passing the result of executing a tool back to a model.',
'description': 'Message for passing the result of executing a tool back to a model.',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',

View File

@ -320,7 +320,7 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
"definitions": {
"AIMessage": {
"title": "AIMessage",
"description": "A Message from an AI.",
"description": "Message from an AI.",
"type": "object",
"properties": {
"content": {
@ -355,7 +355,7 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
},
"HumanMessage": {
"title": "HumanMessage",
"description": "A Message from a human.",
"description": "Message from a human.",
"type": "object",
"properties": {
"content": {
@ -390,7 +390,7 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
},
"ChatMessage": {
"title": "ChatMessage",
"description": "A Message that can be assigned an arbitrary speaker (i.e. role).", # noqa
"description": "Message that can be assigned an arbitrary speaker (i.e. role).", # noqa
"type": "object",
"properties": {
"content": {
@ -421,7 +421,7 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
},
"SystemMessage": {
"title": "SystemMessage",
"description": "A Message for priming AI behavior, usually passed in as the first of a sequence\nof input messages.", # noqa
"description": "Message for priming AI behavior, usually passed in as the first of a sequence\nof input messages.", # noqa
"type": "object",
"properties": {
"content": {
@ -451,7 +451,7 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
},
"FunctionMessage": {
"title": "FunctionMessage",
"description": "A Message for passing the result of executing a function back to a model.", # noqa
"description": "Message for passing the result of executing a function back to a model.", # noqa
"type": "object",
"properties": {
"content": {
@ -482,7 +482,7 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
},
"ToolMessage": {
"title": "ToolMessage",
"description": "A Message for passing the result of executing a tool back to a model.", # noqa
"description": "Message for passing the result of executing a tool back to a model.", # noqa
"type": "object",
"properties": {
"content": {