Mirror of https://github.com/hwchase17/langchain.git
Synced 2025-06-28 17:38:36 +00:00
core[patch]: Add doc-strings to outputs, fix @root_validator (#23190)
- Document outputs namespace
- Update a vanilla @root_validator that was missed
This commit is contained in:
parent 8698cb9b28
commit 3c917204dc
@@ -1,6 +1,22 @@
 """**Output** classes are used to represent the output of a language model call
 and the output of a chat.
+
+The top container for information is the `LLMResult` object. `LLMResult` is used by
+both chat models and LLMs. This object contains the output of the language
+model and any additional information that the model provider wants to return.
+
+When invoking models via the standard runnable methods (e.g. invoke, batch, etc.):
+- Chat models will return `AIMessage` objects.
+- LLMs will return regular text strings.
+
+In addition, users can access the raw output of either LLMs or chat models via
+callbacks. The on_chat_model_end and on_llm_end callbacks will return an
+LLMResult object containing the generated outputs and any additional information
+returned by the model provider.
+
+In general, if information is already available
+in the AIMessage object, it is recommended to access it from there rather than
+from the `LLMResult` object.
 """
 from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk
 from langchain_core.outputs.chat_result import ChatResult
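For illustration only (not part of this diff), a minimal sketch of capturing the raw LLMResult through a callback handler. `chat_model` stands in for any configured chat model; note that on the handler side it is the on_llm_end hook that fires for chat models as well as LLMs.

    from typing import Any, Optional

    from langchain_core.callbacks import BaseCallbackHandler
    from langchain_core.outputs import LLMResult


    class CaptureLLMResult(BaseCallbackHandler):
        """Stores the raw LLMResult emitted when a model call finishes."""

        def __init__(self) -> None:
            self.result: Optional[LLMResult] = None

        def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
            self.result = response


    # handler = CaptureLLMResult()
    # message = chat_model.invoke("hi", config={"callbacks": [handler]})
    # `message` is an AIMessage; `handler.result` is the raw LLMResult.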
@@ -9,7 +9,18 @@ from langchain_core.utils._merge import merge_dicts


 class ChatGeneration(Generation):
-    """A single chat generation output."""
+    """A single chat generation output.
+
+    A subclass of Generation that represents the response from a chat model
+    that generates chat messages.
+
+    The `message` attribute is a structured representation of the chat message.
+    Most of the time, the message will be of type `AIMessage`.
+
+    Users working with chat models will usually access information via either
+    `AIMessage` (returned from runnable interfaces) or `LLMResult` (available
+    via callbacks).
+    """

     text: str = ""
     """*SHOULD NOT BE SET DIRECTLY* The text contents of the output message."""
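As a quick illustration (not from the commit itself): the `message` attribute carries the structured output, and `text` is derived from it rather than set directly.

    from langchain_core.messages import AIMessage
    from langchain_core.outputs import ChatGeneration

    gen = ChatGeneration(message=AIMessage(content="Hello!"))
    assert gen.text == "Hello!"  # filled in from the message; never set directly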
@@ -19,7 +30,7 @@ class ChatGeneration(Generation):
     type: Literal["ChatGeneration"] = "ChatGeneration"  # type: ignore[assignment]
     """Type is used exclusively for serialization purposes."""

-    @root_validator
+    @root_validator(pre=False, skip_on_failure=True)
     def set_text(cls, values: Dict[str, Any]) -> Dict[str, Any]:
         """Set the text attribute to be the contents of the message."""
         try:
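To illustrate what the new decorator arguments mean, here is a standalone pydantic v1 sketch (not the library's code): with pre=False the validator runs after per-field validation, and skip_on_failure=True skips it when a field has already failed, so it never sees a half-validated values dict.

    from typing import Any, Dict

    from pydantic.v1 import BaseModel, root_validator  # on pydantic 1.x: from pydantic import ...


    class Example(BaseModel):
        message: str
        text: str = ""

        @root_validator(pre=False, skip_on_failure=True)
        def set_text(cls, values: Dict[str, Any]) -> Dict[str, Any]:
            # Runs after field validation; skipped if `message` failed to validate.
            values["text"] = values["message"]
            return values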
@@ -5,11 +5,31 @@ from langchain_core.pydantic_v1 import BaseModel


 class ChatResult(BaseModel):
-    """Class that contains all results for a single chat model call."""
+    """Use to represent the result of a chat model call with a single prompt.
+
+    This container is used internally by some chat model implementations;
+    it will eventually be mapped to a more general `LLMResult` object and
+    then projected into an `AIMessage` object.
+
+    LangChain users working with chat models will usually access information via
+    `AIMessage` (returned from runnable interfaces) or `LLMResult` (available
+    via callbacks). Please refer to the `AIMessage` and `LLMResult` schema
+    documentation for more information.
+    """

     generations: List[ChatGeneration]
-    """List of the chat generations. This is a List because an input can have multiple
-    candidate generations.
-    """
+    """List of the chat generations.
+
+    Generations is a list to allow for multiple candidate generations for a single
+    input prompt.
+    """
     llm_output: Optional[dict] = None
-    """For arbitrary LLM provider specific output."""
+    """For arbitrary LLM provider-specific output.
+
+    This dictionary is a free-form dictionary that can contain any information that
+    the provider wants to return. It is not standardized and is provider-specific.
+
+    Users should generally avoid relying on this field and instead rely on
+    accessing relevant information from standardized fields present in
+    AIMessage.
+    """
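For illustration (hypothetical values, not from this commit), a ChatResult as a chat model implementation might build one internally:

    from langchain_core.messages import AIMessage
    from langchain_core.outputs import ChatGeneration, ChatResult

    result = ChatResult(
        generations=[ChatGeneration(message=AIMessage(content="Hi there!"))],
        llm_output={"model_name": "example-model"},  # free-form, provider-specific
    )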
@@ -7,18 +7,31 @@ from langchain_core.utils._merge import merge_dicts


 class Generation(Serializable):
-    """A single text generation output."""
+    """A single text generation output.
+
+    Generation represents the response from an "old-fashioned" LLM that
+    generates regular text (not chat messages).
+
+    This model is used internally by chat models and will eventually
+    be mapped to a more general `LLMResult` object, and then projected into
+    an `AIMessage` object.
+
+    LangChain users working with chat models will usually access information via
+    `AIMessage` (returned from runnable interfaces) or `LLMResult` (available
+    via callbacks). Please refer to the `AIMessage` and `LLMResult` schema
+    documentation for more information.
+    """

     text: str
     """Generated text output."""

     generation_info: Optional[Dict[str, Any]] = None
-    """Raw response from the provider. May include things like the
-    reason for finishing or token log probabilities.
-    """
+    """Raw response from the provider.
+
+    May include things like the reason for finishing or token log probabilities.
+    """
     type: Literal["Generation"] = "Generation"
     """Type is used exclusively for serialization purposes."""
-    # TODO: add log probs as separate attribute

     @classmethod
     def is_lc_serializable(cls) -> bool:
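A hand-built example (the generation_info keys are hypothetical; real keys vary by provider):

    from langchain_core.outputs import Generation

    gen = Generation(
        text="Paris is the capital of France.",
        generation_info={"finish_reason": "stop"},  # raw provider metadata
    )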
@@ -9,13 +9,38 @@ from langchain_core.pydantic_v1 import BaseModel


 class LLMResult(BaseModel):
-    """Class that contains all results for a batched LLM call."""
+    """A container for results of an LLM call.
+
+    Both chat models and LLMs generate an LLMResult object. This object contains
+    the generated outputs and any additional information that the model provider
+    wants to return.
+    """

     generations: List[List[Generation]]
-    """List of generated outputs. This is a List[List[]] because
-    each input could have multiple candidate generations."""
+    """Generated outputs.
+
+    The first dimension of the list represents completions for different input
+    prompts.
+
+    The second dimension of the list represents different candidate generations
+    for a given prompt.
+
+    When returned from an LLM the type is List[List[Generation]].
+    When returned from a chat model the type is List[List[ChatGeneration]].
+
+    ChatGeneration is a subclass of Generation that has a field for a structured
+    chat message.
+    """
     llm_output: Optional[dict] = None
-    """Arbitrary LLM provider-specific output."""
+    """For arbitrary LLM provider-specific output.
+
+    This dictionary is a free-form dictionary that can contain any information that
+    the provider wants to return. It is not standardized and is provider-specific.
+
+    Users should generally avoid relying on this field and instead rely on
+    accessing relevant information from standardized fields present in
+    AIMessage.
+    """
     run: Optional[List[RunInfo]] = None
     """List of metadata info for model call for each input."""
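A hand-built sketch of the two-dimensional generations layout (illustrative texts only):

    from langchain_core.outputs import Generation, LLMResult

    result = LLMResult(
        generations=[
            [Generation(text="answer A"), Generation(text="answer B")],  # prompt 0: two candidates
            [Generation(text="only answer")],                            # prompt 1: one candidate
        ]
    )
    assert result.generations[0][1].text == "answer B"  # [prompt][candidate]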
@@ -6,7 +6,15 @@ from langchain_core.pydantic_v1 import BaseModel


 class RunInfo(BaseModel):
-    """Class that contains metadata for a single execution of a Chain or model."""
+    """Class that contains metadata for a single execution of a Chain or model.
+
+    Here for backwards compatibility with older versions of langchain_core.
+
+    This model will likely be deprecated in the future.
+
+    Users can acquire the run_id information from callbacks or via run_id
+    information present in the astream_events API (depending on the use case).
+    """

     run_id: UUID
     """A unique identifier for the model or chain run."""
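A hedged sketch of pulling a run_id from the astream_events API (assumes a recent langchain_core and any configured runnable `model`):

    async def first_run_id(model) -> str:
        # Every event emitted by astream_events carries the run_id of its run.
        async for event in model.astream_events("hi", version="v2"):
            return event["run_id"]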