diff --git a/libs/core/langchain_core/documents/base.py b/libs/core/langchain_core/documents/base.py
index 9a2594455be..e83e012a545 100644
--- a/libs/core/langchain_core/documents/base.py
+++ b/libs/core/langchain_core/documents/base.py
@@ -32,9 +32,9 @@ class BaseMedia(Serializable):
 
     id: Optional[str] = None
     """An optional identifier for the document.
 
-    Ideally this should be unique across the document collection and formatted 
+    Ideally this should be unique across the document collection and formatted
     as a UUID, but this will not be enforced.
-    
+
     .. versionadded:: 0.2.11
     """
diff --git a/libs/core/langchain_core/indexing/base.py b/libs/core/langchain_core/indexing/base.py
index 5ce5422c3d5..824263415bd 100644
--- a/libs/core/langchain_core/indexing/base.py
+++ b/libs/core/langchain_core/indexing/base.py
@@ -465,26 +465,26 @@ class DeleteResponse(TypedDict, total=False):
 
     num_deleted: int
     """The number of items that were successfully deleted.
-    
+
     If returned, this should only include *actual* deletions.
-    
-    If the ID did not exist to begin with, 
+
+    If the ID did not exist to begin with,
     it should not be included in this count.
     """
 
     succeeded: Sequence[str]
     """The IDs that were successfully deleted.
-    
+
     If returned, this should only include *actual* deletions.
-    
+
     If the ID did not exist to begin with, it should not be included in this list.
     """
 
     failed: Sequence[str]
     """The IDs that failed to be deleted.
-    
-    Please note that deleting an ID that 
+
+    Please note that deleting an ID that
     does not exist is **NOT** considered a failure.
     """
diff --git a/libs/core/langchain_core/language_models/base.py b/libs/core/langchain_core/language_models/base.py
index 44ccd90e550..2df436f780f 100644
--- a/libs/core/langchain_core/language_models/base.py
+++ b/libs/core/langchain_core/language_models/base.py
@@ -100,12 +100,12 @@ class BaseLanguageModel(
 
     cache: Union[BaseCache, bool, None] = None
     """Whether to cache the response.
-    
+
     * If true, will use the global cache.
     * If false, will not use a cache
     * If None, will use the global cache if it's set, otherwise no cache.
     * If instance of BaseCache, will use the provided cache.
-    
+
     Caching is not currently supported for streaming methods of models.
     """
     verbose: bool = Field(default_factory=_get_verbosity, exclude=True, repr=False)
diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py
index 8b19a510d2a..00d2a054ca5 100644
--- a/libs/core/langchain_core/language_models/chat_models.py
+++ b/libs/core/langchain_core/language_models/chat_models.py
@@ -208,8 +208,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
 
     disable_streaming: Union[bool, Literal["tool_calling"]] = False
     """Whether to disable streaming for this model.
-    
-    If streaming is bypassed, then ``stream()/astream()`` will defer to 
+
+    If streaming is bypassed, then ``stream()/astream()`` will defer to
     ``invoke()/ainvoke()``.
 
     - If True, will always bypass streaming case.
diff --git a/libs/core/langchain_core/language_models/fake.py b/libs/core/langchain_core/language_models/fake.py
index 9465a4d93f1..74545f3eca9 100644
--- a/libs/core/langchain_core/language_models/fake.py
+++ b/libs/core/langchain_core/language_models/fake.py
@@ -21,12 +21,12 @@ class FakeListLLM(LLM):
     # it's only used by sub-classes.
     sleep: Optional[float] = None
     """Sleep time in seconds between responses.
-    
+
     Ignored by FakeListLLM, but used by sub-classes.
     """
     i: int = 0
     """Internally incremented after every model invocation.
-    
+
     Useful primarily for testing purposes.
     """
diff --git a/libs/core/langchain_core/messages/ai.py b/libs/core/langchain_core/messages/ai.py
index 9564a3bc063..03a22e79764 100644
--- a/libs/core/langchain_core/messages/ai.py
+++ b/libs/core/langchain_core/messages/ai.py
@@ -65,7 +65,7 @@ class AIMessage(BaseMessage):
 
     example: bool = False
     """Use to denote that a message is part of an example conversation.
-    
+
     At the moment, this is ignored by most models. Usage is discouraged.
     """
 
@@ -215,7 +215,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["AIMessageChunk"] = "AIMessageChunk"  # type: ignore
-    """The type of the message (used for deserialization). 
+    """The type of the message (used for deserialization).
     Defaults to "AIMessageChunk"."""
 
     tool_call_chunks: list[ToolCallChunk] = []
diff --git a/libs/core/langchain_core/messages/base.py b/libs/core/langchain_core/messages/base.py
index 96fa595fd0e..bf020f3fbff 100644
--- a/libs/core/langchain_core/messages/base.py
+++ b/libs/core/langchain_core/messages/base.py
@@ -25,7 +25,7 @@ class BaseMessage(Serializable):
 
     additional_kwargs: dict = Field(default_factory=dict)
     """Reserved for additional payload data associated with the message.
-    
+
     For example, for a message from an AI, this could include tool calls as
     encoded by the model provider.
     """
@@ -35,16 +35,16 @@ class BaseMessage(Serializable):
 
     type: str
     """The type of the message. Must be a string that is unique to the message type.
-    
+
     The purpose of this field is to allow for easy identification of the message type
     when deserializing messages.
     """
 
     name: Optional[str] = None
-    """An optional name for the message. 
-    
+    """An optional name for the message.
+
     This can be used to provide a human-readable name for the message.
-    
+
     Usage of this field is optional, and whether it's used or not is up to the
     model implementation.
     """
diff --git a/libs/core/langchain_core/messages/chat.py b/libs/core/langchain_core/messages/chat.py
index e05be83343a..8bfbcc51536 100644
--- a/libs/core/langchain_core/messages/chat.py
+++ b/libs/core/langchain_core/messages/chat.py
@@ -35,7 +35,7 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk):
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["ChatMessageChunk"] = "ChatMessageChunk"  # type: ignore
-    """The type of the message (used during serialization). 
+    """The type of the message (used during serialization).
     Defaults to "ChatMessageChunk"."""
 
     @classmethod
diff --git a/libs/core/langchain_core/messages/function.py b/libs/core/langchain_core/messages/function.py
index 448a720935d..f06fd4f3b65 100644
--- a/libs/core/langchain_core/messages/function.py
+++ b/libs/core/langchain_core/messages/function.py
@@ -42,7 +42,7 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used for serialization). 
+    """The type of the message (used for serialization).
Defaults to "FunctionMessageChunk".""" @classmethod diff --git a/libs/core/langchain_core/messages/human.py b/libs/core/langchain_core/messages/human.py index 96cbeabbe8a..8a847e39329 100644 --- a/libs/core/langchain_core/messages/human.py +++ b/libs/core/langchain_core/messages/human.py @@ -30,7 +30,7 @@ class HumanMessage(BaseMessage): example: bool = False """Use to denote that a message is part of an example conversation. - + At the moment, this is ignored by most models. Usage is discouraged. Defaults to False. """ @@ -66,7 +66,7 @@ class HumanMessageChunk(HumanMessage, BaseMessageChunk): # to make sure that the chunk variant can be discriminated from the # non-chunk variant. type: Literal["HumanMessageChunk"] = "HumanMessageChunk" # type: ignore[assignment] - """The type of the message (used for serialization). + """The type of the message (used for serialization). Defaults to "HumanMessageChunk".""" @classmethod diff --git a/libs/core/langchain_core/messages/system.py b/libs/core/langchain_core/messages/system.py index a182198ad03..a767fc21af2 100644 --- a/libs/core/langchain_core/messages/system.py +++ b/libs/core/langchain_core/messages/system.py @@ -60,7 +60,7 @@ class SystemMessageChunk(SystemMessage, BaseMessageChunk): # to make sure that the chunk variant can be discriminated from the # non-chunk variant. type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment] - """The type of the message (used for serialization). + """The type of the message (used for serialization). Defaults to "SystemMessageChunk".""" @classmethod diff --git a/libs/core/langchain_core/messages/tool.py b/libs/core/langchain_core/messages/tool.py index ec221083724..31d3f8b7056 100644 --- a/libs/core/langchain_core/messages/tool.py +++ b/libs/core/langchain_core/messages/tool.py @@ -58,11 +58,11 @@ class ToolMessage(BaseMessage): artifact: Any = None """Artifact of the Tool execution which is not meant to be sent to the model. - - Should only be specified if it is different from the message content, e.g. if only + + Should only be specified if it is different from the message content, e.g. if only a subset of the full tool output is being passed as message content but the full output is needed in other parts of the code. - + .. versionadded:: 0.2.17 """ @@ -191,7 +191,7 @@ class ToolCall(TypedDict): """The arguments to the tool call.""" id: Optional[str] """An identifier associated with the tool call. - + An identifier is needed to associate a tool call request with a tool call result in events when multiple concurrent tool calls are made. """ diff --git a/libs/core/langchain_core/output_parsers/json.py b/libs/core/langchain_core/output_parsers/json.py index 5bc43048a47..e9d3669e44e 100644 --- a/libs/core/langchain_core/output_parsers/json.py +++ b/libs/core/langchain_core/output_parsers/json.py @@ -42,7 +42,7 @@ class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]): """ pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None # type: ignore - """The Pydantic object to use for validation. + """The Pydantic object to use for validation. 
     If None, no validation is performed."""
 
     def _diff(self, prev: Optional[Any], next: Any) -> Any:
diff --git a/libs/core/langchain_core/output_parsers/openai_functions.py b/libs/core/langchain_core/output_parsers/openai_functions.py
index 8e29b4075a2..460333953ae 100644
--- a/libs/core/langchain_core/output_parsers/openai_functions.py
+++ b/libs/core/langchain_core/output_parsers/openai_functions.py
@@ -57,9 +57,9 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
 
     strict: bool = False
     """Whether to allow non-JSON-compliant strings.
-    
+
     See: https://docs.python.org/3/library/json.html#encoders-and-decoders
-    
+
     Useful when the parsed output may include unicode characters or new lines.
     """
 
@@ -226,7 +226,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
 
     pydantic_schema: Union[type[BaseModel], dict[str, type[BaseModel]]]
     """The pydantic schema to parse the output with.
-    
+
     If multiple schemas are provided, then the function name will be used to
     determine which schema to use.
     """
diff --git a/libs/core/langchain_core/output_parsers/openai_tools.py b/libs/core/langchain_core/output_parsers/openai_tools.py
index 5c624f43cb6..a51f0d77657 100644
--- a/libs/core/langchain_core/output_parsers/openai_tools.py
+++ b/libs/core/langchain_core/output_parsers/openai_tools.py
@@ -142,12 +142,12 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
     first_tool_only: bool = False
     """Whether to return only the first tool call.
 
-    If False, the result will be a list of tool calls, or an empty list 
+    If False, the result will be a list of tool calls, or an empty list
     if no tool calls are found.
 
     If true, and multiple tool calls are found, only the first one will be returned,
-    and the other tool calls will be ignored. 
-    If no tool calls are found, None will be returned. 
+    and the other tool calls will be ignored.
+    If no tool calls are found, None will be returned.
     """
 
     def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
diff --git a/libs/core/langchain_core/output_parsers/xml.py b/libs/core/langchain_core/output_parsers/xml.py
index d2b14f05d06..f476faf3137 100644
--- a/libs/core/langchain_core/output_parsers/xml.py
+++ b/libs/core/langchain_core/output_parsers/xml.py
@@ -12,12 +12,12 @@ from langchain_core.output_parsers.transform import BaseTransformOutputParser
 from langchain_core.runnables.utils import AddableDict
 
 XML_FORMAT_INSTRUCTIONS = """The output should be formatted as a XML file.
-1. Output should conform to the tags below. 
+1. Output should conform to the tags below.
 2. If tags are not given, make them on your own.
 3. Remember to always open and close all the tags.
 
 As an example, for the tags ["foo", "bar", "baz"]:
-1. String "<foo>\n   <bar>\n      <baz></baz>\n   </bar>\n</foo>" is a well-formatted instance of the schema. 
+1. String "<foo>\n   <bar>\n      <baz></baz>\n   </bar>\n</foo>" is a well-formatted instance of the schema.
 2. String "<foo>\n   <bar>\n   </foo>" is a badly-formatted instance.
 3. String "<foo>\n   <tag>\n   </tag>\n</foo>" is a badly-formatted instance.
 
@@ -146,23 +146,23 @@ class XMLOutputParser(BaseTransformOutputParser):
     )
     parser: Literal["defusedxml", "xml"] = "defusedxml"
     """Parser to use for XML parsing. Can be either 'defusedxml' or 'xml'.
-    
-    * 'defusedxml' is the default parser and is used to prevent XML vulnerabilities 
+
+    * 'defusedxml' is the default parser and is used to prevent XML vulnerabilities
       present in some distributions of Python's standard library xml.
       `defusedxml` is a wrapper around the standard library parser that
       sets up the parser with secure defaults.
     * 'xml' is the standard library parser.
-    
+
     Use `xml` only if you are sure that your distribution of the standard library
-    is not vulnerable to XML vulnerabilities. 
-    
+    is not vulnerable to XML vulnerabilities.
+
     Please review the following resources for more information:
-    
+
     * https://docs.python.org/3/library/xml.html#xml-vulnerabilities
-    * https://github.com/tiran/defusedxml 
-    
+    * https://github.com/tiran/defusedxml
+
     The standard library relies on libexpat for parsing XML:
-    https://github.com/libexpat/libexpat 
+    https://github.com/libexpat/libexpat
     """
 
     def get_format_instructions(self) -> str:
diff --git a/libs/core/langchain_core/outputs/chat_result.py b/libs/core/langchain_core/outputs/chat_result.py
index 1de553eab36..3098736438f 100644
--- a/libs/core/langchain_core/outputs/chat_result.py
+++ b/libs/core/langchain_core/outputs/chat_result.py
@@ -20,16 +20,16 @@ class ChatResult(BaseModel):
 
     generations: list[ChatGeneration]
     """List of the chat generations.
-    
+
     Generations is a list to allow for multiple candidate generations for a single
     input prompt.
     """
     llm_output: Optional[dict] = None
     """For arbitrary LLM provider specific output.
-    
+
     This dictionary is a free-form dictionary that can contain any information that the
     provider wants to return. It is not standardized and is provider-specific.
-    
+
     Users should generally avoid relying on this field and instead rely on accessing
     relevant information from standardized fields present in AIMessage.
diff --git a/libs/core/langchain_core/outputs/generation.py b/libs/core/langchain_core/outputs/generation.py
index 7022e6e03e3..bfd0cd70d75 100644
--- a/libs/core/langchain_core/outputs/generation.py
+++ b/libs/core/langchain_core/outputs/generation.py
@@ -26,8 +26,8 @@ class Generation(Serializable):
     """Generated text output."""
 
     generation_info: Optional[dict[str, Any]] = None
-    """Raw response from the provider. 
-    
+    """Raw response from the provider.
+
     May include things like the reason for finishing or token log probabilities.
     """
     type: Literal["Generation"] = "Generation"
diff --git a/libs/core/langchain_core/outputs/llm_result.py b/libs/core/langchain_core/outputs/llm_result.py
index 4430fae133f..c28d630b163 100644
--- a/libs/core/langchain_core/outputs/llm_result.py
+++ b/libs/core/langchain_core/outputs/llm_result.py
@@ -22,25 +22,25 @@ class LLMResult(BaseModel):
         list[Union[Generation, ChatGeneration, GenerationChunk, ChatGenerationChunk]]
     ]
     """Generated outputs.
-    
+
     The first dimension of the list represents completions for different input
     prompts.
-    
+
     The second dimension of the list represents different candidate generations
     for a given prompt.
-    
+
     When returned from an LLM the type is List[List[Generation]].
     When returned from a chat model the type is List[List[ChatGeneration]].
-    
+
     ChatGeneration is a subclass of Generation that has a field for a structured
     chat message.
     """
     llm_output: Optional[dict] = None
     """For arbitrary LLM provider specific output.
-    
+
     This dictionary is a free-form dictionary that can contain any information that the
     provider wants to return. It is not standardized and is provider-specific.
-    
+
     Users should generally avoid relying on this field and instead rely on accessing
     relevant information from standardized fields present in AIMessage.
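[reviewer note, not part of the patch] The LLMResult docstring above describes
`generations` as a list of lists: the outer index is the input prompt, the inner
index is the candidate generation. A minimal sketch of that shape, using the
FakeListLLM test stub touched earlier in this diff (the prompt and response
strings below are illustrative only):

    from langchain_core.language_models.fake import FakeListLLM

    llm = FakeListLLM(responses=["alpha", "beta"])
    result = llm.generate(["prompt one", "prompt two"])

    # One inner list per input prompt; each entry is one Generation candidate.
    assert len(result.generations) == 2
    assert result.generations[0][0].text == "alpha"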
diff --git a/libs/core/langchain_core/prompts/base.py b/libs/core/langchain_core/prompts/base.py
index 093b7938d47..a5a1c85400d 100644
--- a/libs/core/langchain_core/prompts/base.py
+++ b/libs/core/langchain_core/prompts/base.py
@@ -45,11 +45,11 @@ class BasePromptTemplate(
     """Base class for all prompt templates, returning a prompt."""
 
     input_variables: list[str]
-    """A list of the names of the variables whose values are required as inputs to the 
+    """A list of the names of the variables whose values are required as inputs to the
     prompt."""
     optional_variables: list[str] = Field(default=[])
     """optional_variables: A list of the names of the variables for placeholder
-    or MessagePlaceholder that are optional. These variables are auto inferred 
+    or MessagePlaceholder that are optional. These variables are auto inferred
     from the prompt and user need not provide them."""
     input_types: typing.Dict[str, Any] = Field(default_factory=dict, exclude=True)  # noqa: UP006
     """A dictionary of the types of the variables the prompt template expects.
@@ -58,7 +58,7 @@ class BasePromptTemplate(
     """How to parse the output of calling an LLM on this formatted prompt."""
     partial_variables: Mapping[str, Any] = Field(default_factory=dict)
     """A dictionary of the partial variables the prompt template carries.
-    
+
     Partial variables populate the template so that you don't need to
     pass them in every time you call the prompt."""
     metadata: Optional[typing.Dict[str, Any]] = None  # noqa: UP006
diff --git a/libs/core/langchain_core/prompts/chat.py b/libs/core/langchain_core/prompts/chat.py
index 7a721daa1e9..201f916ff07 100644
--- a/libs/core/langchain_core/prompts/chat.py
+++ b/libs/core/langchain_core/prompts/chat.py
@@ -196,12 +196,12 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
     """Name of variable to use as messages."""
 
     optional: bool = False
-    """If True format_messages can be called with no arguments and will return an empty 
-    list. If False then a named argument with name `variable_name` must be passed 
+    """If True format_messages can be called with no arguments and will return an empty
+    list. If False then a named argument with name `variable_name` must be passed
     in, even if the value is an empty list."""
 
     n_messages: Optional[PositiveInt] = None
-    """Maximum number of messages to include. If None, then will include all. 
+    """Maximum number of messages to include. If None, then will include all.
     Defaults to None."""
 
     @classmethod
diff --git a/libs/core/langchain_core/retrievers.py b/libs/core/langchain_core/retrievers.py
index aa977e62334..7462569ddd3 100644
--- a/libs/core/langchain_core/retrievers.py
+++ b/libs/core/langchain_core/retrievers.py
@@ -136,14 +136,14 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
     """Optional list of tags associated with the retriever. Defaults to None.
     These tags will be associated with each call to this retriever,
     and passed as arguments to the handlers defined in `callbacks`.
-    You can use these to eg identify a specific instance of a retriever with its 
+    You can use these to eg identify a specific instance of a retriever with its
     use case.
     """
     metadata: Optional[dict[str, Any]] = None
     """Optional metadata associated with the retriever. Defaults to None.
     This metadata will be associated with each call to this retriever,
     and passed as arguments to the handlers defined in `callbacks`.
-    You can use these to eg identify a specific instance of a retriever with its 
+    You can use these to eg identify a specific instance of a retriever with its
     use case.
     """
diff --git a/libs/core/langchain_core/runnables/config.py b/libs/core/langchain_core/runnables/config.py
index 765bf79a2dd..15868dc269f 100644
--- a/libs/core/langchain_core/runnables/config.py
+++ b/libs/core/langchain_core/runnables/config.py
@@ -65,7 +65,7 @@ class RunnableConfig(TypedDict, total=False):
 
     max_concurrency: Optional[int]
     """
-    Maximum number of parallel calls to make. If not provided, defaults to 
+    Maximum number of parallel calls to make. If not provided, defaults to
     ThreadPoolExecutor's default.
     """
 
@@ -78,7 +78,7 @@ class RunnableConfig(TypedDict, total=False):
     """
     Runtime values for attributes previously made configurable on this Runnable,
     or sub-Runnables, through .configurable_fields() or .configurable_alternatives().
-    Check .output_schema() for a description of the attributes that have been made 
+    Check .output_schema() for a description of the attributes that have been made
     configurable.
     """
 
diff --git a/libs/core/langchain_core/runnables/configurable.py b/libs/core/langchain_core/runnables/configurable.py
index 5c56b8ef503..8dc9495be27 100644
--- a/libs/core/langchain_core/runnables/configurable.py
+++ b/libs/core/langchain_core/runnables/configurable.py
@@ -538,7 +538,7 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
 
     prefix_keys: bool
     """Whether to prefix configurable fields of each alternative with a namespace
-    of the form <which.id>==<alternative key>, eg. a key named "temperature" used by 
+    of the form <which.id>==<alternative key>, eg. a key named "temperature" used by
     the alternative named "gpt3" becomes "model==gpt3/temperature"."""
 
     @classmethod
diff --git a/libs/core/langchain_core/runnables/fallbacks.py b/libs/core/langchain_core/runnables/fallbacks.py
index 647c08b3e21..a08dc0b24f4 100644
--- a/libs/core/langchain_core/runnables/fallbacks.py
+++ b/libs/core/langchain_core/runnables/fallbacks.py
@@ -93,13 +93,13 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
     """A sequence of fallbacks to try."""
     exceptions_to_handle: tuple[type[BaseException], ...] = (Exception,)
     """The exceptions on which fallbacks should be tried.
-    
+
     Any exception that is not a subclass of these exceptions will be raised immediately.
     """
     exception_key: Optional[str] = None
-    """If string is specified then handled exceptions will be passed to fallbacks as 
+    """If string is specified then handled exceptions will be passed to fallbacks as
     part of the input under the specified key. If None, exceptions
-    will not be passed to fallbacks. If used, the base Runnable and its fallbacks 
+    will not be passed to fallbacks. If used, the base Runnable and its fallbacks
     must accept a dictionary as input."""
 
     model_config = ConfigDict(
diff --git a/libs/core/langchain_core/runnables/retry.py b/libs/core/langchain_core/runnables/retry.py
index 11269417a40..0469dd961b4 100644
--- a/libs/core/langchain_core/runnables/retry.py
+++ b/libs/core/langchain_core/runnables/retry.py
@@ -96,10 +96,10 @@ class RunnableRetry(RunnableBindingBase[Input, Output]):
 
     retry_exception_types: tuple[type[BaseException], ...] = (Exception,)
     """The exception types to retry on. By default all exceptions are retried.
-    
+
     In general you should only retry on exceptions that are likely to be
     transient, such as network errors.
-    
+
     Good exceptions to retry are all server errors (5xx) and selected client
     errors (4xx) such as 429 Too Many Requests.
""" diff --git a/libs/core/langchain_core/runnables/schema.py b/libs/core/langchain_core/runnables/schema.py index 228356c8de7..dcfd32b8b39 100644 --- a/libs/core/langchain_core/runnables/schema.py +++ b/libs/core/langchain_core/runnables/schema.py @@ -13,26 +13,26 @@ class EventData(TypedDict, total=False): input: Any """The input passed to the Runnable that generated the event. - - Inputs will sometimes be available at the *START* of the Runnable, and + + Inputs will sometimes be available at the *START* of the Runnable, and sometimes at the *END* of the Runnable. - + If a Runnable is able to stream its inputs, then its input by definition won't be known until the *END* of the Runnable when it has finished streaming its inputs. """ output: Any """The output of the Runnable that generated the event. - + Outputs will only be available at the *END* of the Runnable. - + For most Runnables, this field can be inferred from the `chunk` field, though there might be some exceptions for special cased Runnables (e.g., like chat models), which may return more information. """ chunk: Any """A streaming chunk from the output that generated the event. - + chunks support addition in general, and adding them up should result in the output of the Runnable that generated the event. """ @@ -85,49 +85,49 @@ class BaseStreamEvent(TypedDict): event: str """Event names are of the format: on_[runnable_type]_(start|stream|end). - + Runnable types are one of: - + - **llm** - used by non chat models - **chat_model** - used by chat models - **prompt** -- e.g., ChatPromptTemplate - **tool** -- from tools defined via @tool decorator or inheriting from Tool/BaseTool - **chain** - most Runnables are of this type - + Further, the events are categorized as one of: - + - **start** - when the Runnable starts - **stream** - when the Runnable is streaming - **end* - when the Runnable ends - + start, stream and end are associated with slightly different `data` payload. - + Please see the documentation for `EventData` for more details. """ run_id: str """An randomly generated ID to keep track of the execution of the given Runnable. - + Each child Runnable that gets invoked as part of the execution of a parent Runnable is assigned its own unique ID. """ tags: NotRequired[list[str]] """Tags associated with the Runnable that generated this event. - + Tags are always inherited from parent Runnables. - + Tags can either be bound to a Runnable using `.with_config({"tags": ["hello"]})` or passed at run time using `.astream_events(..., {"tags": ["hello"]})`. """ metadata: NotRequired[dict[str, Any]] """Metadata associated with the Runnable that generated this event. - - Metadata can either be bound to a Runnable using - + + Metadata can either be bound to a Runnable using + `.with_config({"metadata": { "foo": "bar" }})` - - or passed at run time using - + + or passed at run time using + `.astream_events(..., {"metadata": {"foo": "bar"}})`. """ @@ -136,11 +136,11 @@ class BaseStreamEvent(TypedDict): Root Events will have an empty list. - For example, if a Runnable A calls Runnable B, then the event generated by Runnable + For example, if a Runnable A calls Runnable B, then the event generated by Runnable B will have Runnable A's ID in the parent_ids field. The order of the parent IDs is from the root parent to the immediate parent. - + Only supported as of v2 of the astream events API. v1 will return an empty list. 
""" diff --git a/libs/core/langchain_core/tools/base.py b/libs/core/langchain_core/tools/base.py index 98c45dd5bb9..85360f4171d 100644 --- a/libs/core/langchain_core/tools/base.py +++ b/libs/core/langchain_core/tools/base.py @@ -348,7 +348,7 @@ class ChildTool(BaseTool): """The unique name of the tool that clearly communicates its purpose.""" description: str """Used to tell the model how/when/why to use the tool. - + You can provide few-shot examples as a part of the description. """ @@ -356,17 +356,17 @@ class ChildTool(BaseTool): default=None, description="The tool schema." ) """Pydantic model class to validate and parse the tool's input arguments. - - Args schema should be either: - + + Args schema should be either: + - A subclass of pydantic.BaseModel. - or + or - A subclass of pydantic.v1.BaseModel if accessing v1 namespace in pydantic 2 """ return_direct: bool = False - """Whether to return the tool's output directly. - - Setting this to True means + """Whether to return the tool's output directly. + + Setting this to True means that after the tool is called, the AgentExecutor will stop looping. """ verbose: bool = False @@ -410,8 +410,8 @@ class ChildTool(BaseTool): response_format: Literal["content", "content_and_artifact"] = "content" """The tool response format. Defaults to 'content'. - If "content" then the output of the tool is interpreted as the contents of a - ToolMessage. If "content_and_artifact" then the output is expected to be a + If "content" then the output of the tool is interpreted as the contents of a + ToolMessage. If "content_and_artifact" then the output is expected to be a two-tuple corresponding to the (content, artifact) of a ToolMessage. """ diff --git a/libs/core/langchain_core/tracers/log_stream.py b/libs/core/langchain_core/tracers/log_stream.py index b4f5b4593b9..439d2a45381 100644 --- a/libs/core/langchain_core/tracers/log_stream.py +++ b/libs/core/langchain_core/tracers/log_stream.py @@ -53,8 +53,8 @@ class LogEntry(TypedDict): inputs: NotRequired[Optional[Any]] """Inputs to this run. Not available currently via astream_log.""" final_output: Optional[Any] - """Final output of this run. - + """Final output of this run. + Only available after the run has finished successfully.""" end_time: Optional[str] """ISO-8601 timestamp of when the run ended. 
diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml
index 6991754029c..da5de48a0e6 100644
--- a/libs/core/pyproject.toml
+++ b/libs/core/pyproject.toml
@@ -44,7 +44,7 @@ python = ">=3.12.4"
 [tool.poetry.extras]
 
 [tool.ruff.lint]
-select = [ "B", "C4", "E", "F", "I", "N", "PIE", "SIM", "T201", "UP",]
+select = [ "B", "C4", "E", "F", "I", "N", "PIE", "SIM", "T201", "UP", "W"]
 ignore = [ "UP007",]
 
 [tool.coverage.run]
diff --git a/libs/core/tests/unit_tests/output_parsers/test_json.py b/libs/core/tests/unit_tests/output_parsers/test_json.py
index f96f2b4d482..96cf6d0cc4d 100644
--- a/libs/core/tests/unit_tests/output_parsers/test_json.py
+++ b/libs/core/tests/unit_tests/output_parsers/test_json.py
@@ -136,7 +136,7 @@ WITHOUT_END_BRACKET = """Here is a response formatted as schema:
 ```json
 {
     "foo": "bar"
-    
+
 
 """
 
@@ -146,7 +146,7 @@ WITH_END_BRACKET = """Here is a response formatted as schema:
 {
     "foo": "bar"
 }
-    
+
 
 """
 
 WITH_END_TICK = """Here is a response formatted as schema:
@@ -155,7 +155,7 @@ WITH_END_TICK = """Here is a response formatted as schema:
 ```json
 {
     "foo": "bar"
 }
-``` 
+```
 """
 
 WITH_END_TEXT = """Here is a response formatted as schema:
@@ -164,8 +164,8 @@ WITH_END_TEXT = """Here is a response formatted as schema:
 ```json
 {
     "foo": "bar"
 
-``` 
-This should do the trick 
+```
+This should do the trick
 """
 
 TEST_CASES = [
diff --git a/libs/core/tests/unit_tests/prompts/test_prompt.py b/libs/core/tests/unit_tests/prompts/test_prompt.py
index 79186586734..3e3452f78b4 100644
--- a/libs/core/tests/unit_tests/prompts/test_prompt.py
+++ b/libs/core/tests/unit_tests/prompts/test_prompt.py
@@ -219,7 +219,7 @@ def test_mustache_prompt_from_template(snapshot: SnapshotAssertion) -> None:
         yo
         hello
 
-        is a test."""
+        is a test."""  # noqa: W293
     )
     assert prompt.input_variables == ["foo"]
     if PYDANTIC_VERSION >= (2, 9):
@@ -408,7 +408,7 @@ def test_prompt_from_jinja2_template() -> None:
     # Empty input variable.
     template = """Hello there
 There is no variable here {
-Will it get confused{ }? 
+Will it get confused{ }?
 """
     prompt = PromptTemplate.from_template(template, template_format="jinja2")
     expected_prompt = PromptTemplate(