diff --git a/AGENTS.md b/AGENTS.md index 64fb862b18a..ee61a460a41 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -149,7 +149,7 @@ def send_email(to: str, msg: str, *, priority: str = "normal") -> bool: Args: to: The email address of the recipient. msg: The message body to send. - priority: Email priority level (`'low'`, ``'normal'``, `'high'`). + priority: Email priority level (`'low'`, `'normal'`, `'high'`). Returns: True if email was sent successfully, False otherwise. diff --git a/CLAUDE.md b/CLAUDE.md index 64fb862b18a..ee61a460a41 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -149,7 +149,7 @@ def send_email(to: str, msg: str, *, priority: str = "normal") -> bool: Args: to: The email address of the recipient. msg: The message body to send. - priority: Email priority level (`'low'`, ``'normal'``, `'high'`). + priority: Email priority level (`'low'`, `'normal'`, `'high'`). Returns: True if email was sent successfully, False otherwise. diff --git a/libs/cli/langchain_cli/integration_template/integration_template/chat_models.py b/libs/cli/langchain_cli/integration_template/integration_template/chat_models.py index 830fe9b54a7..c43712325c7 100644 --- a/libs/cli/langchain_cli/integration_template/integration_template/chat_models.py +++ b/libs/cli/langchain_cli/integration_template/integration_template/chat_models.py @@ -26,8 +26,8 @@ class Chat__ModuleName__(BaseChatModel): # TODO: Replace with relevant packages, env vars. Setup: - Install ``__package_name__`` and set environment variable - ``__MODULE_NAME___API_KEY``. + Install `__package_name__` and set environment variable + `__MODULE_NAME___API_KEY`. .. code-block:: bash @@ -145,9 +145,9 @@ class Chat__ModuleName__(BaseChatModel): .. code-block:: python - # TODO: Example output. + # TODO: Example output. - See ``Chat__ModuleName__.bind_tools()`` method for more. + See `Chat__ModuleName__.bind_tools()` method for more. # TODO: Delete if .with_structured_output() isn't supported. Structured output: @@ -171,7 +171,7 @@ class Chat__ModuleName__(BaseChatModel): # TODO: Example output. - See ``Chat__ModuleName__.with_structured_output()`` for more. + See `Chat__ModuleName__.with_structured_output()` for more. # TODO: Delete if JSON mode response format isn't supported. JSON mode: @@ -255,7 +255,7 @@ class Chat__ModuleName__(BaseChatModel): .. code-block:: python - # TODO: Example output. + # TODO: Example output. Response metadata .. code-block:: python @@ -265,7 +265,7 @@ class Chat__ModuleName__(BaseChatModel): .. code-block:: python - # TODO: Example output. + # TODO: Example output. """ # noqa: E501 @@ -314,11 +314,11 @@ class Chat__ModuleName__(BaseChatModel): Args: messages: the prompt composed of a list of messages. stop: a list of strings on which the model should stop generating. - If generation stops due to a stop token, the stop token itself - SHOULD BE INCLUDED as part of the output. This is not enforced - across models right now, but it's a good practice to follow since - it makes it much easier to parse the output of the model - downstream and understand why generation stopped. + If generation stops due to a stop token, the stop token itself + SHOULD BE INCLUDED as part of the output. This is not enforced + across models right now, but it's a good practice to follow since + it makes it much easier to parse the output of the model + downstream and understand why generation stopped. run_manager: A run manager with callbacks for the LLM. 
""" # Replace this with actual logic to generate a response from a list @@ -362,11 +362,11 @@ class Chat__ModuleName__(BaseChatModel): Args: messages: the prompt composed of a list of messages. stop: a list of strings on which the model should stop generating. - If generation stops due to a stop token, the stop token itself - SHOULD BE INCLUDED as part of the output. This is not enforced - across models right now, but it's a good practice to follow since - it makes it much easier to parse the output of the model - downstream and understand why generation stopped. + If generation stops due to a stop token, the stop token itself + SHOULD BE INCLUDED as part of the output. This is not enforced + across models right now, but it's a good practice to follow since + it makes it much easier to parse the output of the model + downstream and understand why generation stopped. run_manager: A run manager with callbacks for the LLM. """ last_message = messages[-1] diff --git a/libs/cli/langchain_cli/integration_template/integration_template/document_loaders.py b/libs/cli/langchain_cli/integration_template/integration_template/document_loaders.py index 5266ca1e2ea..0d879f37ba1 100644 --- a/libs/cli/langchain_cli/integration_template/integration_template/document_loaders.py +++ b/libs/cli/langchain_cli/integration_template/integration_template/document_loaders.py @@ -14,8 +14,8 @@ class __ModuleName__Loader(BaseLoader): # TODO: Replace with relevant packages, env vars. Setup: - Install ``__package_name__`` and set environment variable - ``__MODULE_NAME___API_KEY``. + Install `__package_name__` and set environment variable + `__MODULE_NAME___API_KEY`. .. code-block:: bash diff --git a/libs/cli/langchain_cli/integration_template/integration_template/embeddings.py b/libs/cli/langchain_cli/integration_template/integration_template/embeddings.py index 5df2bbfbd40..ff9d8482f96 100644 --- a/libs/cli/langchain_cli/integration_template/integration_template/embeddings.py +++ b/libs/cli/langchain_cli/integration_template/integration_template/embeddings.py @@ -8,8 +8,8 @@ class __ModuleName__Embeddings(Embeddings): # TODO: Replace with relevant packages, env vars. Setup: - Install ``__package_name__`` and set environment variable - ``__MODULE_NAME___API_KEY``. + Install `__package_name__` and set environment variable + `__MODULE_NAME___API_KEY`. .. code-block:: bash @@ -49,7 +49,7 @@ class __ModuleName__Embeddings(Embeddings): Embed multiple text: .. code-block:: python - input_texts = ["Document 1...", "Document 2..."] + input_texts = ["Document 1...", "Document 2..."] embed.embed_documents(input_texts) .. code-block:: python diff --git a/libs/cli/langchain_cli/integration_template/integration_template/retrievers.py b/libs/cli/langchain_cli/integration_template/integration_template/retrievers.py index 1bb863364b1..5888f43fbcb 100644 --- a/libs/cli/langchain_cli/integration_template/integration_template/retrievers.py +++ b/libs/cli/langchain_cli/integration_template/integration_template/retrievers.py @@ -14,8 +14,8 @@ class __ModuleName__Retriever(BaseRetriever): # TODO: Replace with relevant packages, env vars, etc. Setup: - Install ``__package_name__`` and set environment variable - ``__MODULE_NAME___API_KEY``. + Install `__package_name__` and set environment variable + `__MODULE_NAME___API_KEY`. .. 
code-block:: bash diff --git a/libs/cli/langchain_cli/integration_template/integration_template/toolkits.py b/libs/cli/langchain_cli/integration_template/integration_template/toolkits.py index b1f83205da3..95dd4530b53 100644 --- a/libs/cli/langchain_cli/integration_template/integration_template/toolkits.py +++ b/libs/cli/langchain_cli/integration_template/integration_template/toolkits.py @@ -12,8 +12,8 @@ class __ModuleName__Toolkit(BaseToolkit): # TODO: Replace with relevant packages, env vars, etc. Setup: - Install ``__package_name__`` and set environment variable - ``__MODULE_NAME___API_KEY``. + Install `__package_name__` and set environment variable + `__MODULE_NAME___API_KEY`. .. code-block:: bash diff --git a/libs/cli/langchain_cli/integration_template/integration_template/tools.py b/libs/cli/langchain_cli/integration_template/integration_template/tools.py index fe8281187e8..cd9fca64b13 100644 --- a/libs/cli/langchain_cli/integration_template/integration_template/tools.py +++ b/libs/cli/langchain_cli/integration_template/integration_template/tools.py @@ -27,8 +27,8 @@ class __ModuleName__Tool(BaseTool): # type: ignore[override] Setup: # TODO: Replace with relevant packages, env vars. - Install ``__package_name__`` and set environment variable - ``__MODULE_NAME___API_KEY``. + Install `__package_name__` and set environment variable + `__MODULE_NAME___API_KEY`. .. code-block:: bash diff --git a/libs/cli/langchain_cli/integration_template/integration_template/vectorstores.py b/libs/cli/langchain_cli/integration_template/integration_template/vectorstores.py index bc88dd8e3b8..03cb70e36b6 100644 --- a/libs/cli/langchain_cli/integration_template/integration_template/vectorstores.py +++ b/libs/cli/langchain_cli/integration_template/integration_template/vectorstores.py @@ -28,7 +28,7 @@ class __ModuleName__VectorStore(VectorStore): # TODO: Replace with relevant packages, env vars. Setup: - Install ``__package_name__`` and set environment variable ``__MODULE_NAME___API_KEY``. + Install `__package_name__` and set environment variable `__MODULE_NAME___API_KEY`. .. code-block:: bash diff --git a/libs/core/langchain_core/agents.py b/libs/core/langchain_core/agents.py index 2d99d64150f..a0ad88f219f 100644 --- a/libs/core/langchain_core/agents.py +++ b/libs/core/langchain_core/agents.py @@ -86,7 +86,7 @@ class AgentAction(Serializable): """Get the namespace of the langchain object. Returns: - ``["langchain", "schema", "agent"]`` + `["langchain", "schema", "agent"]` """ return ["langchain", "schema", "agent"] @@ -163,7 +163,7 @@ class AgentFinish(Serializable): """Get the namespace of the langchain object. Returns: - ``["langchain", "schema", "agent"]`` + `["langchain", "schema", "agent"]` """ return ["langchain", "schema", "agent"] diff --git a/libs/core/langchain_core/callbacks/base.py b/libs/core/langchain_core/callbacks/base.py index d7ea77b99f7..41c37ef9c65 100644 --- a/libs/core/langchain_core/callbacks/base.py +++ b/libs/core/langchain_core/callbacks/base.py @@ -247,7 +247,7 @@ class CallbackManagerMixin: !!! warning This method is called for non-chat models (regular LLMs). If you're implementing a handler for a chat model, you should use - ``on_chat_model_start`` instead. + `on_chat_model_start` instead. Args: serialized: The serialized LLM. @@ -274,7 +274,7 @@ class CallbackManagerMixin: !!! warning This method is called for chat models. If you're implementing a handler for - a non-chat model, you should use ``on_llm_start`` instead. + a non-chat model, you should use `on_llm_start` instead. 
Args: serialized: The serialized chat model. @@ -414,7 +414,7 @@ class RunManagerMixin: Args: name: The name of the custom event. data: The data for the custom event. Format will match - the format specified by the user. + the format specified by the user. run_id: The ID of the run. tags: The tags associated with the custom event (includes inherited tags). @@ -496,7 +496,7 @@ class AsyncCallbackHandler(BaseCallbackHandler): !!! warning This method is called for non-chat models (regular LLMs). If you're implementing a handler for a chat model, you should use - ``on_chat_model_start`` instead. + `on_chat_model_start` instead. Args: serialized: The serialized LLM. @@ -523,7 +523,7 @@ class AsyncCallbackHandler(BaseCallbackHandler): !!! warning This method is called for chat models. If you're implementing a handler for - a non-chat model, you should use ``on_llm_start`` instead. + a non-chat model, you should use `on_llm_start` instead. Args: serialized: The serialized chat model. @@ -876,7 +876,7 @@ class AsyncCallbackHandler(BaseCallbackHandler): Args: name: The name of the custom event. data: The data for the custom event. Format will match - the format specified by the user. + the format specified by the user. run_id: The ID of the run. tags: The tags associated with the custom event (includes inherited tags). diff --git a/libs/core/langchain_core/callbacks/usage.py b/libs/core/langchain_core/callbacks/usage.py index 340e36eba21..29f07a2dad8 100644 --- a/libs/core/langchain_core/callbacks/usage.py +++ b/libs/core/langchain_core/callbacks/usage.py @@ -96,11 +96,10 @@ def get_usage_metadata_callback( """Get usage metadata callback. Get context manager for tracking usage metadata across chat model calls using - ``AIMessage.usage_metadata``. + `AIMessage.usage_metadata`. Args: - name: The name of the context variable. Defaults to - ``'usage_metadata_callback'``. + name: The name of the context variable. Yields: The usage metadata callback. diff --git a/libs/core/langchain_core/chat_history.py b/libs/core/langchain_core/chat_history.py index 29d05d99699..da3cf58a596 100644 --- a/libs/core/langchain_core/chat_history.py +++ b/libs/core/langchain_core/chat_history.py @@ -130,7 +130,7 @@ class BaseChatMessageHistory(ABC): """Convenience method for adding a human message string to the store. !!! note - This is a convenience method. Code should favor the bulk ``add_messages`` + This is a convenience method. Code should favor the bulk `add_messages` interface instead to save on round-trips to the persistence layer. This method may be deprecated in a future release. @@ -147,7 +147,7 @@ class BaseChatMessageHistory(ABC): """Convenience method for adding an AI message string to the store. !!! note - This is a convenience method. Code should favor the bulk ``add_messages`` + This is a convenience method. Code should favor the bulk `add_messages` interface instead to save on round-trips to the persistence layer. This method may be deprecated in a future release. diff --git a/libs/core/langchain_core/exceptions.py b/libs/core/langchain_core/exceptions.py index 30260ca12b4..375d2141f83 100644 --- a/libs/core/langchain_core/exceptions.py +++ b/libs/core/langchain_core/exceptions.py @@ -44,8 +44,8 @@ class OutputParserException(ValueError, LangChainException): # noqa: N818 Defaults to `False`. Raises: - ValueError: If ``send_to_llm`` is True but either observation or - ``llm_output`` are not provided. + ValueError: If `send_to_llm` is True but either observation or + `llm_output` are not provided. 
""" if isinstance(error, str): error = create_message( diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py index 48cd9521221..8a1fc06b744 100644 --- a/libs/core/langchain_core/language_models/chat_models.py +++ b/libs/core/langchain_core/language_models/chat_models.py @@ -108,7 +108,7 @@ def _generate_response_from_error(error: BaseException) -> list[ChatGeneration]: def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]: - """Format messages for tracing in ``on_chat_model_start``. + """Format messages for tracing in `on_chat_model_start`. - Update image content blocks to OpenAI Chat Completions format (backward compatibility). @@ -342,7 +342,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC): ) """Version of `AIMessage` output format to store in message content. - `AIMessage.content_blocks` will lazily parse the contents of ``content`` into a + `AIMessage.content_blocks` will lazily parse the contents of `content` into a standard format. This flag can be used to additionally store the standard format in message content, e.g., for serialization purposes. @@ -1533,7 +1533,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC): - a `TypedDict` class, - or a Pydantic class. - If ``schema`` is a Pydantic class then the model output will be a + If `schema` is a Pydantic class then the model output will be a Pydantic instance of that class, and the model-generated fields will be validated by the Pydantic class. Otherwise the model output will be a dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool` @@ -1546,26 +1546,26 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC): then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. + with keys `'raw'`, `'parsed'`, and `'parsing_error'`. Raises: - ValueError: If there are any unsupported ``kwargs``. + ValueError: If there are any unsupported `kwargs`. NotImplementedError: If the model does not implement - ``with_structured_output()``. + `with_structured_output()`. Returns: A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`. - If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs - an instance of ``schema`` (i.e., a Pydantic object). + If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs + an instance of `schema` (i.e., a Pydantic object). - Otherwise, if ``include_raw`` is False then Runnable outputs a dict. + Otherwise, if `include_raw` is False then Runnable outputs a dict. - If ``include_raw`` is True, then Runnable outputs a dict with keys: + If `include_raw` is True, then Runnable outputs a dict with keys: - - ``'raw'``: BaseMessage - - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``'parsing_error'``: BaseException | None + - `'raw'`: BaseMessage + - `'parsed'`: None if there was a parsing error, otherwise the type depends on the `schema` as described above. + - `'parsing_error'`: BaseException | None Example: Pydantic schema (include_raw=False): .. code-block:: python @@ -1693,7 +1693,7 @@ class SimpleChatModel(BaseChatModel): !!! note This implementation is primarily here for backwards compatibility. 
For new - implementations, please use ``BaseChatModel`` directly. + implementations, please use `BaseChatModel` directly. """ diff --git a/libs/core/langchain_core/language_models/fake_chat_models.py b/libs/core/langchain_core/language_models/fake_chat_models.py index 459367eb825..340f8ad3026 100644 --- a/libs/core/langchain_core/language_models/fake_chat_models.py +++ b/libs/core/langchain_core/language_models/fake_chat_models.py @@ -19,7 +19,7 @@ from langchain_core.runnables import RunnableConfig class FakeMessagesListChatModel(BaseChatModel): - """Fake ``ChatModel`` for testing purposes.""" + """Fake `ChatModel` for testing purposes.""" responses: list[BaseMessage] """List of responses to **cycle** through in order.""" @@ -228,10 +228,10 @@ class GenericFakeChatModel(BaseChatModel): """Generic fake chat model that can be used to test the chat model interface. * Chat model should be usable in both sync and async tests - * Invokes ``on_llm_new_token`` to allow for testing of callback related code for new - tokens. + * Invokes `on_llm_new_token` to allow for testing of callback related code for new + tokens. * Includes logic to break messages into message chunk to facilitate testing of - streaming. + streaming. """ @@ -242,7 +242,7 @@ class GenericFakeChatModel(BaseChatModel): to make the interface more generic if needed. !!! note - if you want to pass a list, you can use ``iter`` to convert it to an iterator. + if you want to pass a list, you can use `iter` to convert it to an iterator. !!! warning Streaming is not implemented yet. We should try to implement it in the future by diff --git a/libs/core/langchain_core/language_models/llms.py b/libs/core/langchain_core/language_models/llms.py index 61654405748..0519a0fe38e 100644 --- a/libs/core/langchain_core/language_models/llms.py +++ b/libs/core/langchain_core/language_models/llms.py @@ -835,7 +835,7 @@ class BaseLLM(BaseLanguageModel[str], ABC): 1. Take advantage of batched calls, 2. Need more output from the model than just the top generated value, 3. Are building chains that are agnostic to the underlying language model - type (e.g., pure text completion models vs chat models). + type (e.g., pure text completion models vs chat models). Args: prompts: List of string prompts. @@ -857,8 +857,8 @@ class BaseLLM(BaseLanguageModel[str], ABC): Raises: ValueError: If prompts is not a list. - ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or - ``run_name`` (if provided) does not match the length of prompts. + ValueError: If the length of `callbacks`, `tags`, `metadata`, or + `run_name` (if provided) does not match the length of prompts. Returns: An LLMResult, which contains a list of candidate Generations for each input @@ -1105,7 +1105,7 @@ class BaseLLM(BaseLanguageModel[str], ABC): 1. Take advantage of batched calls, 2. Need more output from the model than just the top generated value, 3. Are building chains that are agnostic to the underlying language model - type (e.g., pure text completion models vs chat models). + type (e.g., pure text completion models vs chat models). Args: prompts: List of string prompts. @@ -1126,8 +1126,8 @@ class BaseLLM(BaseLanguageModel[str], ABC): to the model provider API call. Raises: - ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or - ``run_name`` (if provided) does not match the length of prompts. + ValueError: If the length of `callbacks`, `tags`, `metadata`, or + `run_name` (if provided) does not match the length of prompts. 
Returns: An LLMResult, which contains a list of candidate Generations for each input diff --git a/libs/core/langchain_core/load/load.py b/libs/core/langchain_core/load/load.py index c31af9ae55f..7b71760cf21 100644 --- a/libs/core/langchain_core/load/load.py +++ b/libs/core/langchain_core/load/load.py @@ -107,7 +107,7 @@ class Reviver: ValueError: If trying to deserialize something that cannot be deserialized in the current version of langchain-core. NotImplementedError: If the object is not implemented and - ``ignore_unserializable_fields`` is False. + `ignore_unserializable_fields` is False. """ if ( value.get("lc") == 1 diff --git a/libs/core/langchain_core/load/serializable.py b/libs/core/langchain_core/load/serializable.py index 2d7dab470a0..f4f8c0417b7 100644 --- a/libs/core/langchain_core/load/serializable.py +++ b/libs/core/langchain_core/load/serializable.py @@ -34,7 +34,7 @@ class SerializedConstructor(BaseSerialized): """Serialized constructor.""" type: Literal["constructor"] - """The type of the object. Must be ``'constructor'``.""" + """The type of the object. Must be `'constructor'`.""" kwargs: dict[str, Any] """The constructor arguments.""" @@ -43,14 +43,14 @@ class SerializedSecret(BaseSerialized): """Serialized secret.""" type: Literal["secret"] - """The type of the object. Must be ``'secret'``.""" + """The type of the object. Must be `'secret'`.""" class SerializedNotImplemented(BaseSerialized): """Serialized not implemented.""" type: Literal["not_implemented"] - """The type of the object. Must be ``'not_implemented'``.""" + """The type of the object. Must be `'not_implemented'`.""" repr: str | None """The representation of the object. Optional.""" @@ -93,17 +93,17 @@ class Serializable(BaseModel, ABC): It relies on the following methods and properties: - `is_lc_serializable`: Is this class serializable? - By design, even if a class inherits from Serializable, it is not serializable by - default. This is to prevent accidental serialization of objects that should not - be serialized. - - ``get_lc_namespace``: Get the namespace of the langchain object. - During deserialization, this namespace is used to identify - the correct class to instantiate. - Please see the ``Reviver`` class in ``langchain_core.load.load`` for more details. - During deserialization an additional mapping is handle - classes that have moved or been renamed across package versions. - - ``lc_secrets``: A map of constructor argument names to secret ids. - - ``lc_attributes``: List of additional attribute names that should be included + By design, even if a class inherits from Serializable, it is not serializable by + default. This is to prevent accidental serialization of objects that should not + be serialized. + - `get_lc_namespace`: Get the namespace of the langchain object. + During deserialization, this namespace is used to identify + the correct class to instantiate. + Please see the `Reviver` class in `langchain_core.load.load` for more details. + During deserialization an additional mapping is handle + classes that have moved or been renamed across package versions. + - `lc_secrets`: A map of constructor argument names to secret ids. + - `lc_attributes`: List of additional attribute names that should be included as part of the serialized representation. 
""" diff --git a/libs/core/langchain_core/messages/ai.py b/libs/core/langchain_core/messages/ai.py index 4f5f8fb6b80..e5c772d2f43 100644 --- a/libs/core/langchain_core/messages/ai.py +++ b/libs/core/langchain_core/messages/ai.py @@ -193,7 +193,7 @@ class AIMessage(BaseMessage): ) -> None: """Initialize `AIMessage`. - Specify ``content`` as positional arg or ``content_blocks`` for typing. + Specify `content` as positional arg or `content_blocks` for typing. Args: content: The content of the message. @@ -335,7 +335,7 @@ class AIMessage(BaseMessage): Args: html: Whether to return an HTML-formatted string. - Defaults to `False`. + Defaults to `False`. Returns: A pretty representation of the message. @@ -380,7 +380,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk): type: Literal["AIMessageChunk"] = "AIMessageChunk" # type: ignore[assignment] """The type of the message (used for deserialization). - Defaults to ``AIMessageChunk``. + Defaults to `AIMessageChunk`. """ @@ -390,8 +390,8 @@ class AIMessageChunk(AIMessage, BaseMessageChunk): chunk_position: Literal["last"] | None = None """Optional span represented by an aggregated AIMessageChunk. - If a chunk with ``chunk_position="last"`` is aggregated into a stream, - ``tool_call_chunks`` in message content will be parsed into `tool_calls`. + If a chunk with `chunk_position="last"` is aggregated into a stream, + `tool_call_chunks` in message content will be parsed into `tool_calls`. """ @property @@ -596,14 +596,14 @@ class AIMessageChunk(AIMessage, BaseMessageChunk): def add_ai_message_chunks( left: AIMessageChunk, *others: AIMessageChunk ) -> AIMessageChunk: - """Add multiple ``AIMessageChunk``s together. + """Add multiple `AIMessageChunk`s together. Args: - left: The first ``AIMessageChunk``. - *others: Other ``AIMessageChunk``s to add. + left: The first `AIMessageChunk`. + *others: Other `AIMessageChunk`s to add. Returns: - The resulting ``AIMessageChunk``. + The resulting `AIMessageChunk`. """ content = merge_content(left.content, *(o.content for o in others)) @@ -713,11 +713,11 @@ def add_usage(left: UsageMetadata | None, right: UsageMetadata | None) -> UsageM ) Args: - left: The first ``UsageMetadata`` object. - right: The second ``UsageMetadata`` object. + left: The first `UsageMetadata` object. + right: The second `UsageMetadata` object. Returns: - The sum of the two ``UsageMetadata`` objects. + The sum of the two `UsageMetadata` objects. """ if not (left or right): @@ -740,9 +740,9 @@ def add_usage(left: UsageMetadata | None, right: UsageMetadata | None) -> UsageM def subtract_usage( left: UsageMetadata | None, right: UsageMetadata | None ) -> UsageMetadata: - """Recursively subtract two ``UsageMetadata`` objects. + """Recursively subtract two `UsageMetadata` objects. - Token counts cannot be negative so the actual operation is ``max(left - right, 0)``. + Token counts cannot be negative so the actual operation is `max(left - right, 0)`. Example: .. code-block:: python @@ -777,11 +777,11 @@ def subtract_usage( ) Args: - left: The first ``UsageMetadata`` object. - right: The second ``UsageMetadata`` object. + left: The first `UsageMetadata` object. + right: The second `UsageMetadata` object. Returns: - The resulting ``UsageMetadata`` after subtraction. + The resulting `UsageMetadata` after subtraction. 
""" if not (left or right): diff --git a/libs/core/langchain_core/messages/base.py b/libs/core/langchain_core/messages/base.py index 69b3d02ce3a..0a735eb2829 100644 --- a/libs/core/langchain_core/messages/base.py +++ b/libs/core/langchain_core/messages/base.py @@ -48,13 +48,13 @@ class TextAccessor(str): Exists to maintain backward compatibility while transitioning from method-based to property-based text access in message objects. In LangChain str: """Enable method-style text access for backward compatibility. - This method exists solely to support legacy code that calls ``.text()`` - as a method. New code should use property access (``.text``) instead. + This method exists solely to support legacy code that calls `.text()` + as a method. New code should use property access (`.text`) instead. !!! deprecated - As of `langchain-core` 1.0.0, calling ``.text()`` as a method is deprecated. - Use ``.text`` as a property instead. This method will be removed in 2.0.0. + As of `langchain-core` 1.0.0, calling `.text()` as a method is deprecated. + Use `.text` as a property instead. This method will be removed in 2.0.0. Returns: The string content, identical to property access. @@ -92,7 +92,7 @@ class TextAccessor(str): class BaseMessage(Serializable): """Base abstract message class. - Messages are the inputs and outputs of a ``ChatModel``. + Messages are the inputs and outputs of a `ChatModel`. """ content: str | list[str | dict] @@ -161,7 +161,7 @@ class BaseMessage(Serializable): ) -> None: """Initialize `BaseMessage`. - Specify ``content`` as positional arg or ``content_blocks`` for typing. + Specify `content` as positional arg or `content_blocks` for typing. Args: content: The string contents of the message. @@ -187,7 +187,7 @@ class BaseMessage(Serializable): """Get the namespace of the langchain object. Returns: - ``["langchain", "schema", "messages"]`` + `["langchain", "schema", "messages"]` """ return ["langchain", "schema", "messages"] @@ -259,11 +259,11 @@ class BaseMessage(Serializable): def text(self) -> TextAccessor: """Get the text content of the message as a string. - Can be used as both property (``message.text``) and method (``message.text()``). + Can be used as both property (`message.text`) and method (`message.text()`). !!! deprecated - As of langchain-core 1.0.0, calling ``.text()`` as a method is deprecated. - Use ``.text`` as a property instead. This method will be removed in 2.0.0. + As of langchain-core 1.0.0, calling `.text()` as a method is deprecated. + Use `.text` as a property instead. This method will be removed in 2.0.0. Returns: The text content of the message. @@ -331,8 +331,8 @@ def merge_content( """Merge multiple message contents. Args: - first_content: The first ``content``. Can be a string or a list. - contents: The other ``content``s. Can be a string or a list. + first_content: The first `content`. Can be a string or a list. + contents: The other `content`s. Can be a string or a list. Returns: The merged content. @@ -388,9 +388,9 @@ class BaseMessageChunk(BaseMessage): For example, - ``AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")`` + `AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")` - will give ``AIMessageChunk(content="Hello World")`` + will give `AIMessageChunk(content="Hello World")` """ if isinstance(other, BaseMessageChunk): @@ -440,7 +440,7 @@ def message_to_dict(message: BaseMessage) -> dict: Returns: Message as a dict. 
The dict will have a `type` key with the message type - and a ``data`` key with the message data as a dict. + and a `data` key with the message data as a dict. """ return {"type": message.type, "data": message.model_dump()} diff --git a/libs/core/langchain_core/messages/block_translators/__init__.py b/libs/core/langchain_core/messages/block_translators/__init__.py index 4bbb95a4bf7..11419fd5b81 100644 --- a/libs/core/langchain_core/messages/block_translators/__init__.py +++ b/libs/core/langchain_core/messages/block_translators/__init__.py @@ -1,7 +1,7 @@ """Derivations of standard content blocks from provider content. `AIMessage` will first attempt to use a provider-specific translator if -``model_provider`` is set in `response_metadata` on the message. Consequently, each +`model_provider` is set in `response_metadata` on the message. Consequently, each provider translator must handle all possible content response types from the provider, including text. @@ -23,13 +23,13 @@ if TYPE_CHECKING: PROVIDER_TRANSLATORS: dict[str, dict[str, Callable[..., list[types.ContentBlock]]]] = {} """Map model provider names to translator functions. -The dictionary maps provider names (e.g. ``'openai'``, ``'anthropic'``) to another +The dictionary maps provider names (e.g. `'openai'`, `'anthropic'`) to another dictionary with two keys: -- ``'translate_content'``: Function to translate `AIMessage` content. -- ``'translate_content_chunk'``: Function to translate ``AIMessageChunk`` content. +- `'translate_content'`: Function to translate `AIMessage` content. +- `'translate_content_chunk'`: Function to translate `AIMessageChunk` content. -When calling `.content_blocks` on an `AIMessage` or ``AIMessageChunk``, if -``model_provider`` is set in `response_metadata`, the corresponding translator +When calling `.content_blocks` on an `AIMessage` or `AIMessageChunk`, if +`model_provider` is set in `response_metadata`, the corresponding translator functions will be used to parse the content into blocks. Otherwise, best-effort parsing in `BaseMessage` will be used. """ @@ -43,9 +43,9 @@ def register_translator( """Register content translators for a provider in `PROVIDER_TRANSLATORS`. Args: - provider: The model provider name (e.g. ``'openai'``, ``'anthropic'``). + provider: The model provider name (e.g. `'openai'`, `'anthropic'`). translate_content: Function to translate `AIMessage` content. - translate_content_chunk: Function to translate ``AIMessageChunk`` content. + translate_content_chunk: Function to translate `AIMessageChunk` content. """ PROVIDER_TRANSLATORS[provider] = { "translate_content": translate_content, @@ -62,7 +62,7 @@ def get_translator( provider: The model provider name. Returns: - Dictionary with ``'translate_content'`` and ``'translate_content_chunk'`` + Dictionary with `'translate_content'` and `'translate_content_chunk'` functions, or None if no translator is registered for the provider. In such case, best-effort parsing in `BaseMessage` will be used. """ @@ -72,10 +72,10 @@ def get_translator( def _register_translators() -> None: """Register all translators in langchain-core. - A unit test ensures all modules in ``block_translators`` are represented here. + A unit test ensures all modules in `block_translators` are represented here. For translators implemented outside langchain-core, they can be registered by - calling ``register_translator`` from within the integration package. + calling `register_translator` from within the integration package. 
""" from langchain_core.messages.block_translators.anthropic import ( # noqa: PLC0415 _register_anthropic_translator, diff --git a/libs/core/langchain_core/messages/block_translators/anthropic.py b/libs/core/langchain_core/messages/block_translators/anthropic.py index 6d1b803ba7d..87f9df8a392 100644 --- a/libs/core/langchain_core/messages/block_translators/anthropic.py +++ b/libs/core/langchain_core/messages/block_translators/anthropic.py @@ -32,11 +32,11 @@ def _convert_to_v1_from_anthropic_input( """Convert Anthropic format blocks to v1 format. During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1 - block as a ``'non_standard'`` block with the original block stored in the ``value`` + block as a `'non_standard'` block with the original block stored in the `value` field. This function attempts to unpack those blocks and convert any blocks that might be Anthropic format to v1 ContentBlocks. - If conversion fails, the block is left as a ``'non_standard'`` block. + If conversion fails, the block is left as a `'non_standard'` block. Args: content: List of content blocks to process. diff --git a/libs/core/langchain_core/messages/block_translators/bedrock_converse.py b/libs/core/langchain_core/messages/block_translators/bedrock_converse.py index dfbc993db82..6d5e517e49f 100644 --- a/libs/core/langchain_core/messages/block_translators/bedrock_converse.py +++ b/libs/core/langchain_core/messages/block_translators/bedrock_converse.py @@ -36,11 +36,11 @@ def _convert_to_v1_from_converse_input( """Convert Bedrock Converse format blocks to v1 format. During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1 - block as a ``'non_standard'`` block with the original block stored in the ``value`` + block as a `'non_standard'` block with the original block stored in the `value` field. This function attempts to unpack those blocks and convert any blocks that might be Converse format to v1 ContentBlocks. - If conversion fails, the block is left as a ``'non_standard'`` block. + If conversion fails, the block is left as a `'non_standard'` block. Args: content: List of content blocks to process. diff --git a/libs/core/langchain_core/messages/block_translators/google_genai.py b/libs/core/langchain_core/messages/block_translators/google_genai.py index cf830fa8bea..8380a267a52 100644 --- a/libs/core/langchain_core/messages/block_translators/google_genai.py +++ b/libs/core/langchain_core/messages/block_translators/google_genai.py @@ -106,11 +106,11 @@ def _convert_to_v1_from_genai_input( `response_metadata`. During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1 - block as a ``'non_standard'`` block with the original block stored in the ``value`` + block as a `'non_standard'` block with the original block stored in the `value` field. This function attempts to unpack those blocks and convert any blocks that might be GenAI format to v1 ContentBlocks. - If conversion fails, the block is left as a ``'non_standard'`` block. + If conversion fails, the block is left as a `'non_standard'` block. Args: content: List of content blocks to process. 
diff --git a/libs/core/langchain_core/messages/block_translators/langchain_v0.py b/libs/core/langchain_core/messages/block_translators/langchain_v0.py index 9056c26c375..2172bf6e829 100644 --- a/libs/core/langchain_core/messages/block_translators/langchain_v0.py +++ b/libs/core/langchain_core/messages/block_translators/langchain_v0.py @@ -11,11 +11,11 @@ def _convert_v0_multimodal_input_to_v1( """Convert v0 multimodal blocks to v1 format. During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1 - block as a ``'non_standard'`` block with the original block stored in the ``value`` + block as a `'non_standard'` block with the original block stored in the `value` field. This function attempts to unpack those blocks and convert any v0 format blocks to v1 format. - If conversion fails, the block is left as a ``'non_standard'`` block. + If conversion fails, the block is left as a `'non_standard'` block. Args: content: List of content blocks to process. diff --git a/libs/core/langchain_core/messages/block_translators/openai.py b/libs/core/langchain_core/messages/block_translators/openai.py index e74ba68922d..5d60ce025ac 100644 --- a/libs/core/langchain_core/messages/block_translators/openai.py +++ b/libs/core/langchain_core/messages/block_translators/openai.py @@ -18,7 +18,7 @@ if TYPE_CHECKING: def convert_to_openai_image_block(block: dict[str, Any]) -> dict: - """Convert ``ImageContentBlock`` to format expected by OpenAI Chat Completions.""" + """Convert `ImageContentBlock` to format expected by OpenAI Chat Completions.""" if "url" in block: return { "type": "image_url", @@ -156,11 +156,11 @@ def _convert_to_v1_from_chat_completions_input( """Convert OpenAI Chat Completions format blocks to v1 format. During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1 - block as a ``'non_standard'`` block with the original block stored in the ``value`` + block as a `'non_standard'` block with the original block stored in the `value` field. This function attempts to unpack those blocks and convert any blocks that might be OpenAI format to v1 ContentBlocks. - If conversion fails, the block is left as a ``'non_standard'`` block. + If conversion fails, the block is left as a `'non_standard'` block. Args: content: List of content blocks to process. @@ -263,7 +263,7 @@ _FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__" def _convert_from_v03_ai_message(message: AIMessage) -> AIMessage: - """Convert v0 AIMessage into ``output_version="responses/v1"`` format.""" + """Convert v0 AIMessage into `output_version="responses/v1"` format.""" from langchain_core.messages import AIMessageChunk # noqa: PLC0415 # Only update ChatOpenAI v0.3 AIMessages diff --git a/libs/core/langchain_core/messages/chat.py b/libs/core/langchain_core/messages/chat.py index 35d7aafebfd..2050dd7fa0b 100644 --- a/libs/core/langchain_core/messages/chat.py +++ b/libs/core/langchain_core/messages/chat.py @@ -31,7 +31,7 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk): type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore[assignment] """The type of the message (used during serialization). - Defaults to ``'ChatMessageChunk'``. + Defaults to `'ChatMessageChunk'`. """ diff --git a/libs/core/langchain_core/messages/content.py b/libs/core/langchain_core/messages/content.py index 3700167202e..693afbd0993 100644 --- a/libs/core/langchain_core/messages/content.py +++ b/libs/core/langchain_core/messages/content.py @@ -5,7 +5,7 @@ change in future releases. 
This module provides standardized data structures for representing inputs to and -outputs from LLMs. The core abstraction is the **Content Block**, a ``TypedDict``. +outputs from LLMs. The core abstraction is the **Content Block**, a `TypedDict`. **Rationale** @@ -20,22 +20,22 @@ blocks into the format required by its API. **Extensibility** Data **not yet mapped** to a standard block may be represented using the -``NonStandardContentBlock``, which allows for provider-specific data to be included +`NonStandardContentBlock`, which allows for provider-specific data to be included without losing the benefits of type checking and validation. Furthermore, provider-specific fields **within** a standard block are fully supported -by default in the ``extras`` field of each block. This allows for additional metadata +by default in the `extras` field of each block. This allows for additional metadata to be included without breaking the standard structure. !!! warning - Do not heavily rely on the ``extras`` field for provider-specific data! This field + Do not heavily rely on the `extras` field for provider-specific data! This field is subject to deprecation in future releases as we move towards PEP 728. !!! note Following widespread adoption of [PEP 728](https://peps.python.org/pep-0728/), we - will add ``extra_items=Any`` as a param to Content Blocks. This will signify to type + will add `extra_items=Any` as a param to Content Blocks. This will signify to type checkers that additional provider-specific fields are allowed outside of the - ``extras`` field, and that will become the new standard approach to adding + `extras` field, and that will become the new standard approach to adding provider-specific metadata. ??? note @@ -72,7 +72,7 @@ to be included without breaking the standard structure. # Mutating an existing block to add provider-specific fields openai_data = my_block["openai_metadata"] # Type: Any - PEP 728 is enabled with ``# type: ignore[call-arg]`` comments to suppress + PEP 728 is enabled with `# type: ignore[call-arg]` comments to suppress warnings from type checkers that don't yet support it. The functionality works correctly in Python 3.13+ and will be fully supported as the ecosystem catches up. @@ -81,16 +81,16 @@ to be included without breaking the standard structure. The module defines several types of content blocks, including: -- ``TextContentBlock``: Standard text output. -- ``Citation``: For annotations that link text output to a source document. -- ``ToolCall``: For function calling. -- ``ReasoningContentBlock``: To capture a model's thought process. +- `TextContentBlock`: Standard text output. +- `Citation`: For annotations that link text output to a source document. +- `ToolCall`: For function calling. +- `ReasoningContentBlock`: To capture a model's thought process. - Multimodal data: - - ``ImageContentBlock`` - - ``AudioContentBlock`` - - ``VideoContentBlock`` - - ``PlainTextContentBlock`` (e.g. .txt or .md files) - - ``FileContentBlock`` (e.g. PDFs, etc.) + - `ImageContentBlock` + - `AudioContentBlock` + - `VideoContentBlock` + - `PlainTextContentBlock` (e.g. .txt or .md files) + - `FileContentBlock` (e.g. PDFs, etc.) **Example Usage** @@ -140,12 +140,12 @@ class Citation(TypedDict): """Annotation for citing data from a document. !!! note - ``start``/``end`` indices refer to the **response text**, + `start`/`end` indices refer to the **response text**, not the source text. 
This means that the indices are relative to the model's - response, not the original document (as specified in the ``url``). + response, not the original document (as specified in the `url`). !!! note - ``create_citation`` may also be used as a factory to create a ``Citation``. + `create_citation` may also be used as a factory to create a `Citation`. Benefits include: * Automatic ID generation (when not provided) @@ -160,7 +160,7 @@ class Citation(TypedDict): """Content block identifier. Either: - Generated by the provider (e.g., OpenAI's file ID) - - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``)) + - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)) """ @@ -174,10 +174,10 @@ class Citation(TypedDict): """ start_index: NotRequired[int] - """Start index of the **response text** (``TextContentBlock.text``).""" + """Start index of the **response text** (`TextContentBlock.text`).""" end_index: NotRequired[int] - """End index of the **response text** (``TextContentBlock.text``)""" + """End index of the **response text** (`TextContentBlock.text`)""" cited_text: NotRequired[str] """Excerpt of source text being cited.""" @@ -203,7 +203,7 @@ class NonStandardAnnotation(TypedDict): Either: - Generated by the provider (e.g., OpenAI's file ID) - - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``)) + - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)) """ @@ -221,8 +221,8 @@ class TextContentBlock(TypedDict): from a language model or the text of a user message. !!! note - ``create_text_block`` may also be used as a factory to create a - ``TextContentBlock``. Benefits include: + `create_text_block` may also be used as a factory to create a + `TextContentBlock`. Benefits include: * Automatic ID generation (when not provided) * Required arguments strictly validated at creation time @@ -237,7 +237,7 @@ class TextContentBlock(TypedDict): Either: - Generated by the provider (e.g., OpenAI's file ID) - - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``)) + - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)) """ @@ -245,7 +245,7 @@ class TextContentBlock(TypedDict): """Block text.""" annotations: NotRequired[list[Annotation]] - """``Citation``s and other annotations.""" + """`Citation`s and other annotations.""" index: NotRequired[int | str] """Index of block in aggregate response. Used during streaming.""" @@ -267,8 +267,8 @@ class ToolCall(TypedDict): and an identifier of "123". !!! note - ``create_tool_call`` may also be used as a factory to create a - ``ToolCall``. Benefits include: + `create_tool_call` may also be used as a factory to create a + `ToolCall`. Benefits include: * Automatic ID generation (when not provided) * Required arguments strictly validated at creation time @@ -303,9 +303,9 @@ class ToolCall(TypedDict): class ToolCallChunk(TypedDict): """A chunk of a tool call (e.g., as part of a stream). - When merging ``ToolCallChunks`` (e.g., via ``AIMessageChunk.__add__``), + When merging `ToolCallChunks` (e.g., via `AIMessageChunk.__add__`), all string attributes are concatenated. Chunks are only merged if their - values of ``index`` are equal and not `None`. + values of `index` are equal and not `None`. Example: @@ -457,8 +457,8 @@ class ReasoningContentBlock(TypedDict): """Reasoning output from a LLM. !!! note - ``create_reasoning_block`` may also be used as a factory to create a - ``ReasoningContentBlock``. 
Benefits include: + `create_reasoning_block` may also be used as a factory to create a + `ReasoningContentBlock`. Benefits include: * Automatic ID generation (when not provided) * Required arguments strictly validated at creation time @@ -473,7 +473,7 @@ class ReasoningContentBlock(TypedDict): Either: - Generated by the provider (e.g., OpenAI's file ID) - - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``)) + - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)) """ @@ -481,7 +481,7 @@ class ReasoningContentBlock(TypedDict): """Reasoning text. Either the thought summary or the raw reasoning text itself. This is often parsed - from ```` tags in the model's response. + from `` tags in the model's response. """ @@ -499,8 +499,8 @@ class ImageContentBlock(TypedDict): """Image data. !!! note - ``create_image_block`` may also be used as a factory to create a - ``ImageContentBlock``. Benefits include: + `create_image_block` may also be used as a factory to create a + `ImageContentBlock`. Benefits include: * Automatic ID generation (when not provided) * Required arguments strictly validated at creation time @@ -515,7 +515,7 @@ class ImageContentBlock(TypedDict): Either: - Generated by the provider (e.g., OpenAI's file ID) - - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``)) + - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)) """ @@ -546,8 +546,8 @@ class VideoContentBlock(TypedDict): """Video data. !!! note - ``create_video_block`` may also be used as a factory to create a - ``VideoContentBlock``. Benefits include: + `create_video_block` may also be used as a factory to create a + `VideoContentBlock`. Benefits include: * Automatic ID generation (when not provided) * Required arguments strictly validated at creation time @@ -562,7 +562,7 @@ class VideoContentBlock(TypedDict): Either: - Generated by the provider (e.g., OpenAI's file ID) - - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``)) + - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)) """ @@ -593,8 +593,8 @@ class AudioContentBlock(TypedDict): """Audio data. !!! note - ``create_audio_block`` may also be used as a factory to create an - ``AudioContentBlock``. Benefits include: + `create_audio_block` may also be used as a factory to create an + `AudioContentBlock`. Benefits include: * Automatic ID generation (when not provided) * Required arguments strictly validated at creation time @@ -608,7 +608,7 @@ class AudioContentBlock(TypedDict): Either: - Generated by the provider (e.g., OpenAI's file ID) - - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``)) + - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)) """ @@ -639,18 +639,18 @@ class PlainTextContentBlock(TypedDict): """Plaintext data (e.g., from a document). !!! note - A ``PlainTextContentBlock`` existed in ``langchain-core<1.0.0``. Although the + A `PlainTextContentBlock` existed in `langchain-core<1.0.0`. Although the name has carried over, the structure has changed significantly. The only shared - keys between the old and new versions are `type` and ``text``, though the - `type` value has changed from ``'text'`` to ``'text-plain'``. + keys between the old and new versions are `type` and `text`, though the + `type` value has changed from `'text'` to `'text-plain'`. !!! note Title and context are optional fields that may be passed to the model. 
See Anthropic [example](https://docs.anthropic.com/en/docs/build-with-claude/citations#citable-vs-non-citable-content). !!! note - ``create_plaintext_block`` may also be used as a factory to create a - ``PlainTextContentBlock``. Benefits include: + `create_plaintext_block` may also be used as a factory to create a + `PlainTextContentBlock`. Benefits include: * Automatic ID generation (when not provided) * Required arguments strictly validated at creation time @@ -665,7 +665,7 @@ class PlainTextContentBlock(TypedDict): Either: - Generated by the provider (e.g., OpenAI's file ID) - - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``)) + - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)) """ @@ -704,12 +704,12 @@ class FileContentBlock(TypedDict): example, it can be used for PDFs, Word documents, etc. If the file is an image, audio, or plaintext, you should use the corresponding - content block type (e.g., ``ImageContentBlock``, ``AudioContentBlock``, - ``PlainTextContentBlock``). + content block type (e.g., `ImageContentBlock`, `AudioContentBlock`, + `PlainTextContentBlock`). !!! note - ``create_file_block`` may also be used as a factory to create a - ``FileContentBlock``. Benefits include: + `create_file_block` may also be used as a factory to create a + `FileContentBlock`. Benefits include: * Automatic ID generation (when not provided) * Required arguments strictly validated at creation time @@ -724,7 +724,7 @@ class FileContentBlock(TypedDict): Either: - Generated by the provider (e.g., OpenAI's file ID) - - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``)) + - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)) """ @@ -764,14 +764,14 @@ class NonStandardContentBlock(TypedDict): The purpose of this block should be to simply hold a provider-specific payload. If a provider's non-standard output includes reasoning and tool calls, it should be the adapter's job to parse that payload and emit the corresponding standard - ``ReasoningContentBlock`` and ``ToolCalls``. + `ReasoningContentBlock` and `ToolCalls`. - Has no ``extras`` field, as provider-specific data should be included in the - ``value`` field. + Has no `extras` field, as provider-specific data should be included in the + `value` field. !!! note - ``create_non_standard_block`` may also be used as a factory to create a - ``NonStandardContentBlock``. Benefits include: + `create_non_standard_block` may also be used as a factory to create a + `NonStandardContentBlock`. Benefits include: * Automatic ID generation (when not provided) * Required arguments strictly validated at creation time @@ -786,7 +786,7 @@ class NonStandardContentBlock(TypedDict): Either: - Generated by the provider (e.g., OpenAI's file ID) - - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``)) + - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`)) """ @@ -842,7 +842,7 @@ KNOWN_BLOCK_TYPES = { "non_standard", # citation and non_standard_annotation intentionally omitted } -"""These are block types known to ``langchain-core>=1.0.0``. +"""These are block types known to `langchain-core>=1.0.0`. If a block has a type not in this set, it is considered to be provider-specific. """ @@ -923,20 +923,20 @@ def create_text_block( index: int | str | None = None, **kwargs: Any, ) -> TextContentBlock: - """Create a ``TextContentBlock``. + """Create a `TextContentBlock`. Args: text: The text content of the block. id: Content block identifier. 
Generated automatically if not provided. - annotations: ``Citation``s and other annotations for the text. + annotations: `Citation`s and other annotations for the text. index: Index of block in aggregate response. Used during streaming. Returns: - A properly formatted ``TextContentBlock``. + A properly formatted `TextContentBlock`. !!! note The `id` is generated automatically if not provided, using a UUID4 format - prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + prefixed with `'lc_'` to indicate it is a LangChain-generated ID. """ block = TextContentBlock( @@ -966,7 +966,7 @@ def create_image_block( index: int | str | None = None, **kwargs: Any, ) -> ImageContentBlock: - """Create an ``ImageContentBlock``. + """Create an `ImageContentBlock`. Args: url: URL of the image. @@ -977,15 +977,15 @@ def create_image_block( index: Index of block in aggregate response. Used during streaming. Returns: - A properly formatted ``ImageContentBlock``. + A properly formatted `ImageContentBlock`. Raises: - ValueError: If no image source is provided or if ``base64`` is used without - ``mime_type``. + ValueError: If no image source is provided or if `base64` is used without + `mime_type`. !!! note The `id` is generated automatically if not provided, using a UUID4 format - prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + prefixed with `'lc_'` to indicate it is a LangChain-generated ID. """ if not any([url, base64, file_id]): @@ -1022,7 +1022,7 @@ def create_video_block( index: int | str | None = None, **kwargs: Any, ) -> VideoContentBlock: - """Create a ``VideoContentBlock``. + """Create a `VideoContentBlock`. Args: url: URL of the video. @@ -1033,15 +1033,15 @@ def create_video_block( index: Index of block in aggregate response. Used during streaming. Returns: - A properly formatted ``VideoContentBlock``. + A properly formatted `VideoContentBlock`. Raises: - ValueError: If no video source is provided or if ``base64`` is used without - ``mime_type``. + ValueError: If no video source is provided or if `base64` is used without + `mime_type`. !!! note The `id` is generated automatically if not provided, using a UUID4 format - prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + prefixed with `'lc_'` to indicate it is a LangChain-generated ID. """ if not any([url, base64, file_id]): @@ -1082,7 +1082,7 @@ def create_audio_block( index: int | str | None = None, **kwargs: Any, ) -> AudioContentBlock: - """Create an ``AudioContentBlock``. + """Create an `AudioContentBlock`. Args: url: URL of the audio. @@ -1093,15 +1093,15 @@ def create_audio_block( index: Index of block in aggregate response. Used during streaming. Returns: - A properly formatted ``AudioContentBlock``. + A properly formatted `AudioContentBlock`. Raises: - ValueError: If no audio source is provided or if ``base64`` is used without - ``mime_type``. + ValueError: If no audio source is provided or if `base64` is used without + `mime_type`. !!! note The `id` is generated automatically if not provided, using a UUID4 format - prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + prefixed with `'lc_'` to indicate it is a LangChain-generated ID. """ if not any([url, base64, file_id]): @@ -1142,7 +1142,7 @@ def create_file_block( index: int | str | None = None, **kwargs: Any, ) -> FileContentBlock: - """Create a ``FileContentBlock``. + """Create a `FileContentBlock`. Args: url: URL of the file. @@ -1153,15 +1153,15 @@ def create_file_block( index: Index of block in aggregate response. 
Used during streaming. Returns: - A properly formatted ``FileContentBlock``. + A properly formatted `FileContentBlock`. Raises: - ValueError: If no file source is provided or if ``base64`` is used without - ``mime_type``. + ValueError: If no file source is provided or if `base64` is used without + `mime_type`. !!! note The `id` is generated automatically if not provided, using a UUID4 format - prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + prefixed with `'lc_'` to indicate it is a LangChain-generated ID. """ if not any([url, base64, file_id]): @@ -1203,7 +1203,7 @@ def create_plaintext_block( index: int | str | None = None, **kwargs: Any, ) -> PlainTextContentBlock: - """Create a ``PlainTextContentBlock``. + """Create a `PlainTextContentBlock`. Args: text: The plaintext content. @@ -1216,11 +1216,11 @@ def create_plaintext_block( index: Index of block in aggregate response. Used during streaming. Returns: - A properly formatted ``PlainTextContentBlock``. + A properly formatted `PlainTextContentBlock`. !!! note The `id` is generated automatically if not provided, using a UUID4 format - prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + prefixed with `'lc_'` to indicate it is a LangChain-generated ID. """ block = PlainTextContentBlock( @@ -1259,7 +1259,7 @@ def create_tool_call( index: int | str | None = None, **kwargs: Any, ) -> ToolCall: - """Create a ``ToolCall``. + """Create a `ToolCall`. Args: name: The name of the tool to be called. @@ -1268,11 +1268,11 @@ def create_tool_call( index: Index of block in aggregate response. Used during streaming. Returns: - A properly formatted ``ToolCall``. + A properly formatted `ToolCall`. !!! note The `id` is generated automatically if not provided, using a UUID4 format - prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + prefixed with `'lc_'` to indicate it is a LangChain-generated ID. """ block = ToolCall( @@ -1298,7 +1298,7 @@ def create_reasoning_block( index: int | str | None = None, **kwargs: Any, ) -> ReasoningContentBlock: - """Create a ``ReasoningContentBlock``. + """Create a `ReasoningContentBlock`. Args: reasoning: The reasoning text or thought summary. @@ -1306,11 +1306,11 @@ def create_reasoning_block( index: Index of block in aggregate response. Used during streaming. Returns: - A properly formatted ``ReasoningContentBlock``. + A properly formatted `ReasoningContentBlock`. !!! note The `id` is generated automatically if not provided, using a UUID4 format - prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + prefixed with `'lc_'` to indicate it is a LangChain-generated ID. """ block = ReasoningContentBlock( @@ -1339,7 +1339,7 @@ def create_citation( id: str | None = None, **kwargs: Any, ) -> Citation: - """Create a ``Citation``. + """Create a `Citation`. Args: url: URL of the document source. @@ -1350,11 +1350,11 @@ def create_citation( id: Content block identifier. Generated automatically if not provided. Returns: - A properly formatted ``Citation``. + A properly formatted `Citation`. !!! note The `id` is generated automatically if not provided, using a UUID4 format - prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + prefixed with `'lc_'` to indicate it is a LangChain-generated ID. """ block = Citation(type="citation", id=ensure_id(id)) @@ -1383,7 +1383,7 @@ def create_non_standard_block( id: str | None = None, index: int | str | None = None, ) -> NonStandardContentBlock: - """Create a ``NonStandardContentBlock``. 
+ """Create a `NonStandardContentBlock`. Args: value: Provider-specific data. @@ -1391,11 +1391,11 @@ def create_non_standard_block( index: Index of block in aggregate response. Used during streaming. Returns: - A properly formatted ``NonStandardContentBlock``. + A properly formatted `NonStandardContentBlock`. !!! note The `id` is generated automatically if not provided, using a UUID4 format - prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + prefixed with `'lc_'` to indicate it is a LangChain-generated ID. """ block = NonStandardContentBlock( diff --git a/libs/core/langchain_core/messages/function.py b/libs/core/langchain_core/messages/function.py index 86406e7bc9e..2bd63e04e69 100644 --- a/libs/core/langchain_core/messages/function.py +++ b/libs/core/langchain_core/messages/function.py @@ -15,7 +15,7 @@ from langchain_core.utils._merge import merge_dicts class FunctionMessage(BaseMessage): """Message for passing the result of executing a tool back to a model. - ``FunctionMessage`` are an older version of the `ToolMessage` schema, and + `FunctionMessage` are an older version of the `ToolMessage` schema, and do not contain the `tool_call_id` field. The `tool_call_id` field is used to associate the tool call request with the @@ -28,7 +28,7 @@ class FunctionMessage(BaseMessage): """The name of the function that was executed.""" type: Literal["function"] = "function" - """The type of the message (used for serialization). Defaults to ``'function'``.""" + """The type of the message (used for serialization). Defaults to `'function'`.""" class FunctionMessageChunk(FunctionMessage, BaseMessageChunk): @@ -40,7 +40,7 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk): type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk" # type: ignore[assignment] """The type of the message (used for serialization). - Defaults to ``'FunctionMessageChunk'``. + Defaults to `'FunctionMessageChunk'`. """ diff --git a/libs/core/langchain_core/messages/human.py b/libs/core/langchain_core/messages/human.py index d3106a0f62b..c244aa9fdc6 100644 --- a/libs/core/langchain_core/messages/human.py +++ b/libs/core/langchain_core/messages/human.py @@ -31,7 +31,7 @@ class HumanMessage(BaseMessage): type: Literal["human"] = "human" """The type of the message (used for serialization). - Defaults to ``'human'``. + Defaults to `'human'`. """ @@ -56,7 +56,7 @@ class HumanMessage(BaseMessage): content_blocks: list[types.ContentBlock] | None = None, **kwargs: Any, ) -> None: - """Specify ``content`` as positional arg or ``content_blocks`` for typing.""" + """Specify `content` as positional arg or `content_blocks` for typing.""" if content_blocks is not None: super().__init__( content=cast("str | list[str | dict]", content_blocks), diff --git a/libs/core/langchain_core/messages/system.py b/libs/core/langchain_core/messages/system.py index 54c66a04005..003d466960b 100644 --- a/libs/core/langchain_core/messages/system.py +++ b/libs/core/langchain_core/messages/system.py @@ -31,7 +31,7 @@ class SystemMessage(BaseMessage): type: Literal["system"] = "system" """The type of the message (used for serialization). - Defaults to ``'system'``. + Defaults to `'system'`. 
""" @@ -56,7 +56,7 @@ class SystemMessage(BaseMessage): content_blocks: list[types.ContentBlock] | None = None, **kwargs: Any, ) -> None: - """Specify ``content`` as positional arg or ``content_blocks`` for typing.""" + """Specify `content` as positional arg or `content_blocks` for typing.""" if content_blocks is not None: super().__init__( content=cast("str | list[str | dict]", content_blocks), @@ -75,6 +75,6 @@ class SystemMessageChunk(SystemMessage, BaseMessageChunk): type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment] """The type of the message (used for serialization). - Defaults to ``'SystemMessageChunk'``. + Defaults to `'SystemMessageChunk'`. """ diff --git a/libs/core/langchain_core/messages/tool.py b/libs/core/langchain_core/messages/tool.py index 0d0a2382b99..fd949587347 100644 --- a/libs/core/langchain_core/messages/tool.py +++ b/libs/core/langchain_core/messages/tool.py @@ -16,8 +16,8 @@ from langchain_core.utils._merge import merge_dicts, merge_obj class ToolOutputMixin: """Mixin for objects that tools can return directly. - If a custom BaseTool is invoked with a ``ToolCall`` and the output of custom code is - not an instance of ``ToolOutputMixin``, the output will automatically be coerced to + If a custom BaseTool is invoked with a `ToolCall` and the output of custom code is + not an instance of `ToolOutputMixin`, the output will automatically be coerced to a string and wrapped in a `ToolMessage`. """ @@ -27,9 +27,9 @@ class ToolMessage(BaseMessage, ToolOutputMixin): """Message for passing the result of executing a tool back to a model. `ToolMessage` objects contain the result of a tool invocation. Typically, the result - is encoded inside the ``content`` field. + is encoded inside the `content` field. - Example: A `ToolMessage` representing a result of ``42`` from a tool call with id + Example: A `ToolMessage` representing a result of `42` from a tool call with id .. code-block:: python @@ -72,7 +72,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin): type: Literal["tool"] = "tool" """The type of the message (used for serialization). - Defaults to ``'tool'``. + Defaults to `'tool'`. """ @@ -167,7 +167,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin): ) -> None: """Initialize `ToolMessage`. - Specify ``content`` as positional arg or ``content_blocks`` for typing. + Specify `content` as positional arg or `content_blocks` for typing. Args: content: The string contents of the message. @@ -224,8 +224,8 @@ class ToolCall(TypedDict): {"name": "foo", "args": {"a": 1}, "id": "123"} - This represents a request to call the tool named ``'foo'`` with arguments - ``{"a": 1}`` and an identifier of ``'123'``. + This represents a request to call the tool named `'foo'` with arguments + `{"a": 1}` and an identifier of `'123'`. """ @@ -265,9 +265,9 @@ def tool_call( class ToolCallChunk(TypedDict): """A chunk of a tool call (e.g., as part of a stream). - When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``), + When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`), all string attributes are concatenated. Chunks are only merged if their - values of ``index`` are equal and not None. + values of `index` are equal and not None. 
Example: diff --git a/libs/core/langchain_core/messages/utils.py b/libs/core/langchain_core/messages/utils.py index f192eba2e74..61a7d64007f 100644 --- a/libs/core/langchain_core/messages/utils.py +++ b/libs/core/langchain_core/messages/utils.py @@ -97,9 +97,9 @@ def get_buffer_string( Args: messages: Messages to be converted to strings. human_prefix: The prefix to prepend to contents of `HumanMessage`s. - Default is ``'Human'``. + Default is `'Human'`. ai_prefix: The prefix to prepend to contents of `AIMessage`. Default is - ``'AI'``. + `'AI'`. Returns: A single string concatenation of all input messages. @@ -178,7 +178,7 @@ def _message_from_dict(message: dict) -> BaseMessage: def messages_from_dict(messages: Sequence[dict]) -> list[BaseMessage]: - """Convert a sequence of messages from dicts to ``Message`` objects. + """Convert a sequence of messages from dicts to `Message` objects. Args: messages: Sequence of messages (as dicts) to convert. @@ -191,7 +191,7 @@ def messages_from_dict(messages: Sequence[dict]) -> list[BaseMessage]: def message_chunk_to_message(chunk: BaseMessage) -> BaseMessage: - """Convert a message chunk to a ``Message``. + """Convert a message chunk to a `Message`. Args: chunk: Message chunk to convert. @@ -224,10 +224,10 @@ def _create_message_from_message_type( id: str | None = None, **additional_kwargs: Any, ) -> BaseMessage: - """Create a message from a ``Message`` type and content string. + """Create a message from a `Message` type and content string. Args: - message_type: (str) the type of the message (e.g., ``'human'``, ``'ai'``, etc.). + message_type: (str) the type of the message (e.g., `'human'`, `'ai'`, etc.). content: (str) the content string. name: (str) the name of the message. Default is None. tool_call_id: (str) the tool call id. Default is None. @@ -239,9 +239,9 @@ def _create_message_from_message_type( a message of the appropriate type. Raises: - ValueError: if the message type is not one of ``'human'``, ``'user'``, ``'ai'``, - ``'assistant'``, ``'function'``, ``'tool'``, ``'system'``, or - ``'developer'``. + ValueError: if the message type is not one of `'human'`, `'user'`, `'ai'`, + `'assistant'`, `'function'`, `'tool'`, `'system'`, or + `'developer'`. """ kwargs: dict[str, Any] = {} if name is not None: @@ -307,15 +307,15 @@ def _create_message_from_message_type( def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage: - """Instantiate a ``Message`` from a variety of message formats. + """Instantiate a `Message` from a variety of message formats. The message format can be one of the following: - - ``BaseMessagePromptTemplate`` + - `BaseMessagePromptTemplate` - `BaseMessage` - - 2-tuple of (role string, template); e.g., (``'human'``, ``'{user_input}'``) + - 2-tuple of (role string, template); e.g., (`'human'`, `'{user_input}'`) - dict: a message dict with role and content keys - - string: shorthand for (``'human'``, template); e.g., ``'{user_input}'`` + - string: shorthand for (`'human'`, template); e.g., `'{user_input}'` Args: message: a representation of a message in one of the supported formats. @@ -430,11 +430,11 @@ def filter_messages( include_names: Message names to include. Default is None. exclude_names: Messages names to exclude. Default is None. include_types: Message types to include. Can be specified as string names - (e.g. ``'system'``, ``'human'``, ``'ai'``, ...) or as `BaseMessage` + (e.g. `'system'`, `'human'`, `'ai'`, ...) or as `BaseMessage` classes (e.g. `SystemMessage`, `HumanMessage`, `AIMessage`, ...). 
Default is None. exclude_types: Message types to exclude. Can be specified as string names - (e.g. ``'system'``, ``'human'``, ``'ai'``, ...) or as `BaseMessage` + (e.g. `'system'`, `'human'`, `'ai'`, ...) or as `BaseMessage` classes (e.g. `SystemMessage`, `HumanMessage`, `AIMessage`, ...). Default is None. include_ids: Message IDs to include. Default is None. @@ -442,17 +442,17 @@ def filter_messages( exclude_tool_calls: Tool call IDs to exclude. Default is None. Can be one of the following: - `True`: all `AIMessage`s with tool calls and all - `ToolMessage` objects will be excluded. + `ToolMessage` objects will be excluded. - a sequence of tool call IDs to exclude: - - `ToolMessage` objects with the corresponding tool call ID will be - excluded. - - The `tool_calls` in the AIMessage will be updated to exclude - matching tool calls. If all `tool_calls` are filtered from an - AIMessage, the whole message is excluded. + - `ToolMessage` objects with the corresponding tool call ID will be + excluded. + - The `tool_calls` in the AIMessage will be updated to exclude + matching tool calls. If all `tool_calls` are filtered from an + AIMessage, the whole message is excluded. Returns: - A list of Messages that meets at least one of the ``incl_*`` conditions and none - of the ``excl_*`` conditions. If not ``incl_*`` conditions are specified then + A list of Messages that meets at least one of the `incl_*` conditions and none + of the `excl_*` conditions. If not `incl_*` conditions are specified then anything that is not explicitly excluded will be included. Raises: @@ -571,7 +571,7 @@ def merge_message_runs( Args: messages: Sequence Message-like objects to merge. chunk_separator: Specify the string to be inserted between message chunks. - Defaults to ``'\n'``. + Defaults to `'\n'`. Returns: list of BaseMessages with consecutive runs of message types merged into single @@ -579,7 +579,7 @@ def merge_message_runs( the merged content is a concatenation of the two strings with a new-line separator. The separator inserted between message chunks can be controlled by specifying - any string with ``chunk_separator``. If at least one of the messages has a list + any string with `chunk_separator`. If at least one of the messages has a list of content blocks, the merged content is a list of content blocks. Example: @@ -706,7 +706,7 @@ def trim_messages( ) -> list[BaseMessage]: r"""Trim messages to be below a token count. - ``trim_messages`` can be used to reduce the size of a chat history to a specified + `trim_messages` can be used to reduce the size of a chat history to a specified token count or specified message count. In either case, if passing the trimmed chat history back into a chat model @@ -714,22 +714,22 @@ def trim_messages( properties: 1. The resulting chat history should be valid. Most chat models expect that chat - history starts with either (1) a `HumanMessage` or (2) a `SystemMessage` - followed by a `HumanMessage`. To achieve this, set ``start_on='human'``. - In addition, generally a `ToolMessage` can only appear after an `AIMessage` - that involved a tool call. - Please see the following link for more information about messages: - https://python.langchain.com/docs/concepts/#messages + history starts with either (1) a `HumanMessage` or (2) a `SystemMessage` + followed by a `HumanMessage`. To achieve this, set `start_on='human'`. + In addition, generally a `ToolMessage` can only appear after an `AIMessage` + that involved a tool call. 
+ Please see the following link for more information about messages: + https://python.langchain.com/docs/concepts/#messages 2. It includes recent messages and drops old messages in the chat history. - To achieve this set the ``strategy='last'``. + To achieve this set the `strategy='last'`. 3. Usually, the new chat history should include the `SystemMessage` if it - was present in the original chat history since the `SystemMessage` includes - special instructions to the chat model. The `SystemMessage` is almost always - the first message in the history if present. To achieve this set the - ``include_system=True``. + was present in the original chat history since the `SystemMessage` includes + special instructions to the chat model. The `SystemMessage` is almost always + the first message in the history if present. To achieve this set the + `include_system=True`. !!! note - The examples below show how to configure ``trim_messages`` to achieve a behavior + The examples below show how to configure `trim_messages` to achieve a behavior consistent with the above properties. Args: @@ -737,49 +737,49 @@ def trim_messages( max_tokens: Max token count of trimmed messages. token_counter: Function or llm for counting tokens in a `BaseMessage` or a list of `BaseMessage`. If a `BaseLanguageModel` is passed in then - ``BaseLanguageModel.get_num_tokens_from_messages()`` will be used. - Set to ``len`` to count the number of **messages** in the chat history. + `BaseLanguageModel.get_num_tokens_from_messages()` will be used. + Set to `len` to count the number of **messages** in the chat history. !!! note - Use ``count_tokens_approximately`` to get fast, approximate token + Use `count_tokens_approximately` to get fast, approximate token counts. - This is recommended for using ``trim_messages`` on the hot path, where + This is recommended for using `trim_messages` on the hot path, where exact token counting is not necessary. strategy: Strategy for trimming. - - ``'first'``: Keep the first ``<= n_count`` tokens of the messages. - - ``'last'``: Keep the last ``<= n_count`` tokens of the messages. - Default is ``'last'``. + - `'first'`: Keep the first `<= n_count` tokens of the messages. + - `'last'`: Keep the last `<= n_count` tokens of the messages. + Default is `'last'`. allow_partial: Whether to split a message if only part of the message can be - included. If ``strategy='last'`` then the last partial contents of a message - are included. If ``strategy='first'`` then the first partial contents of a + included. If `strategy='last'` then the last partial contents of a message + are included. If `strategy='first'` then the first partial contents of a message are included. Default is False. end_on: The message type to end on. If specified then every message after the - last occurrence of this type is ignored. If ``strategy='last'`` then this - is done before we attempt to get the last ``max_tokens``. If - ``strategy='first'`` then this is done after we get the first - ``max_tokens``. Can be specified as string names (e.g. ``'system'``, - ``'human'``, ``'ai'``, ...) or as `BaseMessage` classes (e.g. + last occurrence of this type is ignored. If `strategy='last'` then this + is done before we attempt to get the last `max_tokens`. If + `strategy='first'` then this is done after we get the first + `max_tokens`. Can be specified as string names (e.g. `'system'`, + `'human'`, `'ai'`, ...) or as `BaseMessage` classes (e.g. `SystemMessage`, `HumanMessage`, `AIMessage`, ...). Can be a single type or a list of types. 
Default is None. start_on: The message type to start on. Should only be specified if - ``strategy='last'``. If specified then every message before + `strategy='last'`. If specified then every message before the first occurrence of this type is ignored. This is done after we trim - the initial messages to the last ``max_tokens``. Does not - apply to a `SystemMessage` at index 0 if ``include_system=True``. Can be - specified as string names (e.g. ``'system'``, ``'human'``, ``'ai'``, ...) or + the initial messages to the last `max_tokens`. Does not + apply to a `SystemMessage` at index 0 if `include_system=True`. Can be + specified as string names (e.g. `'system'`, `'human'`, `'ai'`, ...) or as `BaseMessage` classes (e.g. `SystemMessage`, `HumanMessage`, `AIMessage`, ...). Can be a single type or a list of types. Default is None. include_system: Whether to keep the SystemMessage if there is one at index 0. - Should only be specified if ``strategy="last"``. + Should only be specified if `strategy="last"`. Default is False. - text_splitter: Function or ``langchain_text_splitters.TextSplitter`` for + text_splitter: Function or `langchain_text_splitters.TextSplitter` for splitting the string contents of a message. Only used if - ``allow_partial=True``. If ``strategy='last'`` then the last split tokens - from a partial message will be included. if ``strategy='first'`` then the + `allow_partial=True`. If `strategy='last'` then the last split tokens + from a partial message will be included. if `strategy='first'` then the first split tokens from a partial message will be included. Token splitter assumes that separators are kept, so that split contents can be directly concatenated to recreate the original text. Defaults to splitting on @@ -790,7 +790,7 @@ def trim_messages( Raises: ValueError: if two incompatible arguments are specified or an unrecognized - ``strategy`` is specified. + `strategy` is specified. Example: Trim chat history based on token count, keeping the `SystemMessage` if @@ -1042,21 +1042,21 @@ def convert_to_openai_messages( messages: Message-like object or iterable of objects whose contents are in OpenAI, Anthropic, Bedrock Converse, or VertexAI formats. text_format: How to format string or text block contents: - - ``'string'``: + - `'string'`: If a message has a string content, this is left as a string. If - a message has content blocks that are all of type ``'text'``, these + a message has content blocks that are all of type `'text'`, these are joined with a newline to make a single string. If a message has - content blocks and at least one isn't of type ``'text'``, then + content blocks and at least one isn't of type `'text'`, then all blocks are left as dicts. - - ``'block'``: + - `'block'`: If a message has a string content, this is turned into a list - with a single content block of type ``'text'``. If a message has + with a single content block of type `'text'`. If a message has content blocks these are left as is. include_id: Whether to include message ids in the openai messages, if they are present in the source messages. Raises: - ValueError: if an unrecognized ``text_format`` is specified, or if a message + ValueError: if an unrecognized `text_format` is specified, or if a message content block is missing expected keys. 
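A minimal sketch of the two `text_format` modes described above; the messages here are invented purely for illustration, and `convert_to_openai_messages` is assumed to be imported from `langchain_core.messages`:

```python
from langchain_core.messages import AIMessage, HumanMessage, convert_to_openai_messages

messages = [
    HumanMessage("What color is the sky?"),
    AIMessage([{"type": "text", "text": "It is blue."}]),
]

# "string": content made only of text blocks is collapsed into a plain string.
oai_strings = convert_to_openai_messages(messages, text_format="string")

# "block": string content is wrapped in a single text content block instead.
oai_blocks = convert_to_openai_messages(messages, text_format="block")
```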
Returns: diff --git a/libs/core/langchain_core/output_parsers/list.py b/libs/core/langchain_core/output_parsers/list.py index 908098ae15c..f2c087225c3 100644 --- a/libs/core/langchain_core/output_parsers/list.py +++ b/libs/core/langchain_core/output_parsers/list.py @@ -149,7 +149,7 @@ class CommaSeparatedListOutputParser(ListOutputParser): """Get the namespace of the langchain object. Returns: - ``["langchain", "output_parsers", "list"]`` + `["langchain", "output_parsers", "list"]` """ return ["langchain", "output_parsers", "list"] diff --git a/libs/core/langchain_core/output_parsers/string.py b/libs/core/langchain_core/output_parsers/string.py index 35c9aab89f4..566daa25d82 100644 --- a/libs/core/langchain_core/output_parsers/string.py +++ b/libs/core/langchain_core/output_parsers/string.py @@ -22,7 +22,7 @@ class StrOutputParser(BaseTransformOutputParser[str]): """Get the namespace of the langchain object. Returns: - ``["langchain", "schema", "output_parser"]`` + `["langchain", "schema", "output_parser"]` """ return ["langchain", "schema", "output_parser"] diff --git a/libs/core/langchain_core/outputs/__init__.py b/libs/core/langchain_core/outputs/__init__.py index 1e64d4a83aa..07b35d95591 100644 --- a/libs/core/langchain_core/outputs/__init__.py +++ b/libs/core/langchain_core/outputs/__init__.py @@ -12,7 +12,7 @@ When invoking models via the standard runnable methods (e.g. invoke, batch, etc. - LLMs will return regular text strings. In addition, users can access the raw output of either LLMs or chat models via -callbacks. The ``on_chat_model_end`` and ``on_llm_end`` callbacks will return an +callbacks. The `on_chat_model_end` and `on_llm_end` callbacks will return an LLMResult object containing the generated outputs and any additional information returned by the model provider. diff --git a/libs/core/langchain_core/outputs/chat_generation.py b/libs/core/langchain_core/outputs/chat_generation.py index a0887c65e3b..209f08937ab 100644 --- a/libs/core/langchain_core/outputs/chat_generation.py +++ b/libs/core/langchain_core/outputs/chat_generation.py @@ -15,10 +15,10 @@ from langchain_core.utils._merge import merge_dicts class ChatGeneration(Generation): """A single chat generation output. - A subclass of ``Generation`` that represents the response from a chat model + A subclass of `Generation` that represents the response from a chat model that generates chat messages. - The ``message`` attribute is a structured representation of the chat message. + The `message` attribute is a structured representation of the chat message. Most of the time, the message will be of type `AIMessage`. Users working with chat models will usually access information via either @@ -70,9 +70,9 @@ class ChatGeneration(Generation): class ChatGenerationChunk(ChatGeneration): - """``ChatGeneration`` chunk. + """`ChatGeneration` chunk. - ``ChatGeneration`` chunks can be concatenated with other ``ChatGeneration`` chunks. + `ChatGeneration` chunks can be concatenated with other `ChatGeneration` chunks. """ message: BaseMessageChunk diff --git a/libs/core/langchain_core/outputs/generation.py b/libs/core/langchain_core/outputs/generation.py index 2b42d1e8720..960563dac99 100644 --- a/libs/core/langchain_core/outputs/generation.py +++ b/libs/core/langchain_core/outputs/generation.py @@ -47,7 +47,7 @@ class Generation(Serializable): """Get the namespace of the langchain object. 
Returns: - ``["langchain", "schema", "output"]`` + `["langchain", "schema", "output"]` """ return ["langchain", "schema", "output"] @@ -56,16 +56,16 @@ class GenerationChunk(Generation): """Generation chunk, which can be concatenated with other Generation chunks.""" def __add__(self, other: GenerationChunk) -> GenerationChunk: - """Concatenate two ``GenerationChunk``s. + """Concatenate two `GenerationChunk`s. Args: - other: Another ``GenerationChunk`` to concatenate with. + other: Another `GenerationChunk` to concatenate with. Raises: - TypeError: If other is not a ``GenerationChunk``. + TypeError: If other is not a `GenerationChunk`. Returns: - A new ``GenerationChunk`` concatenated from self and other. + A new `GenerationChunk` concatenated from self and other. """ if isinstance(other, GenerationChunk): generation_info = merge_dicts( diff --git a/libs/core/langchain_core/outputs/llm_result.py b/libs/core/langchain_core/outputs/llm_result.py index fa6782a5a5e..ddb12a77f6d 100644 --- a/libs/core/langchain_core/outputs/llm_result.py +++ b/libs/core/langchain_core/outputs/llm_result.py @@ -30,8 +30,8 @@ class LLMResult(BaseModel): The second dimension of the list represents different candidate generations for a given prompt. - - When returned from **an LLM**, the type is ``list[list[Generation]]``. - - When returned from a **chat model**, the type is ``list[list[ChatGeneration]]``. + - When returned from **an LLM**, the type is `list[list[Generation]]`. + - When returned from a **chat model**, the type is `list[list[ChatGeneration]]`. ChatGeneration is a subclass of Generation that has a field for a structured chat message. @@ -97,7 +97,7 @@ class LLMResult(BaseModel): other: Another `LLMResult` object to compare against. Returns: - True if the generations and ``llm_output`` are equal, False otherwise. + True if the generations and `llm_output` are equal, False otherwise. """ if not isinstance(other, LLMResult): return NotImplemented diff --git a/libs/core/langchain_core/prompt_values.py b/libs/core/langchain_core/prompt_values.py index f572ce0eaf4..8380a3e067a 100644 --- a/libs/core/langchain_core/prompt_values.py +++ b/libs/core/langchain_core/prompt_values.py @@ -40,7 +40,7 @@ class PromptValue(Serializable, ABC): This is used to determine the namespace of the object when serializing. Returns: - ``["langchain", "schema", "prompt"]`` + `["langchain", "schema", "prompt"]` """ return ["langchain", "schema", "prompt"] @@ -67,7 +67,7 @@ class StringPromptValue(PromptValue): This is used to determine the namespace of the object when serializing. Returns: - ``["langchain", "prompts", "base"]`` + `["langchain", "prompts", "base"]` """ return ["langchain", "prompts", "base"] @@ -104,7 +104,7 @@ class ChatPromptValue(PromptValue): This is used to determine the namespace of the object when serializing. Returns: - ``["langchain", "prompts", "chat"]`` + `["langchain", "prompts", "chat"]` """ return ["langchain", "prompts", "chat"] diff --git a/libs/core/langchain_core/prompts/base.py b/libs/core/langchain_core/prompts/base.py index 37663a83a85..2e45a58b938 100644 --- a/libs/core/langchain_core/prompts/base.py +++ b/libs/core/langchain_core/prompts/base.py @@ -99,7 +99,7 @@ class BasePromptTemplate( """Get the namespace of the langchain object. 
Returns: - ``["langchain", "schema", "prompt_template"]`` + `["langchain", "schema", "prompt_template"]` """ return ["langchain", "schema", "prompt_template"] diff --git a/libs/core/langchain_core/prompts/chat.py b/libs/core/langchain_core/prompts/chat.py index ef89fde2043..e5f582deabd 100644 --- a/libs/core/langchain_core/prompts/chat.py +++ b/libs/core/langchain_core/prompts/chat.py @@ -238,11 +238,11 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC): template: a template. template_format: format of the template. Defaults to "f-string". partial_variables: A dictionary of variables that can be used to partially - fill in the template. For example, if the template is - `"{variable1} {variable2}"`, and `partial_variables` is - `{"variable1": "foo"}`, then the final prompt will be - `"foo {variable2}"`. - Defaults to `None`. + fill in the template. For example, if the template is + `"{variable1} {variable2}"`, and `partial_variables` is + `{"variable1": "foo"}`, then the final prompt will be + `"foo {variable2}"`. + Defaults to `None`. **kwargs: keyword arguments to pass to the constructor. Returns: @@ -685,7 +685,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC): Args: **kwargs: keyword arguments to use for filling in template variables - in all the template messages in this chat template. + in all the template messages in this chat template. Returns: formatted string. @@ -697,7 +697,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC): Args: **kwargs: keyword arguments to use for filling in template variables - in all the template messages in this chat template. + in all the template messages in this chat template. Returns: formatted string. @@ -781,7 +781,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate): Examples: !!! warning "Behavior changed in 0.2.24" You can pass any Message-like formats supported by - ``ChatPromptTemplate.from_messages()`` directly to ``ChatPromptTemplate()`` + `ChatPromptTemplate.from_messages()` directly to `ChatPromptTemplate()` init. .. code-block:: python @@ -902,11 +902,11 @@ class ChatPromptTemplate(BaseChatPromptTemplate): Args: messages: sequence of message representations. - A message can be represented using the following formats: - (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of - (message type, template); e.g., ("human", "{user_input}"), - (4) 2-tuple of (message class, template), (5) a string which is - shorthand for ("human", template); e.g., "{user_input}". + A message can be represented using the following formats: + (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of + (message type, template); e.g., ("human", "{user_input}"), + (4) 2-tuple of (message class, template), (5) a string which is + shorthand for ("human", template); e.g., "{user_input}". template_format: format of the template. Defaults to "f-string". input_variables: A list of the names of the variables whose values are required as inputs to the prompt. @@ -977,7 +977,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate): """Get the namespace of the langchain object. Returns: - ``["langchain", "prompts", "chat"]`` + `["langchain", "prompts", "chat"]` """ return ["langchain", "prompts", "chat"] @@ -1127,11 +1127,11 @@ class ChatPromptTemplate(BaseChatPromptTemplate): Args: messages: sequence of message representations. 
- A message can be represented using the following formats: - (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of - (message type, template); e.g., ("human", "{user_input}"), - (4) 2-tuple of (message class, template), (5) a string which is - shorthand for ("human", template); e.g., "{user_input}". + A message can be represented using the following formats: + (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of + (message type, template); e.g., ("human", "{user_input}"), + (4) 2-tuple of (message class, template), (5) a string which is + shorthand for ("human", template); e.g., "{user_input}". template_format: format of the template. Defaults to "f-string". Returns: @@ -1145,7 +1145,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate): Args: **kwargs: keyword arguments to use for filling in template variables - in all the template messages in this chat template. + in all the template messages in this chat template. Raises: ValueError: if messages are of unexpected types. @@ -1173,7 +1173,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate): Args: **kwargs: keyword arguments to use for filling in template variables - in all the template messages in this chat template. + in all the template messages in this chat template. Returns: list of formatted messages. @@ -1262,7 +1262,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate): Returns: If index is an int, returns the message at that index. - If index is a slice, returns a new ``ChatPromptTemplate`` + If index is a slice, returns a new `ChatPromptTemplate` containing the messages in that slice. """ if isinstance(index, slice): diff --git a/libs/core/langchain_core/prompts/dict.py b/libs/core/langchain_core/prompts/dict.py index 40fc99ef700..a8b0094f78e 100644 --- a/libs/core/langchain_core/prompts/dict.py +++ b/libs/core/langchain_core/prompts/dict.py @@ -77,7 +77,7 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]): """Get the namespace of the langchain object. Returns: - ``["langchain_core", "prompts", "dict"]`` + `["langchain_core", "prompts", "dict"]` """ return ["langchain_core", "prompts", "dict"] diff --git a/libs/core/langchain_core/prompts/few_shot_with_templates.py b/libs/core/langchain_core/prompts/few_shot_with_templates.py index 3ca6d9f426b..bf386d6cbff 100644 --- a/libs/core/langchain_core/prompts/few_shot_with_templates.py +++ b/libs/core/langchain_core/prompts/few_shot_with_templates.py @@ -49,7 +49,7 @@ class FewShotPromptWithTemplates(StringPromptTemplate): """Get the namespace of the langchain object. Returns: - ``["langchain", "prompts", "few_shot_with_templates"]`` + `["langchain", "prompts", "few_shot_with_templates"]` """ return ["langchain", "prompts", "few_shot_with_templates"] diff --git a/libs/core/langchain_core/prompts/image.py b/libs/core/langchain_core/prompts/image.py index ab237e39c9a..9f75013ed63 100644 --- a/libs/core/langchain_core/prompts/image.py +++ b/libs/core/langchain_core/prompts/image.py @@ -26,8 +26,8 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]): """Create an image prompt template. Raises: - ValueError: If the input variables contain ``'url'``, ``'path'``, or - ``'detail'``. + ValueError: If the input variables contain `'url'`, `'path'`, or + `'detail'`. """ if "input_variables" not in kwargs: kwargs["input_variables"] = [] @@ -52,7 +52,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]): """Get the namespace of the langchain object. 
Returns: - ``["langchain", "prompts", "image"]`` + `["langchain", "prompts", "image"]` """ return ["langchain", "prompts", "image"] @@ -93,7 +93,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]): Raises: ValueError: If the url is not provided. ValueError: If the url is not a string. - ValueError: If ``'path'`` is provided in the template or kwargs. + ValueError: If `'path'` is provided in the template or kwargs. Example: diff --git a/libs/core/langchain_core/prompts/message.py b/libs/core/langchain_core/prompts/message.py index 0ae76370b77..d1938ff4d4c 100644 --- a/libs/core/langchain_core/prompts/message.py +++ b/libs/core/langchain_core/prompts/message.py @@ -26,7 +26,7 @@ class BaseMessagePromptTemplate(Serializable, ABC): """Get the namespace of the langchain object. Returns: - ``["langchain", "prompts", "chat"]`` + `["langchain", "prompts", "chat"]` """ return ["langchain", "prompts", "chat"] diff --git a/libs/core/langchain_core/prompts/prompt.py b/libs/core/langchain_core/prompts/prompt.py index 47a05e79947..7b278add124 100644 --- a/libs/core/langchain_core/prompts/prompt.py +++ b/libs/core/langchain_core/prompts/prompt.py @@ -71,7 +71,7 @@ class PromptTemplate(StringPromptTemplate): """Get the namespace of the langchain object. Returns: - ``["langchain", "prompts", "prompt"]`` + `["langchain", "prompts", "prompt"]` """ return ["langchain", "prompts", "prompt"] @@ -144,10 +144,10 @@ class PromptTemplate(StringPromptTemplate): Raises: ValueError: If the template formats are not f-string or if there are conflicting partial variables. - NotImplementedError: If the other object is not a ``PromptTemplate`` or str. + NotImplementedError: If the other object is not a `PromptTemplate` or str. Returns: - A new ``PromptTemplate`` that is the combination of the two. + A new `PromptTemplate` that is the combination of the two. """ # Allow for easy combining if isinstance(other, PromptTemplate): diff --git a/libs/core/langchain_core/prompts/string.py b/libs/core/langchain_core/prompts/string.py index 7e7bc115fa1..581cff8bf5e 100644 --- a/libs/core/langchain_core/prompts/string.py +++ b/libs/core/langchain_core/prompts/string.py @@ -276,7 +276,7 @@ class StringPromptTemplate(BasePromptTemplate, ABC): """Get the namespace of the langchain object. Returns: - ``["langchain", "prompts", "base"]`` + `["langchain", "prompts", "base"]` """ return ["langchain", "prompts", "base"] diff --git a/libs/core/langchain_core/prompts/structured.py b/libs/core/langchain_core/prompts/structured.py index 23c4956f088..59a1c2754a1 100644 --- a/libs/core/langchain_core/prompts/structured.py +++ b/libs/core/langchain_core/prompts/structured.py @@ -65,8 +65,8 @@ class StructuredPrompt(ChatPromptTemplate): def get_lc_namespace(cls) -> list[str]: """Get the namespace of the langchain object. - For example, if the class is ``langchain.llms.openai.OpenAI``, then the - namespace is ``["langchain", "llms", "openai"]`` + For example, if the class is `langchain.llms.openai.OpenAI`, then the + namespace is `["langchain", "llms", "openai"]` Returns: The namespace of the langchain object. @@ -106,14 +106,14 @@ class StructuredPrompt(ChatPromptTemplate): Args: messages: sequence of message representations. 
- A message can be represented using the following formats: - (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of - (message type, template); e.g., ("human", "{user_input}"), - (4) 2-tuple of (message class, template), (5) a string which is - shorthand for ("human", template); e.g., "{user_input}" + A message can be represented using the following formats: + (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of + (message type, template); e.g., ("human", "{user_input}"), + (4) 2-tuple of (message class, template), (5) a string which is + shorthand for ("human", template); e.g., "{user_input}" schema: a dictionary representation of function call, or a Pydantic model. **kwargs: Any additional kwargs to pass through to - ``ChatModel.with_structured_output(schema, **kwargs)``. + `ChatModel.with_structured_output(schema, **kwargs)`. Returns: a structured prompt template diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py index 7843668bb7f..d5ca1d48c7e 100644 --- a/libs/core/langchain_core/runnables/base.py +++ b/libs/core/langchain_core/runnables/base.py @@ -129,22 +129,22 @@ class Runnable(ABC, Generic[Input, Output]): - **`batch`/`abatch`**: Efficiently transforms multiple inputs into outputs. - **`stream`/`astream`**: Streams output from a single input as it's produced. - **`astream_log`**: Streams output and selected intermediate results from an - input. + input. Built-in optimizations: - **Batch**: By default, batch runs invoke() in parallel using a thread pool - executor. Override to optimize batching. + executor. Override to optimize batching. - **Async**: Methods with `'a'` suffix are asynchronous. By default, they execute - the sync counterpart using asyncio's thread pool. - Override for native async. + the sync counterpart using asyncio's thread pool. + Override for native async. All methods accept an optional config argument, which can be used to configure execution, add tags and metadata for tracing and debugging etc. Runnables expose schematic information about their input, output and config via - the ``input_schema`` property, the ``output_schema`` property and ``config_schema`` + the `input_schema` property, the `output_schema` property and `config_schema` method. LCEL and Composition @@ -155,15 +155,15 @@ class Runnable(ABC, Generic[Input, Output]): Any chain constructed this way will automatically have sync, async, batch, and streaming support. - The main composition primitives are `RunnableSequence` and ``RunnableParallel``. + The main composition primitives are `RunnableSequence` and `RunnableParallel`. **`RunnableSequence`** invokes a series of runnables sequentially, with one Runnable's output serving as the next's input. Construct using - the ``|`` operator or by passing a list of runnables to `RunnableSequence`. + the `|` operator or by passing a list of runnables to `RunnableSequence`. - **``RunnableParallel``** invokes runnables concurrently, providing the same input + **`RunnableParallel`** invokes runnables concurrently, providing the same input to each. Construct it using a dict literal within a sequence or by passing a - dict to ``RunnableParallel``. + dict to `RunnableParallel`. For example, @@ -818,9 +818,9 @@ class Runnable(ABC, Generic[Input, Output]): Args: input: The input to the `Runnable`. config: A config to use when invoking the `Runnable`. 
- The config supports standard keys like ``'tags'``, ``'metadata'`` for - tracing purposes, ``'max_concurrency'`` for controlling how much work to - do in parallel, and other keys. Please refer to the ``RunnableConfig`` + The config supports standard keys like `'tags'`, `'metadata'` for + tracing purposes, `'max_concurrency'` for controlling how much work to + do in parallel, and other keys. Please refer to the `RunnableConfig` for more details. Defaults to `None`. Returns: @@ -838,9 +838,9 @@ class Runnable(ABC, Generic[Input, Output]): Args: input: The input to the `Runnable`. config: A config to use when invoking the `Runnable`. - The config supports standard keys like ``'tags'``, ``'metadata'`` for - tracing purposes, ``'max_concurrency'`` for controlling how much work to - do in parallel, and other keys. Please refer to the ``RunnableConfig`` + The config supports standard keys like `'tags'`, `'metadata'` for + tracing purposes, `'max_concurrency'` for controlling how much work to + do in parallel, and other keys. Please refer to the `RunnableConfig` for more details. Defaults to `None`. Returns: @@ -866,10 +866,10 @@ class Runnable(ABC, Generic[Input, Output]): Args: inputs: A list of inputs to the `Runnable`. config: A config to use when invoking the `Runnable`. The config supports - standard keys like ``'tags'``, ``'metadata'`` for - tracing purposes, ``'max_concurrency'`` for controlling how much work + standard keys like `'tags'`, `'metadata'` for + tracing purposes, `'max_concurrency'` for controlling how much work to do in parallel, and other keys. Please refer to the - ``RunnableConfig`` for more details. Defaults to `None`. + `RunnableConfig` for more details. Defaults to `None`. return_exceptions: Whether to return exceptions instead of raising them. Defaults to `False`. **kwargs: Additional keyword arguments to pass to the `Runnable`. @@ -933,9 +933,9 @@ class Runnable(ABC, Generic[Input, Output]): Args: inputs: A list of inputs to the `Runnable`. config: A config to use when invoking the `Runnable`. - The config supports standard keys like ``'tags'``, ``'metadata'`` for - tracing purposes, ``'max_concurrency'`` for controlling how much work to - do in parallel, and other keys. Please refer to the ``RunnableConfig`` + The config supports standard keys like `'tags'`, `'metadata'` for + tracing purposes, `'max_concurrency'` for controlling how much work to + do in parallel, and other keys. Please refer to the `RunnableConfig` for more details. Defaults to `None`. return_exceptions: Whether to return exceptions instead of raising them. Defaults to `False`. @@ -990,7 +990,7 @@ class Runnable(ABC, Generic[Input, Output]): return_exceptions: bool = False, **kwargs: Any | None, ) -> list[Output]: - """Default implementation runs `ainvoke` in parallel using ``asyncio.gather``. + """Default implementation runs `ainvoke` in parallel using `asyncio.gather`. The default implementation of `batch` works well for IO bound runnables. @@ -1000,9 +1000,9 @@ class Runnable(ABC, Generic[Input, Output]): Args: inputs: A list of inputs to the `Runnable`. config: A config to use when invoking the `Runnable`. - The config supports standard keys like ``'tags'``, ``'metadata'`` for - tracing purposes, ``'max_concurrency'`` for controlling how much work to - do in parallel, and other keys. Please refer to the ``RunnableConfig`` + The config supports standard keys like `'tags'`, `'metadata'` for + tracing purposes, `'max_concurrency'` for controlling how much work to + do in parallel, and other keys. 
Please refer to the `RunnableConfig` for more details. Defaults to `None`. return_exceptions: Whether to return exceptions instead of raising them. Defaults to `False`. @@ -1064,9 +1064,9 @@ class Runnable(ABC, Generic[Input, Output]): Args: inputs: A list of inputs to the `Runnable`. config: A config to use when invoking the `Runnable`. - The config supports standard keys like ``'tags'``, ``'metadata'`` for - tracing purposes, ``'max_concurrency'`` for controlling how much work to - do in parallel, and other keys. Please refer to the ``RunnableConfig`` + The config supports standard keys like `'tags'`, `'metadata'` for + tracing purposes, `'max_concurrency'` for controlling how much work to + do in parallel, and other keys. Please refer to the `RunnableConfig` for more details. Defaults to `None`. return_exceptions: Whether to return exceptions instead of raising them. Defaults to `False`. @@ -1213,7 +1213,7 @@ class Runnable(ABC, Generic[Input, Output]): input: The input to the `Runnable`. config: The config to use for the `Runnable`. diff: Whether to yield diffs between each step or the current state. - with_streamed_output_list: Whether to yield the ``streamed_output`` list. + with_streamed_output_list: Whether to yield the `streamed_output` list. include_names: Only include logs with these names. include_types: Only include logs with these types. include_tags: Only include logs with these tags. @@ -1223,7 +1223,7 @@ class Runnable(ABC, Generic[Input, Output]): **kwargs: Additional keyword arguments to pass to the `Runnable`. Yields: - A ``RunLogPatch`` or ``RunLog`` object. + A `RunLogPatch` or `RunLog` object. """ stream = LogStreamCallbackHandler( @@ -1271,24 +1271,24 @@ class Runnable(ABC, Generic[Input, Output]): about the progress of the `Runnable`, including `StreamEvent` from intermediate results. - A ``StreamEvent`` is a dictionary with the following schema: + A `StreamEvent` is a dictionary with the following schema: - - ``event``: **str** - Event names are of the format: - ``on_[runnable_type]_(start|stream|end)``. + - `event`: **str** - Event names are of the format: + `on_[runnable_type]_(start|stream|end)`. - `name`: **str** - The name of the `Runnable` that generated the event. - - ``run_id``: **str** - randomly generated ID associated with the given - execution of the `Runnable` that emitted the event. A child `Runnable` that gets - invoked as part of the execution of a parent `Runnable` is assigned its own - unique ID. - - ``parent_ids``: **list[str]** - The IDs of the parent runnables that generated - the event. The root `Runnable` will have an empty list. The order of the parent - IDs is from the root to the immediate parent. Only available for v2 version of - the API. The v1 version of the API will return an empty list. - - ``tags``: **list[str] | None** - The tags of the `Runnable` that generated - the event. - - ``metadata``: **dict[str, Any] | None** - The metadata of the `Runnable` that - generated the event. - - ``data``: **dict[str, Any]** + - `run_id`: **str** - randomly generated ID associated with the given + execution of the `Runnable` that emitted the event. A child `Runnable` that gets + invoked as part of the execution of a parent `Runnable` is assigned its own + unique ID. + - `parent_ids`: **list[str]** - The IDs of the parent runnables that generated + the event. The root `Runnable` will have an empty list. The order of the parent + IDs is from the root to the immediate parent. Only available for v2 version of + the API. 
The v1 version of the API will return an empty list. + - `tags`: **list[str] | None** - The tags of the `Runnable` that generated + the event. + - `metadata`: **dict[str, Any] | None** - The metadata of the `Runnable` that + generated the event. + - `data`: **dict[str, Any]** Below is a table that illustrates some events that might be emitted by various chains. Metadata fields have been omitted from the table for brevity. @@ -1300,35 +1300,35 @@ class Runnable(ABC, Generic[Input, Output]): +--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+ | event | name | chunk | input | output | +==========================+==================+=====================================+===================================================+=====================================================+ - | ``on_chat_model_start`` | [model name] | | ``{"messages": [[SystemMessage, HumanMessage]]}`` | | + | `on_chat_model_start` | [model name] | | `{"messages": [[SystemMessage, HumanMessage]]}` | | +--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+ - | ``on_chat_model_stream`` | [model name] | ``AIMessageChunk(content="hello")`` | | | + | `on_chat_model_stream` | [model name] | `AIMessageChunk(content="hello")` | | | +--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+ - | ``on_chat_model_end`` | [model name] | | ``{"messages": [[SystemMessage, HumanMessage]]}`` | ``AIMessageChunk(content="hello world")`` | + | `on_chat_model_end` | [model name] | | `{"messages": [[SystemMessage, HumanMessage]]}` | `AIMessageChunk(content="hello world")` | +--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+ - | ``on_llm_start`` | [model name] | | ``{'input': 'hello'}`` | | + | `on_llm_start` | [model name] | | `{'input': 'hello'}` | | +--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+ - | ``on_llm_stream`` | [model name] | ``'Hello' `` | | | + | `on_llm_stream` | [model name] | `'Hello' ` | | | +--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+ - | ``on_llm_end`` | [model name] | | ``'Hello human!'`` | | + | `on_llm_end` | [model name] | | `'Hello human!'` | | +--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+ - | ``on_chain_start`` | format_docs | | | | + | `on_chain_start` | format_docs | | | | +--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+ - | ``on_chain_stream`` | format_docs | ``'hello world!, goodbye world!'`` | | | + | `on_chain_stream` | format_docs | `'hello world!, goodbye world!'` | | | 
+--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+ - | ``on_chain_end`` | format_docs | | ``[Document(...)]`` | ``'hello world!, goodbye world!'`` | + | `on_chain_end` | format_docs | | `[Document(...)]` | `'hello world!, goodbye world!'` | +--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+ - | ``on_tool_start`` | some_tool | | ``{"x": 1, "y": "2"}`` | | + | `on_tool_start` | some_tool | | `{"x": 1, "y": "2"}` | | +--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+ - | ``on_tool_end`` | some_tool | | | ``{"x": 1, "y": "2"}`` | + | `on_tool_end` | some_tool | | | `{"x": 1, "y": "2"}` | +--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+ - | ``on_retriever_start`` | [retriever name] | | ``{"query": "hello"}`` | | + | `on_retriever_start` | [retriever name] | | `{"query": "hello"}` | | +--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+ - | ``on_retriever_end`` | [retriever name] | | ``{"query": "hello"}`` | ``[Document(...), ..]`` | + | `on_retriever_end` | [retriever name] | | `{"query": "hello"}` | `[Document(...), ..]` | +--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+ - | ``on_prompt_start`` | [template_name] | | ``{"question": "hello"}`` | | + | `on_prompt_start` | [template_name] | | `{"question": "hello"}` | | +--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+ - | ``on_prompt_end`` | [template_name] | | ``{"question": "hello"}`` | ``ChatPromptValue(messages: [SystemMessage, ...])`` | + | `on_prompt_end` | [template_name] | | `{"question": "hello"}` | `ChatPromptValue(messages: [SystemMessage, ...])` | +--------------------------+------------------+-------------------------------------+---------------------------------------------------+-----------------------------------------------------+ In addition to the standard events, users can also dispatch custom events (see example below). 
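For orientation, a minimal sketch of consuming the standard events listed in the table above. The chain, the `model` argument, and the prompt text are illustrative assumptions; any chat model `Runnable` can stand in, and the exact chunk contents depend on the provider:

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

# Illustrative prompt; `model` below can be any chat model Runnable.
prompt = ChatPromptTemplate.from_messages([("human", "{question}")])


async def stream_answer(model, question: str) -> None:
    chain = prompt | model | StrOutputParser()
    # Each event is a dict with "event", "name", "run_id", "data", ... keys.
    async for event in chain.astream_events({"question": question}, version="v2"):
        if event["event"] == "on_chat_model_stream":
            # For chat model stream events, data["chunk"] is an AIMessageChunk.
            print(event["data"]["chunk"].content, end="", flush=True)
    print()
```

Filtering on `event["event"]` as above (or passing `include_names` / `include_types` to `astream_events`) is usually enough to pull out just the token stream or just the final outputs.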
@@ -1347,7 +1347,7 @@ class Runnable(ABC, Generic[Input, Output]):
         Here are declarations associated with the standard events shown above:
-        ``format_docs``:
+        `format_docs`:
         ```python
         def format_docs(docs: list[Document]) -> str:
@@ -1358,7 +1358,7 @@ class Runnable(ABC, Generic[Input, Output]):
         format_docs = RunnableLambda(format_docs)
         ```
-        ``some_tool``:
+        `some_tool`:
         ```python
         @tool
@@ -1367,7 +1367,7 @@ class Runnable(ABC, Generic[Input, Output]):
             return {"x": x, "y": y}
         ```
-        ``prompt``:
+        `prompt`:
         ```python
         template = ChatPromptTemplate.from_messages(
@@ -1447,17 +1447,17 @@ class Runnable(ABC, Generic[Input, Output]):
         async for event in slow_thing.astream_events("some_input", version="v2"):
             print(event)
-        ```
+        ```
         Args:
             input: The input to the `Runnable`.
             config: The config to use for the `Runnable`.
             version: The version of the schema to use either `'v2'` or `'v1'`.
-                Users should use `'v2'`.
-                `'v1'` is for backwards compatibility and will be deprecated
-                in `0.4.0`.
-                No default will be assigned until the API is stabilized.
-                custom events will only be surfaced in `'v2'`.
+                Users should use `'v2'`.
+                `'v1'` is for backwards compatibility and will be deprecated
+                in `0.4.0`.
+                No default will be assigned until the API is stabilized.
+                custom events will only be surfaced in `'v2'`.
             include_names: Only include events from `Runnable` objects with matching names.
             include_types: Only include events from `Runnable` objects with matching types.
             include_tags: Only include events from `Runnable` objects with matching tags.
@@ -1864,8 +1864,8 @@ class Runnable(ABC, Generic[Input, Output]):
             stop_after_attempt: The maximum number of attempts to make before giving up.
                 Defaults to 3.
             exponential_jitter_params: Parameters for
-                ``tenacity.wait_exponential_jitter``. Namely: ``initial``, ``max``,
-                ``exp_base``, and ``jitter`` (all float values).
+                `tenacity.wait_exponential_jitter`. Namely: `initial`, `max`,
+                `exp_base`, and `jitter` (all float values).
         Returns:
             A new Runnable that retries the original Runnable on exceptions.
@@ -1950,7 +1950,7 @@ class Runnable(ABC, Generic[Input, Output]):
             fallbacks: A sequence of runnables to try if the original `Runnable` fails.
             exceptions_to_handle: A tuple of exception types to handle.
-                Defaults to ``(Exception,)``.
+                Defaults to `(Exception,)`.
             exception_key: If string is specified then handled exceptions will be passed
                 to fallbacks as part of the input under the specified key. If `None`,
                 exceptions will not be passed to fallbacks.
@@ -2025,7 +2025,7 @@ class Runnable(ABC, Generic[Input, Output]):
     ) -> Output:
         """Call with config.
-        Helper method to transform an ``Input`` value to an ``Output`` value,
+        Helper method to transform an `Input` value to an `Output` value,
         with callbacks.
         Use this method to implement `invoke` in subclasses.
@@ -2076,7 +2076,7 @@ class Runnable(ABC, Generic[Input, Output]):
     ) -> Output:
         """Async call with config.
-        Helper method to transform an ``Input`` value to an ``Output`` value,
+        Helper method to transform an `Input` value to an `Output` value,
         with callbacks.
         Use this method to implement `ainvoke` in subclasses.
@@ -2123,7 +2123,7 @@ class Runnable(ABC, Generic[Input, Output]):
     ) -> list[Output]:
         """Transform a list of inputs to a list of outputs, with callbacks.
-        Helper method to transform an ``Input`` value to an ``Output`` value,
+        Helper method to transform an `Input` value to an `Output` value,
         with callbacks.
         Use this method to implement `invoke` in subclasses.
""" @@ -2191,7 +2191,7 @@ class Runnable(ABC, Generic[Input, Output]): ) -> list[Output]: """Transform a list of inputs to a list of outputs, with callbacks. - Helper method to transform an ``Input`` value to an ``Output`` value, + Helper method to transform an `Input` value to an `Output` value, with callbacks. Use this method to implement `invoke` in subclasses. @@ -2261,10 +2261,10 @@ class Runnable(ABC, Generic[Input, Output]): ) -> Iterator[Output]: """Transform a stream with config. - Helper method to transform an ``Iterator`` of ``Input`` values into an - ``Iterator`` of ``Output`` values, with callbacks. + Helper method to transform an `Iterator` of `Input` values into an + `Iterator` of `Output` values, with callbacks. - Use this to implement `stream` or ``transform`` in `Runnable` subclasses. + Use this to implement `stream` or `transform` in `Runnable` subclasses. """ # tee the input so we can iterate over it twice @@ -2358,10 +2358,10 @@ class Runnable(ABC, Generic[Input, Output]): ) -> AsyncIterator[Output]: """Transform a stream with config. - Helper method to transform an Async ``Iterator`` of ``Input`` values into an - Async ``Iterator`` of ``Output`` values, with callbacks. + Helper method to transform an Async `Iterator` of `Input` values into an + Async `Iterator` of `Output` values, with callbacks. - Use this to implement `astream` or ``atransform`` in `Runnable` subclasses. + Use this to implement `astream` or `atransform` in `Runnable` subclasses. """ # tee the input so we can iterate over it twice @@ -2454,12 +2454,12 @@ class Runnable(ABC, Generic[Input, Output]): ) -> BaseTool: """Create a `BaseTool` from a `Runnable`. - ``as_tool`` will instantiate a `BaseTool` with a name, description, and - ``args_schema`` from a `Runnable`. Where possible, schemas are inferred - from ``runnable.get_input_schema``. Alternatively (e.g., if the + `as_tool` will instantiate a `BaseTool` with a name, description, and + `args_schema` from a `Runnable`. Where possible, schemas are inferred + from `runnable.get_input_schema`. Alternatively (e.g., if the `Runnable` takes a dict as input and the specific dict keys are not typed), - the schema can be specified directly with ``args_schema``. You can also - pass ``arg_types`` to just specify the required arguments and their types. + the schema can be specified directly with `args_schema`. You can also + pass `arg_types` to just specify the required arguments and their types. Args: args_schema: The schema for the tool. Defaults to `None`. @@ -2491,7 +2491,7 @@ class Runnable(ABC, Generic[Input, Output]): as_tool.invoke({"a": 3, "b": [1, 2]}) ``` - `dict` input, specifying schema via ``args_schema``: + `dict` input, specifying schema via `args_schema`: ```python from typing import Any @@ -2512,7 +2512,7 @@ class Runnable(ABC, Generic[Input, Output]): as_tool.invoke({"a": 3, "b": [1, 2]}) ``` - `dict` input, specifying schema via ``arg_types``: + `dict` input, specifying schema via `arg_types`: ```python from typing import Any @@ -2592,7 +2592,7 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]): """Configure particular `Runnable` fields at runtime. Args: - **kwargs: A dictionary of ``ConfigurableField`` instances to configure. + **kwargs: A dictionary of `ConfigurableField` instances to configure. Raises: ValueError: If a configuration key is not found in the `Runnable`. 
@@ -2651,11 +2651,11 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]): """Configure alternatives for `Runnable` objects that can be set at runtime. Args: - which: The ``ConfigurableField`` instance that will be used to select the + which: The `ConfigurableField` instance that will be used to select the alternative. default_key: The default key to use if no alternative is selected. Defaults to `'default'`. - prefix_keys: Whether to prefix the keys with the ``ConfigurableField`` id. + prefix_keys: Whether to prefix the keys with the `ConfigurableField` id. Defaults to `False`. **kwargs: A dictionary of keys to `Runnable` instances or callables that return `Runnable` instances. @@ -2789,7 +2789,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]): as it is used in virtually every chain. A `RunnableSequence` can be instantiated directly or more commonly by using the - ``|`` operator where either the left or right operands (or both) must be a + `|` operator where either the left or right operands (or both) must be a `Runnable`. Any `RunnableSequence` automatically supports sync, async, batch. @@ -2802,7 +2802,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]): `RunnableSequence` in order. A `RunnableSequence` preserves the streaming properties of its components, so if - all components of the sequence implement a ``transform`` method -- which + all components of the sequence implement a `transform` method -- which is the method that implements the logic to map a streaming input to a streaming output -- then the sequence will be able to stream input to output! @@ -2811,12 +2811,12 @@ class RunnableSequence(RunnableSerializable[Input, Output]): multiple blocking components, streaming begins after the last one. !!! note - ``RunnableLambdas`` do not support ``transform`` by default! So if you need to - use a ``RunnableLambdas`` be careful about where you place them in a + `RunnableLambdas` do not support `transform` by default! So if you need to + use a `RunnableLambdas` be careful about where you place them in a `RunnableSequence` (if you need to use the `stream`/`astream` methods). If you need arbitrary logic and need streaming, you can subclass - Runnable, and implement ``transform`` for whatever logic you need. + Runnable, and implement `transform` for whatever logic you need. Here is a simple example that uses simple functions to illustrate the use of `RunnableSequence`: @@ -2920,7 +2920,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]): """Get the namespace of the langchain object. Returns: - ``["langchain", "schema", "runnable"]`` + `["langchain", "schema", "runnable"]` """ return ["langchain", "schema", "runnable"] @@ -3532,15 +3532,15 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]): Returns a mapping of their outputs. - ``RunnableParallel`` is one of the two main composition primitives for the LCEL, + `RunnableParallel` is one of the two main composition primitives for the LCEL, alongside `RunnableSequence`. It invokes `Runnable`s concurrently, providing the same input to each. - A ``RunnableParallel`` can be instantiated directly or by using a dict literal + A `RunnableParallel` can be instantiated directly or by using a dict literal within a sequence. 
Here is a simple example that uses functions to illustrate the use of - ``RunnableParallel``: + `RunnableParallel`: ```python from langchain_core.runnables import RunnableLambda @@ -3583,7 +3583,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]): await sequence.abatch([1, 2, 3]) ``` - ``RunnableParallel`` makes it easy to run `Runnable`s in parallel. In the below + `RunnableParallel` makes it easy to run `Runnable`s in parallel. In the below example, we simultaneously stream output from two different `Runnable` objects: ```python @@ -3626,7 +3626,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]): | Callable[[Input], Any] | Mapping[str, Runnable[Input, Any] | Callable[[Input], Any]], ) -> None: - """Create a ``RunnableParallel``. + """Create a `RunnableParallel`. Args: steps__: The steps to include. Defaults to `None`. @@ -3651,7 +3651,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]): """Get the namespace of the langchain object. Returns: - ``["langchain", "schema", "runnable"]`` + `["langchain", "schema", "runnable"]` """ return ["langchain", "schema", "runnable"] @@ -4055,22 +4055,22 @@ RunnableMap = RunnableParallel class RunnableGenerator(Runnable[Input, Output]): """`Runnable` that runs a generator function. - ``RunnableGenerator``s can be instantiated directly or by using a generator within + `RunnableGenerator`s can be instantiated directly or by using a generator within a sequence. - ``RunnableGenerator``s can be used to implement custom behavior, such as custom + `RunnableGenerator`s can be used to implement custom behavior, such as custom output parsers, while preserving streaming capabilities. Given a generator function - with a signature ``Iterator[A] -> Iterator[B]``, wrapping it in a - ``RunnableGenerator`` allows it to emit output chunks as soon as they are streamed + with a signature `Iterator[A] -> Iterator[B]`, wrapping it in a + `RunnableGenerator` allows it to emit output chunks as soon as they are streamed in from the previous step. !!! note - If a generator function has a ``signature A -> Iterator[B]``, such that it + If a generator function has a `signature A -> Iterator[B]`, such that it requires its input from the previous step to be completed before emitting chunks (e.g., most LLMs need the entire prompt available to start generating), it can - instead be wrapped in a ``RunnableLambda``. + instead be wrapped in a `RunnableLambda`. - Here is an example to show the basic mechanics of a ``RunnableGenerator``: + Here is an example to show the basic mechanics of a `RunnableGenerator`: ```python from typing import Any, AsyncIterator, Iterator @@ -4100,7 +4100,7 @@ class RunnableGenerator(Runnable[Input, Output]): [p async for p in runnable.astream(None)] # ["Have", " a", " nice", " day"] ``` - ``RunnableGenerator`` makes it easy to implement custom behavior within a streaming + `RunnableGenerator` makes it easy to implement custom behavior within a streaming context. Below we show an example: ```python @@ -4153,7 +4153,7 @@ class RunnableGenerator(Runnable[Input, Output]): *, name: str | None = None, ) -> None: - """Initialize a ``RunnableGenerator``. + """Initialize a `RunnableGenerator`. Args: transform: The transform function. @@ -4355,20 +4355,20 @@ class RunnableGenerator(Runnable[Input, Output]): class RunnableLambda(Runnable[Input, Output]): - """``RunnableLambda`` converts a python callable into a `Runnable`. + """`RunnableLambda` converts a python callable into a `Runnable`. 
- Wrapping a callable in a ``RunnableLambda`` makes the callable usable + Wrapping a callable in a `RunnableLambda` makes the callable usable within either a sync or async context. - ``RunnableLambda`` can be composed as any other `Runnable` and provides + `RunnableLambda` can be composed as any other `Runnable` and provides seamless integration with LangChain tracing. - ``RunnableLambda`` is best suited for code that does not need to support + `RunnableLambda` is best suited for code that does not need to support streaming. If you need to support streaming (i.e., be able to operate - on chunks of inputs and yield chunks of outputs), use ``RunnableGenerator`` + on chunks of inputs and yield chunks of outputs), use `RunnableGenerator` instead. - Note that if a ``RunnableLambda`` returns an instance of `Runnable`, that + Note that if a `RunnableLambda` returns an instance of `Runnable`, that instance is invoked (or streamed) during execution. Examples: @@ -4427,7 +4427,7 @@ class RunnableLambda(Runnable[Input, Output]): | None = None, name: str | None = None, ) -> None: - """Create a ``RunnableLambda`` from a callable, and async callable or both. + """Create a `RunnableLambda` from a callable, and async callable or both. Accepts both sync and async variants to allow providing efficient implementations for sync and async execution. @@ -4439,8 +4439,8 @@ class RunnableLambda(Runnable[Input, Output]): name: The name of the `Runnable`. Defaults to `None`. Raises: - TypeError: If the ``func`` is not a callable type. - TypeError: If both ``func`` and ``afunc`` are provided. + TypeError: If the `func` is not a callable type. + TypeError: If both `func` and `afunc` are provided. """ if afunc is not None: @@ -5100,10 +5100,10 @@ class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]): `Runnable` that calls another `Runnable` for each element of the input sequence. - Use only if creating a new ``RunnableEach`` subclass with different `__init__` + Use only if creating a new `RunnableEach` subclass with different `__init__` args. - See documentation for ``RunnableEach`` for more details. + See documentation for `RunnableEach` for more details. """ @@ -5180,7 +5180,7 @@ class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]): """Get the namespace of the langchain object. Returns: - ``["langchain", "schema", "runnable"]`` + `["langchain", "schema", "runnable"]` """ return ["langchain", "schema", "runnable"] @@ -5243,7 +5243,7 @@ class RunnableEach(RunnableEachBase[Input, Output]): It allows you to call multiple inputs with the bounded `Runnable`. - ``RunnableEach`` makes it easy to run multiple inputs for the `Runnable`. + `RunnableEach` makes it easy to run multiple inputs for the `Runnable`. In the below example, we associate and run three inputs with a `Runnable`: @@ -5356,10 +5356,10 @@ class RunnableEach(RunnableEachBase[Input, Output]): class RunnableBindingBase(RunnableSerializable[Input, Output]): # type: ignore[no-redef] """`Runnable` that delegates calls to another `Runnable` with a set of kwargs. - Use only if creating a new ``RunnableBinding`` subclass with different `__init__` + Use only if creating a new `RunnableBinding` subclass with different `__init__` args. - See documentation for ``RunnableBinding`` for more details. + See documentation for `RunnableBinding` for more details. 
""" @@ -5387,13 +5387,13 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]): # type: ignore[ custom_input_type: Any | None = None """Override the input type of the underlying `Runnable` with a custom type. - The type can be a pydantic model, or a type annotation (e.g., ``list[str]``). + The type can be a pydantic model, or a type annotation (e.g., `list[str]`). """ # Union[Type[Output], BaseModel] + things like list[str] custom_output_type: Any | None = None """Override the output type of the underlying `Runnable` with a custom type. - The type can be a pydantic model, or a type annotation (e.g., ``list[str]``). + The type can be a pydantic model, or a type annotation (e.g., `list[str]`). """ model_config = ConfigDict( @@ -5412,14 +5412,14 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]): # type: ignore[ custom_output_type: type[Output] | BaseModel | None = None, **other_kwargs: Any, ) -> None: - """Create a ``RunnableBinding`` from a `Runnable` and kwargs. + """Create a `RunnableBinding` from a `Runnable` and kwargs. Args: bound: The underlying `Runnable` that this `Runnable` delegates calls to. kwargs: optional kwargs to pass to the underlying `Runnable`, when running the underlying `Runnable` (e.g., via `invoke`, `batch`, - ``transform``, or `stream` or async variants) + `transform`, or `stream` or async variants) Defaults to `None`. config: optional config to bind to the underlying `Runnable`. Defaults to `None`. @@ -5503,7 +5503,7 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]): # type: ignore[ """Get the namespace of the langchain object. Returns: - ``["langchain", "schema", "runnable"]`` + `["langchain", "schema", "runnable"]` """ return ["langchain", "schema", "runnable"] @@ -5758,25 +5758,25 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]): # type: ignore[ class RunnableBinding(RunnableBindingBase[Input, Output]): # type: ignore[no-redef] """Wrap a `Runnable` with additional functionality. - A ``RunnableBinding`` can be thought of as a "runnable decorator" that + A `RunnableBinding` can be thought of as a "runnable decorator" that preserves the essential features of `Runnable`; i.e., batching, streaming, and async support, while adding additional functionality. - Any class that inherits from `Runnable` can be bound to a ``RunnableBinding``. - Runnables expose a standard set of methods for creating ``RunnableBindings`` - or sub-classes of ``RunnableBindings`` (e.g., ``RunnableRetry``, - ``RunnableWithFallbacks``) that add additional functionality. + Any class that inherits from `Runnable` can be bound to a `RunnableBinding`. + Runnables expose a standard set of methods for creating `RunnableBindings` + or sub-classes of `RunnableBindings` (e.g., `RunnableRetry`, + `RunnableWithFallbacks`) that add additional functionality. These methods include: - - ``bind``: Bind kwargs to pass to the underlying `Runnable` when running it. - - ``with_config``: Bind config to pass to the underlying `Runnable` when running - it. - - ``with_listeners``: Bind lifecycle listeners to the underlying `Runnable`. - - ``with_types``: Override the input and output types of the underlying - `Runnable`. - - ``with_retry``: Bind a retry policy to the underlying `Runnable`. - - ``with_fallbacks``: Bind a fallback policy to the underlying `Runnable`. + - `bind`: Bind kwargs to pass to the underlying `Runnable` when running it. + - `with_config`: Bind config to pass to the underlying `Runnable` when running + it. 
+ - `with_listeners`: Bind lifecycle listeners to the underlying `Runnable`. + - `with_types`: Override the input and output types of the underlying + `Runnable`. + - `with_retry`: Bind a retry policy to the underlying `Runnable`. + - `with_fallbacks`: Bind a fallback policy to the underlying `Runnable`. Example: `bind`: Bind kwargs to pass to the underlying `Runnable` when running it. @@ -5793,7 +5793,7 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): # type: ignore[no-re runnable_binding = model.bind(stop=["-"]) runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot` ``` - Can also be done by instantiating a ``RunnableBinding`` directly (not + Can also be done by instantiating a `RunnableBinding` directly (not recommended): ```python @@ -6062,7 +6062,7 @@ def chain( Any runnables called by the function will be traced as dependencies. Args: - func: A ``Callable``. + func: A `Callable`. Returns: A `Runnable`. diff --git a/libs/core/langchain_core/runnables/branch.py b/libs/core/langchain_core/runnables/branch.py index 289f2e89171..deba85521c0 100644 --- a/libs/core/langchain_core/runnables/branch.py +++ b/libs/core/langchain_core/runnables/branch.py @@ -149,7 +149,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]): """Get the namespace of the langchain object. Returns: - ``["langchain", "schema", "runnable"]`` + `["langchain", "schema", "runnable"]` """ return ["langchain", "schema", "runnable"] diff --git a/libs/core/langchain_core/runnables/configurable.py b/libs/core/langchain_core/runnables/configurable.py index 73d4cbdc162..da4e75f72fb 100644 --- a/libs/core/langchain_core/runnables/configurable.py +++ b/libs/core/langchain_core/runnables/configurable.py @@ -75,7 +75,7 @@ class DynamicRunnable(RunnableSerializable[Input, Output]): """Get the namespace of the langchain object. Returns: - ``["langchain", "schema", "runnable"]`` + `["langchain", "schema", "runnable"]` """ return ["langchain", "schema", "runnable"] diff --git a/libs/core/langchain_core/runnables/fallbacks.py b/libs/core/langchain_core/runnables/fallbacks.py index 1082ecca588..61f1753d8dd 100644 --- a/libs/core/langchain_core/runnables/fallbacks.py +++ b/libs/core/langchain_core/runnables/fallbacks.py @@ -47,8 +47,8 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]): of a chain of Runnables. Fallbacks are tried in order until one succeeds or all fail. - While you can instantiate a ``RunnableWithFallbacks`` directly, it is usually - more convenient to use the ``with_fallbacks`` method on a Runnable. + While you can instantiate a `RunnableWithFallbacks` directly, it is usually + more convenient to use the `with_fallbacks` method on a Runnable. Example: ```python @@ -146,7 +146,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]): """Get the namespace of the langchain object. Returns: - ``["langchain", "schema", "runnable"]`` + `["langchain", "schema", "runnable"]` """ return ["langchain", "schema", "runnable"] diff --git a/libs/core/langchain_core/runnables/graph_ascii.py b/libs/core/langchain_core/runnables/graph_ascii.py index d2abbbeb37a..cc06d7f087f 100644 --- a/libs/core/langchain_core/runnables/graph_ascii.py +++ b/libs/core/langchain_core/runnables/graph_ascii.py @@ -62,8 +62,8 @@ class AsciiCanvas: """Create an ASCII canvas. Args: - cols: number of columns in the canvas. Should be ``> 1``. - lines: number of lines in the canvas. Should be ``> 1``. + cols: number of columns in the canvas. Should be `> 1`. + lines: number of lines in the canvas. 
Should be `> 1`. Raises: ValueError: if canvas dimensions are invalid. @@ -90,9 +90,9 @@ class AsciiCanvas: """Create a point on ASCII canvas. Args: - x: x coordinate. Should be ``>= 0`` and ``<`` number of columns in + x: x coordinate. Should be `>= 0` and `<` number of columns in the canvas. - y: y coordinate. Should be ``>= 0`` an ``<`` number of lines in the + y: y coordinate. Should be `>= 0` an `<` number of lines in the canvas. char: character to place in the specified point on the canvas. diff --git a/libs/core/langchain_core/runnables/graph_png.py b/libs/core/langchain_core/runnables/graph_png.py index 73e8b495ef6..a01a1c475f3 100644 --- a/libs/core/langchain_core/runnables/graph_png.py +++ b/libs/core/langchain_core/runnables/graph_png.py @@ -15,7 +15,7 @@ except ImportError: class PngDrawer: """Helper class to draw a state graph into a PNG file. - It requires ``graphviz`` and ``pygraphviz`` to be installed. + It requires `graphviz` and `pygraphviz` to be installed. Example: ```python @@ -126,10 +126,10 @@ class PngDrawer: output_path: The path to save the PNG. If `None`, PNG bytes are returned. Raises: - ImportError: If ``pygraphviz`` is not installed. + ImportError: If `pygraphviz` is not installed. Returns: - The PNG bytes if ``output_path`` is None, else None. + The PNG bytes if `output_path` is None, else None. """ if not _HAS_PYGRAPHVIZ: msg = "Install pygraphviz to draw graphs: `pip install pygraphviz`." diff --git a/libs/core/langchain_core/runnables/history.py b/libs/core/langchain_core/runnables/history.py index 2a149531853..ce77aac9450 100644 --- a/libs/core/langchain_core/runnables/history.py +++ b/libs/core/langchain_core/runnables/history.py @@ -57,17 +57,17 @@ class RunnableWithMessageHistory(RunnableBindingBase): # type: ignore[no-redef] In this case, the invocation would look like this: `with_history.invoke(..., config={"configurable": {"session_id": "bar"}})` - ; e.g., ``{"configurable": {"session_id": ""}}``. + ; e.g., `{"configurable": {"session_id": ""}}`. The configuration can be customized by passing in a list of - ``ConfigurableFieldSpec`` objects to the ``history_factory_config`` parameter (see + `ConfigurableFieldSpec` objects to the `history_factory_config` parameter (see example below). In the examples, we will use a chat message history with an in-memory implementation to make it easy to experiment and see the results. For production use cases, you will want to use a persistent implementation - of chat message history, such as ``RedisChatMessageHistory``. + of chat message history, such as `RedisChatMessageHistory`. Example: Chat message history with an in-memory implementation for testing. @@ -224,7 +224,7 @@ class RunnableWithMessageHistory(RunnableBindingBase): # type: ignore[no-redef] get_session_history: GetSessionHistoryCallable """Function that returns a new BaseChatMessageHistory. - This function should either take a single positional argument ``session_id`` of type + This function should either take a single positional argument `session_id` of type string and return a corresponding chat message history instance""" input_messages_key: str | None = None """Must be specified if the base runnable accepts a dict as input. @@ -237,7 +237,7 @@ class RunnableWithMessageHistory(RunnableBindingBase): # type: ignore[no-redef] separate key for historical messages.""" history_factory_config: Sequence[ConfigurableFieldSpec] """Configure fields that should be passed to the chat history factory. 
- See ``ConfigurableFieldSpec`` for more details.""" + See `ConfigurableFieldSpec` for more details.""" def __init__( self, @@ -263,8 +263,8 @@ class RunnableWithMessageHistory(RunnableBindingBase): # type: ignore[no-redef] 1. A list of `BaseMessage` 2. A dict with one key for all messages 3. A dict with one key for the current input string/message(s) and - a separate key for historical messages. If the input key points - to a string, it will be treated as a `HumanMessage` in history. + a separate key for historical messages. If the input key points + to a string, it will be treated as a `HumanMessage` in history. Must return as output one of: @@ -302,11 +302,11 @@ class RunnableWithMessageHistory(RunnableBindingBase): # type: ignore[no-redef] history_messages_key: Must be specified if the base runnable accepts a dict as input and expects a separate key for historical messages. history_factory_config: Configure fields that should be passed to the - chat history factory. See ``ConfigurableFieldSpec`` for more details. + chat history factory. See `ConfigurableFieldSpec` for more details. Specifying these allows you to pass multiple config keys into the get_session_history factory. **kwargs: Arbitrary additional kwargs to pass to parent class - ``RunnableBindingBase`` init. + `RunnableBindingBase` init. """ history_chain: Runnable = RunnableLambda( diff --git a/libs/core/langchain_core/runnables/passthrough.py b/libs/core/langchain_core/runnables/passthrough.py index e54ac53a329..76a0aa2126a 100644 --- a/libs/core/langchain_core/runnables/passthrough.py +++ b/libs/core/langchain_core/runnables/passthrough.py @@ -188,7 +188,7 @@ class RunnablePassthrough(RunnableSerializable[Other, Other]): """Get the namespace of the langchain object. Returns: - ``["langchain", "schema", "runnable"]`` + `["langchain", "schema", "runnable"]` """ return ["langchain", "schema", "runnable"] @@ -395,7 +395,7 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]): """Create a RunnableAssign. Args: - mapper: A ``RunnableParallel`` instance that will be used to transform the + mapper: A `RunnableParallel` instance that will be used to transform the input dictionary. """ super().__init__(mapper=mapper, **kwargs) @@ -412,7 +412,7 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]): """Get the namespace of the langchain object. Returns: - ``["langchain", "schema", "runnable"]`` + `["langchain", "schema", "runnable"]` """ return ["langchain", "schema", "runnable"] @@ -717,7 +717,7 @@ class RunnablePick(RunnableSerializable[dict[str, Any], dict[str, Any]]): """Get the namespace of the langchain object. 
Returns: - ``["langchain", "schema", "runnable"]`` + `["langchain", "schema", "runnable"]` """ return ["langchain", "schema", "runnable"] diff --git a/libs/core/langchain_core/runnables/retry.py b/libs/core/langchain_core/runnables/retry.py index 7fe626f8a3d..ed0adf8eaf7 100644 --- a/libs/core/langchain_core/runnables/retry.py +++ b/libs/core/langchain_core/runnables/retry.py @@ -33,7 +33,7 @@ U = TypeVar("U") class ExponentialJitterParams(TypedDict, total=False): - """Parameters for ``tenacity.wait_exponential_jitter``.""" + """Parameters for `tenacity.wait_exponential_jitter`.""" initial: float """Initial wait.""" @@ -125,8 +125,8 @@ class RunnableRetry(RunnableBindingBase[Input, Output]): # type: ignore[no-rede """Whether to add jitter to the exponential backoff.""" exponential_jitter_params: ExponentialJitterParams | None = None - """Parameters for ``tenacity.wait_exponential_jitter``. Namely: ``initial``, - ``max``, ``exp_base``, and ``jitter`` (all float values). + """Parameters for `tenacity.wait_exponential_jitter`. Namely: `initial`, + `max`, `exp_base`, and `jitter` (all float values). """ max_attempt_number: int = 3 diff --git a/libs/core/langchain_core/runnables/router.py b/libs/core/langchain_core/runnables/router.py index 0bffd159836..89b500d5e09 100644 --- a/libs/core/langchain_core/runnables/router.py +++ b/libs/core/langchain_core/runnables/router.py @@ -99,7 +99,7 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]): """Get the namespace of the langchain object. Returns: - ``["langchain", "schema", "runnable"]`` + `["langchain", "schema", "runnable"]` """ return ["langchain", "schema", "runnable"] diff --git a/libs/core/langchain_core/runnables/utils.py b/libs/core/langchain_core/runnables/utils.py index f961ffb7e61..51318bee13d 100644 --- a/libs/core/langchain_core/runnables/utils.py +++ b/libs/core/langchain_core/runnables/utils.py @@ -120,10 +120,10 @@ def accepts_context(callable: Callable[..., Any]) -> bool: # noqa: A002 @lru_cache(maxsize=1) def asyncio_accepts_context() -> bool: - """Cache the result of checking if asyncio.create_task accepts a ``context`` arg. + """Cache the result of checking if asyncio.create_task accepts a `context` arg. Returns: - True if ``asyncio.create_task`` accepts a context argument, False otherwise. + True if `asyncio.create_task` accepts a context argument, False otherwise. """ return accepts_context(asyncio.create_task) diff --git a/libs/core/langchain_core/tools/base.py b/libs/core/langchain_core/tools/base.py index c040b644e15..fe77c878915 100644 --- a/libs/core/langchain_core/tools/base.py +++ b/libs/core/langchain_core/tools/base.py @@ -294,7 +294,7 @@ def create_schema_from_function( Defaults to FILTERED_ARGS. parse_docstring: Whether to parse the function's docstring for descriptions for each argument. Defaults to `False`. - error_on_invalid_docstring: if ``parse_docstring`` is provided, configure + error_on_invalid_docstring: if `parse_docstring` is provided, configure whether to raise ValueError on invalid Google Style docstrings. Defaults to `False`. include_injected: Whether to include injected arguments in the schema. @@ -492,7 +492,7 @@ class ChildTool(BaseTool): """Initialize the tool. Raises: - TypeError: If ``args_schema`` is not a subclass of pydantic `BaseModel` or + TypeError: If `args_schema` is not a subclass of pydantic `BaseModel` or dict. """ if ( @@ -616,7 +616,7 @@ class ChildTool(BaseTool): The parsed and validated input. 
Raises: - ValueError: If string input is provided with JSON schema ``args_schema``. + ValueError: If string input is provided with JSON schema `args_schema`. ValueError: If InjectedToolCallId is required but `tool_call_id` is not provided. TypeError: If args_schema is not a Pydantic `BaseModel` or dict. diff --git a/libs/core/langchain_core/tools/convert.py b/libs/core/langchain_core/tools/convert.py index 30f7bfab4fa..392638c8aa5 100644 --- a/libs/core/langchain_core/tools/convert.py +++ b/libs/core/langchain_core/tools/convert.py @@ -91,11 +91,11 @@ def tool( description: Optional description for the tool. Precedence for the tool description value is as follows: - - ``description`` argument - (used even if docstring and/or ``args_schema`` are provided) + - `description` argument + (used even if docstring and/or `args_schema` are provided) - tool function docstring - (used even if ``args_schema`` is provided) - - ``args_schema`` description + (used even if `args_schema` is provided) + - `args_schema` description (used only if `description` / docstring are not provided) *args: Extra positional arguments. Must be empty. return_direct: Whether to return directly from the tool rather @@ -111,10 +111,10 @@ def tool( "content_and_artifact" then the output is expected to be a two-tuple corresponding to the (content, artifact) of a ToolMessage. Defaults to "content". - parse_docstring: if ``infer_schema`` and ``parse_docstring``, will attempt to + parse_docstring: if `infer_schema` and `parse_docstring`, will attempt to parse parameter descriptions from Google Style function docstrings. Defaults to `False`. - error_on_invalid_docstring: if ``parse_docstring`` is provided, configure + error_on_invalid_docstring: if `parse_docstring` is provided, configure whether to raise ValueError on invalid Google Style docstrings. Defaults to `True`. @@ -122,11 +122,11 @@ def tool( ValueError: If too many positional arguments are provided. ValueError: If a runnable is provided without a string name. ValueError: If the first argument is not a string or callable with - a ``__name__`` attribute. + a `__name__` attribute. ValueError: If the function does not have a docstring and description - is not provided and ``infer_schema`` is False. - ValueError: If ``parse_docstring`` is True and the function has an invalid - Google-style docstring and ``error_on_invalid_docstring`` is True. + is not provided and `infer_schema` is False. + ValueError: If `parse_docstring` is True and the function has an invalid + Google-style docstring and `error_on_invalid_docstring` is True. ValueError: If a Runnable is provided that does not have an object schema. Returns: @@ -194,7 +194,7 @@ def tool( "required": ["bar", "baz"], } - Note that parsing by default will raise ``ValueError`` if the docstring + Note that parsing by default will raise `ValueError` if the docstring is considered invalid. A docstring is considered invalid if it contains arguments not in the function signature, or is unable to be parsed into a summary and "Args:" blocks. Examples below: diff --git a/libs/core/langchain_core/tools/structured.py b/libs/core/langchain_core/tools/structured.py index 37d01b70413..ae9e001ea7e 100644 --- a/libs/core/langchain_core/tools/structured.py +++ b/libs/core/langchain_core/tools/structured.py @@ -158,10 +158,10 @@ class StructuredTool(BaseTool): "content_and_artifact" then the output is expected to be a two-tuple corresponding to the (content, artifact) of a ToolMessage. Defaults to "content". 
- parse_docstring: if ``infer_schema`` and ``parse_docstring``, will attempt + parse_docstring: if `infer_schema` and `parse_docstring`, will attempt to parse parameter descriptions from Google Style function docstrings. Defaults to `False`. - error_on_invalid_docstring: if ``parse_docstring`` is provided, configure + error_on_invalid_docstring: if `parse_docstring` is provided, configure whether to raise ValueError on invalid Google Style docstrings. Defaults to `False`. **kwargs: Additional arguments to pass to the tool @@ -173,7 +173,7 @@ class StructuredTool(BaseTool): ValueError: If the function is not provided. ValueError: If the function does not have a docstring and description is not provided. - TypeError: If the ``args_schema`` is not a `BaseModel` or dict. + TypeError: If the `args_schema` is not a `BaseModel` or dict. Examples: diff --git a/libs/core/langchain_core/tracers/event_stream.py b/libs/core/langchain_core/tracers/event_stream.py index 05a5e1175b4..67a2f81ce6c 100644 --- a/libs/core/langchain_core/tracers/event_stream.py +++ b/libs/core/langchain_core/tracers/event_stream.py @@ -473,7 +473,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand For both chat models and non-chat models (legacy LLMs). Raises: - ValueError: If the run type is not ``'llm'`` or ``'chat_model'``. + ValueError: If the run type is not `'llm'` or `'chat_model'`. """ run_info = self.run_map.pop(run_id) inputs_ = run_info.get("inputs") diff --git a/libs/core/langchain_core/tracers/log_stream.py b/libs/core/langchain_core/tracers/log_stream.py index 01a50553f7a..65a6a1baf7c 100644 --- a/libs/core/langchain_core/tracers/log_stream.py +++ b/libs/core/langchain_core/tracers/log_stream.py @@ -111,16 +111,16 @@ class RunLogPatch: self.ops = list(ops) def __add__(self, other: RunLogPatch | Any) -> RunLog: - """Combine two ``RunLogPatch`` instances. + """Combine two `RunLogPatch` instances. Args: - other: The other ``RunLogPatch`` to combine with. + other: The other `RunLogPatch` to combine with. Raises: - TypeError: If the other object is not a ``RunLogPatch``. + TypeError: If the other object is not a `RunLogPatch`. Returns: - A new ``RunLog`` representing the combination of the two. + A new `RunLog` representing the combination of the two. """ if type(other) is RunLogPatch: ops = self.ops + other.ops @@ -159,16 +159,16 @@ class RunLog(RunLogPatch): self.state = state def __add__(self, other: RunLogPatch | Any) -> RunLog: - """Combine two ``RunLog``s. + """Combine two `RunLog`s. Args: - other: The other ``RunLog`` or ``RunLogPatch`` to combine with. + other: The other `RunLog` or `RunLogPatch` to combine with. Raises: - TypeError: If the other object is not a ``RunLog`` or ``RunLogPatch``. + TypeError: If the other object is not a `RunLog` or `RunLogPatch`. Returns: - A new ``RunLog`` representing the combination of the two. + A new `RunLog` representing the combination of the two. """ if type(other) is RunLogPatch: ops = self.ops + other.ops @@ -184,13 +184,13 @@ class RunLog(RunLogPatch): @override def __eq__(self, other: object) -> bool: - """Check if two ``RunLog``s are equal. + """Check if two `RunLog`s are equal. Args: - other: The other ``RunLog`` to compare to. + other: The other `RunLog` to compare to. Returns: - True if the ``RunLog``s are equal, False otherwise. + True if the `RunLog`s are equal, False otherwise. 
""" # First compare that the state is the same if not isinstance(other, RunLog): @@ -666,7 +666,7 @@ async def _astream_log_implementation( ValueError: If the callbacks in the config are of an unexpected type. Yields: - The run log patches or states, depending on the value of ``diff``. + The run log patches or states, depending on the value of `diff`. """ # Assign the stream handler to the config config = ensure_config(config) diff --git a/libs/core/langchain_core/tracers/schemas.py b/libs/core/langchain_core/tracers/schemas.py index 03ecc39e741..f06ad0a80d4 100644 --- a/libs/core/langchain_core/tracers/schemas.py +++ b/libs/core/langchain_core/tracers/schemas.py @@ -18,10 +18,10 @@ from langchain_core._api import deprecated @deprecated("0.1.0", alternative="Use string instead.", removal="1.0") def RunTypeEnum() -> type[RunTypeEnumDep]: # noqa: N802 - """``RunTypeEnum``. + """`RunTypeEnum`. Returns: - The ``RunTypeEnum`` class. + The `RunTypeEnum` class. """ warnings.warn( "RunTypeEnum is deprecated. Please directly use a string instead" diff --git a/libs/core/langchain_core/utils/aiter.py b/libs/core/langchain_core/utils/aiter.py index 39bac19540e..c3613d0fb96 100644 --- a/libs/core/langchain_core/utils/aiter.py +++ b/libs/core/langchain_core/utils/aiter.py @@ -107,7 +107,7 @@ async def tee_peer( """An individual iterator of a `tee`. This function is a generator that yields items from the shared iterator - ``iterator``. It buffers items until the least advanced iterator has + `iterator`. It buffers items until the least advanced iterator has yielded them as well. The buffer is shared with all other peers. Args: @@ -153,14 +153,14 @@ async def tee_peer( class Tee(Generic[T]): - """Create ``n`` separate asynchronous iterators over ``iterable``. + """Create `n` separate asynchronous iterators over `iterable`. - This splits a single ``iterable`` into multiple iterators, each providing + This splits a single `iterable` into multiple iterators, each providing the same items in the same order. All child iterators may advance separately but share the same items - from ``iterable`` -- when the most advanced iterator retrieves an item, + from `iterable` -- when the most advanced iterator retrieves an item, it is buffered until the least advanced iterator has yielded it as well. - A ``tee`` works lazily and can handle an infinite ``iterable``, provided + A `tee` works lazily and can handle an infinite `iterable`, provided that all iterators advance. .. code-block:: python @@ -173,18 +173,18 @@ class Tee(Generic[T]): Unlike `itertools.tee`, `.tee` returns a custom type instead of a :py`tuple`. Like a tuple, it can be indexed, iterated and unpacked to get the child iterators. In addition, its `.tee.aclose` method - immediately closes all children, and it can be used in an ``async with`` context + immediately closes all children, and it can be used in an `async with` context for the same effect. - If ``iterable`` is an iterator and read elsewhere, ``tee`` will *not* - provide these items. Also, ``tee`` must internally buffer each item until the + If `iterable` is an iterator and read elsewhere, `tee` will *not* + provide these items. Also, `tee` must internally buffer each item until the last iterator has yielded it; if the most and least advanced iterator differ by most data, using a :py`list` is more efficient (but not lazy). 
- If the underlying iterable is concurrency safe (``anext`` may be awaited + If the underlying iterable is concurrency safe (`anext` may be awaited concurrently) the resulting iterators are concurrency safe as well. Otherwise, the iterators are safe if there is only ever one single "most advanced" iterator. - To enforce sequential use of ``anext``, provide a ``lock`` + To enforce sequential use of `anext`, provide a `lock` - e.g. an :py`asyncio.Lock` instance in an :py:mod:`asyncio` application - and access is automatically synchronised. @@ -197,7 +197,7 @@ class Tee(Generic[T]): *, lock: AbstractAsyncContextManager[Any] | None = None, ): - """Create a ``tee``. + """Create a `tee`. Args: iterable: The iterable to split. @@ -269,7 +269,7 @@ atee = Tee class aclosing(AbstractAsyncContextManager): # noqa: N801 - """Async context manager to wrap an AsyncGenerator that has a ``aclose()`` method. + """Async context manager to wrap an AsyncGenerator that has a `aclose()` method. Code like this: diff --git a/libs/core/langchain_core/utils/function_calling.py b/libs/core/langchain_core/utils/function_calling.py index 979fe0c09e1..0ed4575217c 100644 --- a/libs/core/langchain_core/utils/function_calling.py +++ b/libs/core/langchain_core/utils/function_calling.py @@ -409,7 +409,7 @@ def convert_to_openai_function( tool, or an Amazon Bedrock Converse format tool. strict: If `True`, model output is guaranteed to exactly match the JSON Schema - provided in the function definition. If `None`, ``strict`` argument will not + provided in the function definition. If `None`, `strict` argument will not be included in function definition. Returns: @@ -420,7 +420,7 @@ def convert_to_openai_function( ValueError: If function is not in a supported format. !!! warning "Behavior changed in 0.2.29" - ``strict`` arg added. + `strict` arg added. !!! warning "Behavior changed in 0.3.13" Support for Anthropic format tools added. @@ -539,7 +539,7 @@ def convert_to_openai_tool( tool, or an Amazon Bedrock Converse format tool. strict: If `True`, model output is guaranteed to exactly match the JSON Schema - provided in the function definition. If `None`, ``strict`` argument will not + provided in the function definition. If `None`, `strict` argument will not be included in tool definition. Returns: @@ -547,7 +547,7 @@ def convert_to_openai_tool( OpenAI tool-calling API. !!! warning "Behavior changed in 0.2.29" - ``strict`` arg added. + `strict` arg added. !!! warning "Behavior changed in 0.3.13" Support for Anthropic format tools added. @@ -602,7 +602,7 @@ def convert_to_json_schema( Args: schema: The schema to convert. strict: If `True`, model output is guaranteed to exactly match the JSON Schema - provided in the function definition. If `None`, ``strict`` argument will not + provided in the function definition. If `None`, `strict` argument will not be included in function definition. Raises: @@ -652,9 +652,9 @@ def tool_example_to_messages( 1. `HumanMessage`: contains the content from which content should be extracted. 2. `AIMessage`: contains the extracted information from the model 3. `ToolMessage`: contains confirmation to the model that the model requested a - tool correctly. + tool correctly. - If ``ai_response`` is specified, there will be a final `AIMessage` with that + If `ai_response` is specified, there will be a final `AIMessage` with that response. 
The `ToolMessage` is required because some chat models are hyper-optimized for diff --git a/libs/core/langchain_core/utils/iter.py b/libs/core/langchain_core/utils/iter.py index 45a6e07019a..4684bf17e8b 100644 --- a/libs/core/langchain_core/utils/iter.py +++ b/libs/core/langchain_core/utils/iter.py @@ -43,7 +43,7 @@ def tee_peer( """An individual iterator of a `.tee`. This function is a generator that yields items from the shared iterator - ``iterator``. It buffers items until the least advanced iterator has + `iterator`. It buffers items until the least advanced iterator has yielded them as well. The buffer is shared with all other peers. Args: @@ -89,14 +89,14 @@ def tee_peer( class Tee(Generic[T]): - """Create ``n`` separate asynchronous iterators over ``iterable``. + """Create `n` separate asynchronous iterators over `iterable`. - This splits a single ``iterable`` into multiple iterators, each providing + This splits a single `iterable` into multiple iterators, each providing the same items in the same order. All child iterators may advance separately but share the same items - from ``iterable`` -- when the most advanced iterator retrieves an item, + from `iterable` -- when the most advanced iterator retrieves an item, it is buffered until the least advanced iterator has yielded it as well. - A ``tee`` works lazily and can handle an infinite ``iterable``, provided + A `tee` works lazily and can handle an infinite `iterable`, provided that all iterators advance. .. code-block:: python @@ -109,18 +109,18 @@ class Tee(Generic[T]): Unlike `itertools.tee`, `.tee` returns a custom type instead of a :py`tuple`. Like a tuple, it can be indexed, iterated and unpacked to get the child iterators. In addition, its `.tee.aclose` method - immediately closes all children, and it can be used in an ``async with`` context + immediately closes all children, and it can be used in an `async with` context for the same effect. - If ``iterable`` is an iterator and read elsewhere, ``tee`` will *not* - provide these items. Also, ``tee`` must internally buffer each item until the + If `iterable` is an iterator and read elsewhere, `tee` will *not* + provide these items. Also, `tee` must internally buffer each item until the last iterator has yielded it; if the most and least advanced iterator differ by most data, using a :py`list` is more efficient (but not lazy). - If the underlying iterable is concurrency safe (``anext`` may be awaited + If the underlying iterable is concurrency safe (`anext` may be awaited concurrently) the resulting iterators are concurrency safe as well. Otherwise, the iterators are safe if there is only ever one single "most advanced" iterator. - To enforce sequential use of ``anext``, provide a ``lock`` + To enforce sequential use of `anext`, provide a `lock` - e.g. an :py`asyncio.Lock` instance in an :py:mod:`asyncio` application - and access is automatically synchronised. @@ -133,7 +133,7 @@ class Tee(Generic[T]): *, lock: AbstractContextManager[Any] | None = None, ): - """Create a ``tee``. + """Create a `tee`. Args: iterable: The iterable to split. 
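Both `Tee` docstrings touched above (the async variant in `utils/aiter.py` and the sync variant in `utils/iter.py`) describe the shared buffering only in prose. The sketch below uses the synchronous `Tee` to show two child iterators advancing independently over one single-pass generator; the values and the two-way split are illustrative only.

```python
from langchain_core.utils.iter import Tee

source = (n * n for n in range(5))  # a one-shot generator
first, second = Tee(source, 2)      # two independent views over the same items

print(next(first))   # 0 -- buffered until `second` has also yielded it
print(next(first))   # 1
print(list(second))  # [0, 1, 4, 9, 16]
print(list(first))   # [4, 9, 16] -- only the items it has not yielded yet
```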
diff --git a/libs/core/langchain_core/utils/mustache.py b/libs/core/langchain_core/utils/mustache.py index affb221d937..f7ce655d548 100644 --- a/libs/core/langchain_core/utils/mustache.py +++ b/libs/core/langchain_core/utils/mustache.py @@ -329,7 +329,7 @@ def tokenize( def _html_escape(string: str) -> str: - """Return the HTML-escaped string with these characters escaped: ``" & < >``.""" + """Return the HTML-escaped string with these characters escaped: `" & < >`.""" html_codes = { '"': """, "<": "<", diff --git a/libs/core/langchain_core/utils/utils.py b/libs/core/langchain_core/utils/utils.py index e7a86fcb65f..a7070dd5edd 100644 --- a/libs/core/langchain_core/utils/utils.py +++ b/libs/core/langchain_core/utils/utils.py @@ -499,7 +499,7 @@ Used for: def ensure_id(id_val: str | None) -> str: """Ensure the ID is a valid string, generating a new UUID if not provided. - Auto-generated UUIDs are prefixed by ``'lc_'`` to indicate they are + Auto-generated UUIDs are prefixed by `'lc_'` to indicate they are LangChain-generated IDs. Args: diff --git a/libs/core/tests/unit_tests/messages/block_translators/test_registration.py b/libs/core/tests/unit_tests/messages/block_translators/test_registration.py index 74c16d30a24..2e4577c9c1c 100644 --- a/libs/core/tests/unit_tests/messages/block_translators/test_registration.py +++ b/libs/core/tests/unit_tests/messages/block_translators/test_registration.py @@ -11,7 +11,7 @@ def test_all_providers_registered() -> None: If this test fails, it is likely that a block translator is implemented but not registered on import. Check that the provider is included in - ``langchain_core.messages.block_translators.__init__._register_translators``. + `langchain_core.messages.block_translators.__init__._register_translators`. """ package_path = ( Path(__file__).parents[4] / "langchain_core" / "messages" / "block_translators" @@ -20,7 +20,7 @@ def test_all_providers_registered() -> None: for module_info in pkgutil.iter_modules([str(package_path)]): module_name = module_info.name - # Skip the __init__ module, any private modules, and ``langchain_v0``, which is + # Skip the __init__ module, any private modules, and `langchain_v0`, which is # only used to parse v0 multimodal inputs. if module_name.startswith("_") or module_name == "langchain_v0": continue diff --git a/libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr b/libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr index 9cd3d7e6e1b..1c83ebe84c3 100644 --- a/libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr +++ b/libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr @@ -385,7 +385,7 @@ 'description': ''' Message for passing the result of executing a tool back to a model. - ``FunctionMessage`` are an older version of the `ToolMessage` schema, and + `FunctionMessage` are an older version of the `ToolMessage` schema, and do not contain the `tool_call_id` field. The `tool_call_id` field is used to associate the tool call request with the @@ -991,8 +991,8 @@ {"name": "foo", "args": {"a": 1}, "id": "123"} - This represents a request to call the tool named ``'foo'`` with arguments - ``{"a": 1}`` and an identifier of ``'123'``. + This represents a request to call the tool named `'foo'` with arguments + `{"a": 1}` and an identifier of `'123'`. ''', 'properties': dict({ 'args': dict({ @@ -1032,9 +1032,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). 
- When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``), + When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`), all string attributes are concatenated. Chunks are only merged if their - values of ``index`` are equal and not None. + values of `index` are equal and not None. Example: @@ -1114,9 +1114,9 @@ Message for passing the result of executing a tool back to a model. `ToolMessage` objects contain the result of a tool invocation. Typically, the result - is encoded inside the ``content`` field. + is encoded inside the `content` field. - Example: A `ToolMessage` representing a result of ``42`` from a tool call with id + Example: A `ToolMessage` representing a result of `42` from a tool call with id .. code-block:: python @@ -1811,7 +1811,7 @@ 'description': ''' Message for passing the result of executing a tool back to a model. - ``FunctionMessage`` are an older version of the `ToolMessage` schema, and + `FunctionMessage` are an older version of the `ToolMessage` schema, and do not contain the `tool_call_id` field. The `tool_call_id` field is used to associate the tool call request with the @@ -2417,8 +2417,8 @@ {"name": "foo", "args": {"a": 1}, "id": "123"} - This represents a request to call the tool named ``'foo'`` with arguments - ``{"a": 1}`` and an identifier of ``'123'``. + This represents a request to call the tool named `'foo'` with arguments + `{"a": 1}` and an identifier of `'123'`. ''', 'properties': dict({ 'args': dict({ @@ -2458,9 +2458,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``), + When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`), all string attributes are concatenated. Chunks are only merged if their - values of ``index`` are equal and not None. + values of `index` are equal and not None. Example: @@ -2540,9 +2540,9 @@ Message for passing the result of executing a tool back to a model. `ToolMessage` objects contain the result of a tool invocation. Typically, the result - is encoded inside the ``content`` field. + is encoded inside the `content` field. - Example: A `ToolMessage` representing a result of ``42`` from a tool call with id + Example: A `ToolMessage` representing a result of `42` from a tool call with id .. code-block:: python diff --git a/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr b/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr index a1855795797..c1f761ac7f2 100644 --- a/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr +++ b/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr @@ -809,7 +809,7 @@ 'description': ''' Message for passing the result of executing a tool back to a model. - ``FunctionMessage`` are an older version of the `ToolMessage` schema, and + `FunctionMessage` are an older version of the `ToolMessage` schema, and do not contain the `tool_call_id` field. The `tool_call_id` field is used to associate the tool call request with the @@ -1415,8 +1415,8 @@ {"name": "foo", "args": {"a": 1}, "id": "123"} - This represents a request to call the tool named ``'foo'`` with arguments - ``{"a": 1}`` and an identifier of ``'123'``. + This represents a request to call the tool named `'foo'` with arguments + `{"a": 1}` and an identifier of `'123'`. ''', 'properties': dict({ 'args': dict({ @@ -1456,9 +1456,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). 
- When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``), + When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`), all string attributes are concatenated. Chunks are only merged if their - values of ``index`` are equal and not None. + values of `index` are equal and not None. Example: @@ -1538,9 +1538,9 @@ Message for passing the result of executing a tool back to a model. `ToolMessage` objects contain the result of a tool invocation. Typically, the result - is encoded inside the ``content`` field. + is encoded inside the `content` field. - Example: A `ToolMessage` representing a result of ``42`` from a tool call with id + Example: A `ToolMessage` representing a result of `42` from a tool call with id .. code-block:: python diff --git a/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr b/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr index 83bea22fdb9..f10e6b4f721 100644 --- a/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr +++ b/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr @@ -2336,7 +2336,7 @@ 'description': ''' Message for passing the result of executing a tool back to a model. - ``FunctionMessage`` are an older version of the `ToolMessage` schema, and + `FunctionMessage` are an older version of the `ToolMessage` schema, and do not contain the `tool_call_id` field. The `tool_call_id` field is used to associate the tool call request with the @@ -2935,8 +2935,8 @@ {"name": "foo", "args": {"a": 1}, "id": "123"} - This represents a request to call the tool named ``'foo'`` with arguments - ``{"a": 1}`` and an identifier of ``'123'``. + This represents a request to call the tool named `'foo'` with arguments + `{"a": 1}` and an identifier of `'123'`. ''', 'properties': dict({ 'args': dict({ @@ -2975,9 +2975,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``), + When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`), all string attributes are concatenated. Chunks are only merged if their - values of ``index`` are equal and not None. + values of `index` are equal and not None. Example: @@ -3056,9 +3056,9 @@ Message for passing the result of executing a tool back to a model. `ToolMessage` objects contain the result of a tool invocation. Typically, the result - is encoded inside the ``content`` field. + is encoded inside the `content` field. - Example: A `ToolMessage` representing a result of ``42`` from a tool call with id + Example: A `ToolMessage` representing a result of `42` from a tool call with id .. code-block:: python @@ -3805,7 +3805,7 @@ 'description': ''' Message for passing the result of executing a tool back to a model. - ``FunctionMessage`` are an older version of the `ToolMessage` schema, and + `FunctionMessage` are an older version of the `ToolMessage` schema, and do not contain the `tool_call_id` field. The `tool_call_id` field is used to associate the tool call request with the @@ -4423,8 +4423,8 @@ {"name": "foo", "args": {"a": 1}, "id": "123"} - This represents a request to call the tool named ``'foo'`` with arguments - ``{"a": 1}`` and an identifier of ``'123'``. + This represents a request to call the tool named `'foo'` with arguments + `{"a": 1}` and an identifier of `'123'`. ''', 'properties': dict({ 'args': dict({ @@ -4463,9 +4463,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). 
- When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``), + When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`), all string attributes are concatenated. Chunks are only merged if their - values of ``index`` are equal and not None. + values of `index` are equal and not None. Example: @@ -4544,9 +4544,9 @@ Message for passing the result of executing a tool back to a model. `ToolMessage` objects contain the result of a tool invocation. Typically, the result - is encoded inside the ``content`` field. + is encoded inside the `content` field. - Example: A `ToolMessage` representing a result of ``42`` from a tool call with id + Example: A `ToolMessage` representing a result of `42` from a tool call with id .. code-block:: python @@ -5305,7 +5305,7 @@ 'description': ''' Message for passing the result of executing a tool back to a model. - ``FunctionMessage`` are an older version of the `ToolMessage` schema, and + `FunctionMessage` are an older version of the `ToolMessage` schema, and do not contain the `tool_call_id` field. The `tool_call_id` field is used to associate the tool call request with the @@ -5923,8 +5923,8 @@ {"name": "foo", "args": {"a": 1}, "id": "123"} - This represents a request to call the tool named ``'foo'`` with arguments - ``{"a": 1}`` and an identifier of ``'123'``. + This represents a request to call the tool named `'foo'` with arguments + `{"a": 1}` and an identifier of `'123'`. ''', 'properties': dict({ 'args': dict({ @@ -5963,9 +5963,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``), + When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`), all string attributes are concatenated. Chunks are only merged if their - values of ``index`` are equal and not None. + values of `index` are equal and not None. Example: @@ -6044,9 +6044,9 @@ Message for passing the result of executing a tool back to a model. `ToolMessage` objects contain the result of a tool invocation. Typically, the result - is encoded inside the ``content`` field. + is encoded inside the `content` field. - Example: A `ToolMessage` representing a result of ``42`` from a tool call with id + Example: A `ToolMessage` representing a result of `42` from a tool call with id .. code-block:: python @@ -6680,7 +6680,7 @@ 'description': ''' Message for passing the result of executing a tool back to a model. - ``FunctionMessage`` are an older version of the `ToolMessage` schema, and + `FunctionMessage` are an older version of the `ToolMessage` schema, and do not contain the `tool_call_id` field. The `tool_call_id` field is used to associate the tool call request with the @@ -7279,8 +7279,8 @@ {"name": "foo", "args": {"a": 1}, "id": "123"} - This represents a request to call the tool named ``'foo'`` with arguments - ``{"a": 1}`` and an identifier of ``'123'``. + This represents a request to call the tool named `'foo'` with arguments + `{"a": 1}` and an identifier of `'123'`. ''', 'properties': dict({ 'args': dict({ @@ -7319,9 +7319,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``), + When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`), all string attributes are concatenated. Chunks are only merged if their - values of ``index`` are equal and not None. + values of `index` are equal and not None. 
Example: @@ -7400,9 +7400,9 @@ Message for passing the result of executing a tool back to a model. `ToolMessage` objects contain the result of a tool invocation. Typically, the result - is encoded inside the ``content`` field. + is encoded inside the `content` field. - Example: A `ToolMessage` representing a result of ``42`` from a tool call with id + Example: A `ToolMessage` representing a result of `42` from a tool call with id .. code-block:: python @@ -8191,7 +8191,7 @@ 'description': ''' Message for passing the result of executing a tool back to a model. - ``FunctionMessage`` are an older version of the `ToolMessage` schema, and + `FunctionMessage` are an older version of the `ToolMessage` schema, and do not contain the `tool_call_id` field. The `tool_call_id` field is used to associate the tool call request with the @@ -8809,8 +8809,8 @@ {"name": "foo", "args": {"a": 1}, "id": "123"} - This represents a request to call the tool named ``'foo'`` with arguments - ``{"a": 1}`` and an identifier of ``'123'``. + This represents a request to call the tool named `'foo'` with arguments + `{"a": 1}` and an identifier of `'123'`. ''', 'properties': dict({ 'args': dict({ @@ -8849,9 +8849,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``), + When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`), all string attributes are concatenated. Chunks are only merged if their - values of ``index`` are equal and not None. + values of `index` are equal and not None. Example: @@ -8930,9 +8930,9 @@ Message for passing the result of executing a tool back to a model. `ToolMessage` objects contain the result of a tool invocation. Typically, the result - is encoded inside the ``content`` field. + is encoded inside the `content` field. - Example: A `ToolMessage` representing a result of ``42`` from a tool call with id + Example: A `ToolMessage` representing a result of `42` from a tool call with id .. code-block:: python @@ -9611,7 +9611,7 @@ 'description': ''' Message for passing the result of executing a tool back to a model. - ``FunctionMessage`` are an older version of the `ToolMessage` schema, and + `FunctionMessage` are an older version of the `ToolMessage` schema, and do not contain the `tool_call_id` field. The `tool_call_id` field is used to associate the tool call request with the @@ -10210,8 +10210,8 @@ {"name": "foo", "args": {"a": 1}, "id": "123"} - This represents a request to call the tool named ``'foo'`` with arguments - ``{"a": 1}`` and an identifier of ``'123'``. + This represents a request to call the tool named `'foo'` with arguments + `{"a": 1}` and an identifier of `'123'`. ''', 'properties': dict({ 'args': dict({ @@ -10250,9 +10250,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``), + When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`), all string attributes are concatenated. Chunks are only merged if their - values of ``index`` are equal and not None. + values of `index` are equal and not None. Example: @@ -10331,9 +10331,9 @@ Message for passing the result of executing a tool back to a model. `ToolMessage` objects contain the result of a tool invocation. Typically, the result - is encoded inside the ``content`` field. + is encoded inside the `content` field. 
- Example: A `ToolMessage` representing a result of ``42`` from a tool call with id + Example: A `ToolMessage` representing a result of `42` from a tool call with id .. code-block:: python @@ -11030,7 +11030,7 @@ 'description': ''' Message for passing the result of executing a tool back to a model. - ``FunctionMessage`` are an older version of the `ToolMessage` schema, and + `FunctionMessage` are an older version of the `ToolMessage` schema, and do not contain the `tool_call_id` field. The `tool_call_id` field is used to associate the tool call request with the @@ -11659,8 +11659,8 @@ {"name": "foo", "args": {"a": 1}, "id": "123"} - This represents a request to call the tool named ``'foo'`` with arguments - ``{"a": 1}`` and an identifier of ``'123'``. + This represents a request to call the tool named `'foo'` with arguments + `{"a": 1}` and an identifier of `'123'`. ''', 'properties': dict({ 'args': dict({ @@ -11699,9 +11699,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``), + When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`), all string attributes are concatenated. Chunks are only merged if their - values of ``index`` are equal and not None. + values of `index` are equal and not None. Example: @@ -11780,9 +11780,9 @@ Message for passing the result of executing a tool back to a model. `ToolMessage` objects contain the result of a tool invocation. Typically, the result - is encoded inside the ``content`` field. + is encoded inside the `content` field. - Example: A `ToolMessage` representing a result of ``42`` from a tool call with id + Example: A `ToolMessage` representing a result of `42` from a tool call with id .. code-block:: python @@ -12491,7 +12491,7 @@ 'description': ''' Message for passing the result of executing a tool back to a model. - ``FunctionMessage`` are an older version of the `ToolMessage` schema, and + `FunctionMessage` are an older version of the `ToolMessage` schema, and do not contain the `tool_call_id` field. The `tool_call_id` field is used to associate the tool call request with the @@ -13109,8 +13109,8 @@ {"name": "foo", "args": {"a": 1}, "id": "123"} - This represents a request to call the tool named ``'foo'`` with arguments - ``{"a": 1}`` and an identifier of ``'123'``. + This represents a request to call the tool named `'foo'` with arguments + `{"a": 1}` and an identifier of `'123'`. ''', 'properties': dict({ 'args': dict({ @@ -13149,9 +13149,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``), + When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`), all string attributes are concatenated. Chunks are only merged if their - values of ``index`` are equal and not None. + values of `index` are equal and not None. Example: @@ -13230,9 +13230,9 @@ Message for passing the result of executing a tool back to a model. `ToolMessage` objects contain the result of a tool invocation. Typically, the result - is encoded inside the ``content`` field. + is encoded inside the `content` field. - Example: A `ToolMessage` representing a result of ``42`` from a tool call with id + Example: A `ToolMessage` representing a result of `42` from a tool call with id .. 
code-block:: python diff --git a/libs/langchain/langchain_classic/agents/openai_functions_agent/base.py b/libs/langchain/langchain_classic/agents/openai_functions_agent/base.py index f964f4a6eef..035a64caee3 100644 --- a/libs/langchain/langchain_classic/agents/openai_functions_agent/base.py +++ b/libs/langchain/langchain_classic/agents/openai_functions_agent/base.py @@ -344,7 +344,7 @@ def create_openai_functions_agent( Prompt: The agent prompt must have an `agent_scratchpad` key that is a - ``MessagesPlaceholder``. Intermediate agent actions and tool output + `MessagesPlaceholder`. Intermediate agent actions and tool output messages will be passed in here. Here's an example: diff --git a/libs/langchain/langchain_classic/agents/openai_tools/base.py b/libs/langchain/langchain_classic/agents/openai_tools/base.py index 327ad268fc9..2d9c5b02475 100644 --- a/libs/langchain/langchain_classic/agents/openai_tools/base.py +++ b/libs/langchain/langchain_classic/agents/openai_tools/base.py @@ -73,7 +73,7 @@ def create_openai_tools_agent( Prompt: The agent prompt must have an `agent_scratchpad` key that is a - ``MessagesPlaceholder``. Intermediate agent actions and tool output + `MessagesPlaceholder`. Intermediate agent actions and tool output messages will be passed in here. Here's an example: diff --git a/libs/langchain/langchain_classic/agents/output_parsers/xml.py b/libs/langchain/langchain_classic/agents/output_parsers/xml.py index b5cd6058f22..b7d434e26ed 100644 --- a/libs/langchain/langchain_classic/agents/output_parsers/xml.py +++ b/libs/langchain/langchain_classic/agents/output_parsers/xml.py @@ -48,8 +48,8 @@ class XMLAgentOutputParser(AgentOutputParser): !!! note Minimal escaping allows tool names containing XML tags to be safely represented. - For example, a tool named ``searchnested`` would be escaped as - ``search[[tool]]nested[[/tool]]`` in the XML and automatically unescaped during + For example, a tool named `searchnested` would be escaped as + `search[[tool]]nested[[/tool]]` in the XML and automatically unescaped during parsing. Raises: diff --git a/libs/langchain/langchain_classic/agents/tool_calling_agent/base.py b/libs/langchain/langchain_classic/agents/tool_calling_agent/base.py index 8018a94535e..8abfc475f5d 100644 --- a/libs/langchain/langchain_classic/agents/tool_calling_agent/base.py +++ b/libs/langchain/langchain_classic/agents/tool_calling_agent/base.py @@ -86,7 +86,7 @@ def create_tool_calling_agent( Prompt: The agent prompt must have an `agent_scratchpad` key that is a - ``MessagesPlaceholder``. Intermediate agent actions and tool output + `MessagesPlaceholder`. Intermediate agent actions and tool output messages will be passed in here. """ diff --git a/libs/langchain/langchain_classic/chains/combine_documents/base.py b/libs/langchain/langchain_classic/chains/combine_documents/base.py index afe7301d3df..51f50ec87d0 100644 --- a/libs/langchain/langchain_classic/chains/combine_documents/base.py +++ b/libs/langchain/langchain_classic/chains/combine_documents/base.py @@ -189,8 +189,8 @@ class AnalyzeDocumentChain(Chain): This class is deprecated. See below for alternative implementations which supports async and streaming modes of operation. - If the underlying combine documents chain takes one ``input_documents`` argument - (e.g., chains generated by ``load_summarize_chain``): + If the underlying combine documents chain takes one `input_documents` argument + (e.g., chains generated by `load_summarize_chain`): .. 
code-block:: python @@ -198,8 +198,8 @@ class AnalyzeDocumentChain(Chain): summarize_document_chain = split_text | chain - If the underlying chain takes additional arguments (e.g., ``load_qa_chain``, which - takes an additional ``question`` argument), we can use the following: + If the underlying chain takes additional arguments (e.g., `load_qa_chain`, which + takes an additional `question` argument), we can use the following: .. code-block:: python @@ -212,8 +212,8 @@ class AnalyzeDocumentChain(Chain): input_documents=itemgetter("input_document") | split_text, ) | chain.pick("output_text") - To additionally return the input parameters, as ``AnalyzeDocumentChain`` does, - we can wrap this construction with ``RunnablePassthrough``: + To additionally return the input parameters, as `AnalyzeDocumentChain` does, + we can wrap this construction with `RunnablePassthrough`: .. code-block:: python diff --git a/libs/langchain/langchain_classic/chains/combine_documents/map_rerank.py b/libs/langchain/langchain_classic/chains/combine_documents/map_rerank.py index 1ba6c3e5e45..fb6993ac544 100644 --- a/libs/langchain/langchain_classic/chains/combine_documents/map_rerank.py +++ b/libs/langchain/langchain_classic/chains/combine_documents/map_rerank.py @@ -31,8 +31,8 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain): r"""Combining documents by mapping a chain over them, then reranking results. This algorithm calls an LLMChain on each input document. The LLMChain is expected - to have an OutputParser that parses the result into both an answer (``answer_key``) - and a score (``rank_key``). The answer with the highest score is then returned. + to have an OutputParser that parses the result into both an answer (`answer_key`) + and a score (`rank_key`). The answer with the highest score is then returned. Example: .. code-block:: python diff --git a/libs/langchain/langchain_classic/chains/conversation/base.py b/libs/langchain/langchain_classic/chains/conversation/base.py index 48a5f7131c2..492c9036485 100644 --- a/libs/langchain/langchain_classic/chains/conversation/base.py +++ b/libs/langchain/langchain_classic/chains/conversation/base.py @@ -19,18 +19,18 @@ from langchain_classic.memory.buffer import ConversationBufferMemory class ConversationChain(LLMChain): """Chain to have a conversation and load context from memory. - This class is deprecated in favor of ``RunnableWithMessageHistory``. Please refer + This class is deprecated in favor of `RunnableWithMessageHistory`. Please refer to this tutorial for more detail: https://python.langchain.com/docs/tutorials/chatbot/ - ``RunnableWithMessageHistory`` offers several benefits, including: + `RunnableWithMessageHistory` offers several benefits, including: - Stream, batch, and async support; - More flexible memory handling, including the ability to manage memory - outside the chain; + outside the chain; - Support for multiple threads. - Below is a minimal implementation, analogous to using ``ConversationChain`` with - the default ``ConversationBufferMemory``: + Below is a minimal implementation, analogous to using `ConversationChain` with + the default `ConversationBufferMemory`: .. code-block:: python @@ -56,7 +56,7 @@ class ConversationChain(LLMChain): config={"configurable": {"session_id": "1"}}, ) # session_id determines thread - Memory objects can also be incorporated into the ``get_session_history`` callable: + Memory objects can also be incorporated into the `get_session_history` callable: .. 
code-block:: python diff --git a/libs/langchain/langchain_classic/chains/conversational_retrieval/base.py b/libs/langchain/langchain_classic/chains/conversational_retrieval/base.py index 2d1af746464..a70e18b140b 100644 --- a/libs/langchain/langchain_classic/chains/conversational_retrieval/base.py +++ b/libs/langchain/langchain_classic/chains/conversational_retrieval/base.py @@ -389,7 +389,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain): max_tokens_limit: int | None = None """If set, enforces that the documents returned are less than this limit. - This is only enforced if ``combine_docs_chain`` is of type StuffDocumentsChain. + This is only enforced if `combine_docs_chain` is of type StuffDocumentsChain. """ def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]: diff --git a/libs/langchain/langchain_classic/chains/moderation.py b/libs/langchain/langchain_classic/chains/moderation.py index b3f2c21ad6c..8aaf1b8f4af 100644 --- a/libs/langchain/langchain_classic/chains/moderation.py +++ b/libs/langchain/langchain_classic/chains/moderation.py @@ -17,7 +17,7 @@ class OpenAIModerationChain(Chain): """Pass input through a moderation endpoint. To use, you should have the `openai` python package installed, and the - environment variable ``OPENAI_API_KEY`` set with your API key. + environment variable `OPENAI_API_KEY` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. diff --git a/libs/langchain/langchain_classic/chat_models/base.py b/libs/langchain/langchain_classic/chat_models/base.py index e2dd7ecd9b8..4b5f5b2a4b2 100644 --- a/libs/langchain/langchain_classic/chat_models/base.py +++ b/libs/langchain/langchain_classic/chat_models/base.py @@ -84,9 +84,9 @@ def init_chat_model( to see what parameters are supported by the model. Args: - model: The name of the model, e.g. ``'o3-mini'``, ``'claude-3-5-sonnet-latest'``. You can + model: The name of the model, e.g. `'o3-mini'`, `'claude-3-5-sonnet-latest'`. You can also specify model and model provider in a single argument using - ``'{model_provider}:{model}'`` format, e.g. ``'openai:o1'``. + `'{model_provider}:{model}'` format, e.g. `'openai:o1'`. model_provider: The model provider if not specified as part of model arg (see above). Supported model_provider values and the corresponding integration package are: @@ -134,19 +134,19 @@ def init_chat_model( Fields are assumed to have config_prefix stripped if there is a config_prefix. If model is specified, then defaults to None. If model is - not specified, then defaults to ``("model", "model_provider")``. + not specified, then defaults to `("model", "model_provider")`. - ***Security Note***: Setting ``configurable_fields="any"`` means fields like - ``api_key``, ``base_url``, etc. can be altered at runtime, potentially redirecting + ***Security Note***: Setting `configurable_fields="any"` means fields like + `api_key`, `base_url`, etc. can be altered at runtime, potentially redirecting model requests to a different service/user. Make sure that if you're accepting untrusted configurations that you enumerate the - ``configurable_fields=(...)`` explicitly. + `configurable_fields=(...)` explicitly. - config_prefix: If ``'config_prefix'`` is a non-empty string then model will be + config_prefix: If `'config_prefix'` is a non-empty string then model will be configurable at runtime via the - ``config["configurable"]["{config_prefix}_{param}"]`` keys. 
If - ``'config_prefix'`` is an empty string then model will be configurable via - ``config["configurable"]["{param}"]``. + `config["configurable"]["{config_prefix}_{param}"]` keys. If + `'config_prefix'` is an empty string then model will be configurable via + `config["configurable"]["{param}"]`. temperature: Model temperature. max_tokens: Max output tokens. timeout: The maximum time (in seconds) to wait for a response from the model @@ -154,10 +154,10 @@ def init_chat_model( max_retries: The maximum number of attempts the system will make to resend a request if it fails due to issues like network timeouts or rate limits. base_url: The URL of the API endpoint where requests are sent. - rate_limiter: A ``BaseRateLimiter`` to space out requests to avoid exceeding + rate_limiter: A `BaseRateLimiter` to space out requests to avoid exceeding rate limits. kwargs: Additional model-specific keyword args to pass to - ``<>.__init__(model=model_name, **kwargs)``. + `<>.__init__(model=model_name, **kwargs)`. Returns: A BaseChatModel corresponding to the model_name and model_provider specified if @@ -289,7 +289,7 @@ def init_chat_model( !!! version-added "Added in version 0.2.7" !!! warning "Behavior changed in 0.2.8" - Support for `configurable_fields` and ``config_prefix`` added. + Support for `configurable_fields` and `config_prefix` added. !!! warning "Behavior changed in 0.2.12" Support for Ollama via langchain-ollama package added diff --git a/libs/langchain/langchain_classic/embeddings/cache.py b/libs/langchain/langchain_classic/embeddings/cache.py index 169c217ef3a..50e34fc05e2 100644 --- a/libs/langchain/langchain_classic/embeddings/cache.py +++ b/libs/langchain/langchain_classic/embeddings/cache.py @@ -46,10 +46,10 @@ def _make_default_key_encoder(namespace: str, algorithm: str) -> Callable[[str], Args: namespace: Prefix that segregates keys from different embedding models. algorithm: - * ``'sha1'`` - fast but not collision-resistant - * ``'blake2b'`` - cryptographically strong, faster than SHA-1 - * ``'sha256'`` - cryptographically strong, slower than SHA-1 - * ``'sha512'`` - cryptographically strong, slower than SHA-1 + * `'sha1'` - fast but not collision-resistant + * `'blake2b'` - cryptographically strong, faster than SHA-1 + * `'sha256'` - cryptographically strong, slower than SHA-1 + * `'sha512'` - cryptographically strong, slower than SHA-1 Returns: A function that encodes a key using the specified algorithm. @@ -242,7 +242,7 @@ class CacheBackedEmbeddings(Embeddings): """Embed query text. By default, this method does not cache queries. To enable caching, set the - ``cache_query`` parameter to `True` when initializing the embedder. + `cache_query` parameter to `True` when initializing the embedder. Args: text: The text to embed. @@ -265,7 +265,7 @@ class CacheBackedEmbeddings(Embeddings): """Embed query text. By default, this method does not cache queries. To enable caching, set the - ``cache_query`` parameter to `True` when initializing the embedder. + `cache_query` parameter to `True` when initializing the embedder. Args: text: The text to embed. 
diff --git a/libs/langchain/langchain_classic/retrievers/document_compressors/listwise_rerank.py b/libs/langchain/langchain_classic/retrievers/document_compressors/listwise_rerank.py index 883a81239b6..10fd973060f 100644 --- a/libs/langchain/langchain_classic/retrievers/document_compressors/listwise_rerank.py +++ b/libs/langchain/langchain_classic/retrievers/document_compressors/listwise_rerank.py @@ -42,7 +42,7 @@ class LLMListwiseRerank(BaseDocumentCompressor): Adapted from: https://arxiv.org/pdf/2305.02156.pdf - ``LLMListwiseRerank`` uses a language model to rerank a list of documents based on + `LLMListwiseRerank` uses a language model to rerank a list of documents based on their relevance to a query. **NOTE**: requires that underlying model implement `with_structured_output`. diff --git a/libs/langchain/langchain_classic/smith/__init__.py b/libs/langchain/langchain_classic/smith/__init__.py index e86b8dfed3f..8965a15f802 100644 --- a/libs/langchain/langchain_classic/smith/__init__.py +++ b/libs/langchain/langchain_classic/smith/__init__.py @@ -10,7 +10,7 @@ see the [LangSmith documentation](https://docs.smith.langchain.com/). LangSmith helps you evaluate Chains and other language model application components using a number of LangChain evaluators. An example of this is shown below, assuming you've created a LangSmith dataset -called ````: +called ``: .. code-block:: python diff --git a/libs/langchain/langchain_classic/smith/evaluation/__init__.py b/libs/langchain/langchain_classic/smith/evaluation/__init__.py index 468f2ddf528..4038a324569 100644 --- a/libs/langchain/langchain_classic/smith/evaluation/__init__.py +++ b/libs/langchain/langchain_classic/smith/evaluation/__init__.py @@ -43,13 +43,13 @@ For more information on the LangSmith API, see the **Attributes** -- ``arun_on_dataset``: Asynchronous function to evaluate a chain or other LangChain - component over a dataset. -- ``run_on_dataset``: Function to evaluate a chain or other LangChain component over a - dataset. -- ``RunEvalConfig``: Class representing the configuration for running evaluation. -- ``StringRunEvaluatorChain``: Class representing a string run evaluator chain. -- ``InputFormatError``: Exception raised when the input format is incorrect. +- `arun_on_dataset`: Asynchronous function to evaluate a chain or other LangChain + component over a dataset. +- `run_on_dataset`: Function to evaluate a chain or other LangChain component over a + dataset. +- `RunEvalConfig`: Class representing the configuration for running evaluation. +- `StringRunEvaluatorChain`: Class representing a string run evaluator chain. +- `InputFormatError`: Exception raised when the input format is incorrect. 
""" diff --git a/libs/langchain/tests/integration_tests/examples/README.rst b/libs/langchain/tests/integration_tests/examples/README.rst index 45630d0385d..7d1419ab272 100644 --- a/libs/langchain/tests/integration_tests/examples/README.rst +++ b/libs/langchain/tests/integration_tests/examples/README.rst @@ -3,9 +3,9 @@ Example Docs The sample docs directory contains the following files: -- ``example-10k.html`` - A 10-K SEC filing in HTML format -- ``layout-parser-paper.pdf`` - A PDF copy of the layout parser paper -- ``factbook.xml``/``factbook.xsl`` - Example XML/XLS files that you +- `example-10k.html` - A 10-K SEC filing in HTML format +- `layout-parser-paper.pdf` - A PDF copy of the layout parser paper +- `factbook.xml`/`factbook.xsl` - Example XML/XLS files that you can use to test stylesheets These documents can be used to test out the parsers in the library. In @@ -16,7 +16,7 @@ XBRL 10-K ^^^^^^^^^ You can get an example 10-K in inline XBRL format using the following -``curl``. Note, you need to have the user agent set in the header or the +`curl`. Note, you need to have the user agent set in the header or the SEC site will reject your request. .. code:: bash diff --git a/libs/langchain/tests/unit_tests/chat_models/test_base.py b/libs/langchain/tests/unit_tests/chat_models/test_base.py index 42a5d005cfd..8e023f9de12 100644 --- a/libs/langchain/tests/unit_tests/chat_models/test_base.py +++ b/libs/langchain/tests/unit_tests/chat_models/test_base.py @@ -197,8 +197,8 @@ def test_configurable_with_default() -> None: Verifies that a configurable chat model initialized with default parameters: - Has access to all standard runnable methods (`invoke`, `stream`, etc.) - - Provides immediate access to non-configurable methods (e.g. ``get_num_tokens``) - - Supports model switching through runtime configuration using ``config_prefix`` + - Provides immediate access to non-configurable methods (e.g. `get_num_tokens`) + - Supports model switching through runtime configuration using `config_prefix` - Maintains proper model identity and attributes when reconfigured - Can be used in chains with different model providers via configuration diff --git a/libs/langchain/tests/unit_tests/llms/fake_chat_model.py b/libs/langchain/tests/unit_tests/llms/fake_chat_model.py index 2cb65ac3930..0dbff53aa92 100644 --- a/libs/langchain/tests/unit_tests/llms/fake_chat_model.py +++ b/libs/langchain/tests/unit_tests/llms/fake_chat_model.py @@ -58,20 +58,20 @@ class GenericFakeChatModel(BaseChatModel): """A generic fake chat model that can be used to test the chat model interface. * Chat model should be usable in both sync and async tests - * Invokes ``on_llm_new_token`` to allow for testing of callback related code for new - tokens. + * Invokes `on_llm_new_token` to allow for testing of callback related code for new + tokens. * Includes logic to break messages into message chunk to facilitate testing of - streaming. + streaming. """ messages: Iterator[AIMessage] """Get an iterator over messages. - This can be expanded to accept other types like ``Callables`` / dicts / strings + This can be expanded to accept other types like `Callables` / dicts / strings to make the interface more generic if needed. !!! note - If you want to pass a list, you can use ``iter`` to convert it to an iterator. + If you want to pass a list, you can use `iter` to convert it to an iterator. !!! warning Streaming is not implemented yet. 
We should try to implement it in the future by diff --git a/libs/langchain_v1/langchain/agents/factory.py b/libs/langchain_v1/langchain/agents/factory.py index e3079b29df7..f61d11d067d 100644 --- a/libs/langchain_v1/langchain/agents/factory.py +++ b/libs/langchain_v1/langchain/agents/factory.py @@ -438,12 +438,12 @@ def create_agent( # noqa: PLR0915 ]: """Creates an agent graph that calls tools in a loop until a stopping condition is met. - For more details on using ``create_agent``, + For more details on using `create_agent`, visit [Agents](https://docs.langchain.com/oss/python/langchain/agents) documentation. Args: model: The language model for the agent. Can be a string identifier - (e.g., ``"openai:gpt-4"``), a chat model instance (e.g., ``ChatOpenAI()``). + (e.g., `"openai:gpt-4"`), a chat model instance (e.g., `ChatOpenAI()`). tools: A list of tools, dicts, or callables. If `None` or an empty list, the agent will consist of a model node without a tool calling loop. system_prompt: An optional system prompt for the LLM. If provided as a string, @@ -753,7 +753,7 @@ def create_agent( # noqa: PLR0915 request: The model request containing model, tools, and response format. Returns: - Tuple of (bound_model, effective_response_format) where ``effective_response_format`` + Tuple of (bound_model, effective_response_format) where `effective_response_format` is the actual strategy used (may differ from initial if auto-detected). """ # Validate ONLY client-side tools that need to exist in tool_node diff --git a/libs/langchain_v1/langchain/agents/middleware/context_editing.py b/libs/langchain_v1/langchain/agents/middleware/context_editing.py index 224a96c9a42..c26e28dc330 100644 --- a/libs/langchain_v1/langchain/agents/middleware/context_editing.py +++ b/libs/langchain_v1/langchain/agents/middleware/context_editing.py @@ -180,8 +180,8 @@ class ContextEditingMiddleware(AgentMiddleware): """Middleware that automatically prunes tool results to manage context size. The middleware applies a sequence of edits when the total input token count - exceeds configured thresholds. Currently the ``ClearToolUsesEdit`` strategy is - supported, aligning with Anthropic's ``clear_tool_uses_20250919`` behaviour. + exceeds configured thresholds. Currently the `ClearToolUsesEdit` strategy is + supported, aligning with Anthropic's `clear_tool_uses_20250919` behaviour. """ edits: list[ContextEdit] diff --git a/libs/langchain_v1/langchain/agents/middleware/human_in_the_loop.py b/libs/langchain_v1/langchain/agents/middleware/human_in_the_loop.py index 222dc9ccb45..d698c6d7e27 100644 --- a/libs/langchain_v1/langchain/agents/middleware/human_in_the_loop.py +++ b/libs/langchain_v1/langchain/agents/middleware/human_in_the_loop.py @@ -165,12 +165,12 @@ class HumanInTheLoopMiddleware(AgentMiddleware): * `True` indicates all actions are allowed: accept, edit, and respond. * `False` indicates that the tool is auto-approved. - * ``ToolConfig`` indicates the specific actions allowed for this tool. - The ToolConfig can include a ``description`` field (str or callable) for - custom formatting of the interrupt description. + * `ToolConfig` indicates the specific actions allowed for this tool. + The ToolConfig can include a `description` field (str or callable) for + custom formatting of the interrupt description. description_prefix: The prefix to use when constructing action requests. This is used to provide context about the tool call and the action being requested. - Not used if a tool has a ``description`` in its ToolConfig. 
+ Not used if a tool has a `description` in its ToolConfig. """ super().__init__() resolved_tool_configs: dict[str, ToolConfig] = {} diff --git a/libs/langchain_v1/langchain/agents/middleware/pii.py b/libs/langchain_v1/langchain/agents/middleware/pii.py index 4d667cf575f..00e28bfe26b 100644 --- a/libs/langchain_v1/langchain/agents/middleware/pii.py +++ b/libs/langchain_v1/langchain/agents/middleware/pii.py @@ -417,17 +417,17 @@ class PIIMiddleware(AgentMiddleware): MAC addresses, and URLs in both user input and agent output. Built-in PII types: - - ``email``: Email addresses - - ``credit_card``: Credit card numbers (validated with Luhn algorithm) - - ``ip``: IP addresses (validated with stdlib) - - ``mac_address``: MAC addresses - - ``url``: URLs (both http/https and bare URLs) + - `email`: Email addresses + - `credit_card`: Credit card numbers (validated with Luhn algorithm) + - `ip`: IP addresses (validated with stdlib) + - `mac_address`: MAC addresses + - `url`: URLs (both http/https and bare URLs) Strategies: - - ``block``: Raise an exception when PII is detected - - ``redact``: Replace PII with ``[REDACTED_TYPE]`` placeholders - - ``mask``: Partially mask PII (e.g., ``****-****-****-1234`` for credit card) - - ``hash``: Replace PII with deterministic hash (e.g., ````) + - `block`: Raise an exception when PII is detected + - `redact`: Replace PII with `[REDACTED_TYPE]` placeholders + - `mask`: Partially mask PII (e.g., `****-****-****-1234` for credit card) + - `hash`: Replace PII with deterministic hash (e.g., ``) Strategy Selection Guide: @@ -487,19 +487,19 @@ class PIIMiddleware(AgentMiddleware): Args: pii_type: Type of PII to detect. Can be a built-in type - (``email``, ``credit_card``, ``ip``, ``mac_address``, ``url``) + (`email`, `credit_card`, `ip`, `mac_address`, `url`) or a custom type name. strategy: How to handle detected PII: - * ``block``: Raise PIIDetectionError when PII is detected - * ``redact``: Replace with ``[REDACTED_TYPE]`` placeholders - * ``mask``: Partially mask PII (show last few characters) - * ``hash``: Replace with deterministic hash (format: ````) + * `block`: Raise PIIDetectionError when PII is detected + * `redact`: Replace with `[REDACTED_TYPE]` placeholders + * `mask`: Partially mask PII (show last few characters) + * `hash`: Replace with deterministic hash (format: ``) detector: Custom detector function or regex pattern. - * If ``Callable``: Function that takes content string and returns - list of PIIMatch objects + * If `Callable`: Function that takes content string and returns + list of PIIMatch objects * If `str`: Regex pattern to match PII * If `None`: Uses built-in detector for the pii_type diff --git a/libs/langchain_v1/langchain/agents/middleware/planning.py b/libs/langchain_v1/langchain/agents/middleware/planning.py index 3f78be947ec..3278ed8f125 100644 --- a/libs/langchain_v1/langchain/agents/middleware/planning.py +++ b/libs/langchain_v1/langchain/agents/middleware/planning.py @@ -146,9 +146,9 @@ class PlanningMiddleware(AgentMiddleware): Args: system_prompt: Custom system prompt to guide the agent on using the todo tool. - If not provided, uses the default ``WRITE_TODOS_SYSTEM_PROMPT``. + If not provided, uses the default `WRITE_TODOS_SYSTEM_PROMPT`. tool_description: Custom description for the write_todos tool. - If not provided, uses the default ``WRITE_TODOS_TOOL_DESCRIPTION``. + If not provided, uses the default `WRITE_TODOS_TOOL_DESCRIPTION`. 
""" state_schema = PlanningState diff --git a/libs/langchain_v1/langchain/chat_models/base.py b/libs/langchain_v1/langchain/chat_models/base.py index 5b0bc4a08ee..f49b2af7c3b 100644 --- a/libs/langchain_v1/langchain/chat_models/base.py +++ b/libs/langchain_v1/langchain/chat_models/base.py @@ -128,21 +128,21 @@ def init_chat_model( Fields are assumed to have config_prefix stripped if there is a config_prefix. If model is specified, then defaults to None. If model is - not specified, then defaults to ``("model", "model_provider")``. + not specified, then defaults to `("model", "model_provider")`. - ***Security Note***: Setting ``configurable_fields="any"`` means fields like + **Security Note**: Setting `configurable_fields="any"` means fields like api_key, base_url, etc. can be altered at runtime, potentially redirecting model requests to a different service/user. Make sure that if you're accepting untrusted configurations that you enumerate the - ``configurable_fields=(...)`` explicitly. + `configurable_fields=(...)` explicitly. config_prefix: If config_prefix is a non-empty string then model will be configurable at runtime via the - ``config["configurable"]["{config_prefix}_{param}"]`` keys. If + `config["configurable"]["{config_prefix}_{param}"]` keys. If config_prefix is an empty string then model will be configurable via - ``config["configurable"]["{param}"]``. + `config["configurable"]["{param}"]`. kwargs: Additional model-specific keyword args to pass to - ``<>.__init__(model=model_name, **kwargs)``. Examples + `<>.__init__(model=model_name, **kwargs)`. Examples include: * temperature: Model temperature. * max_tokens: Max output tokens. @@ -151,7 +151,7 @@ def init_chat_model( * max_retries: The maximum number of attempts the system will make to resend a request if it fails due to issues like network timeouts or rate limits. * base_url: The URL of the API endpoint where requests are sent. - * rate_limiter: A ``BaseRateLimiter`` to space out requests to avoid exceeding + * rate_limiter: A `BaseRateLimiter` to space out requests to avoid exceeding rate limits. Returns: @@ -272,7 +272,7 @@ def init_chat_model( !!! version-added "Added in version 0.2.7" !!! warning "Behavior changed in 0.2.8" - Support for `configurable_fields` and ``config_prefix`` added. + Support for `configurable_fields` and `config_prefix` added. !!! warning "Behavior changed in 0.2.12" Support for Ollama via langchain-ollama package added diff --git a/libs/langchain_v1/langchain/embeddings/cache.py b/libs/langchain_v1/langchain/embeddings/cache.py index b315368a9dd..83d071853d8 100644 --- a/libs/langchain_v1/langchain/embeddings/cache.py +++ b/libs/langchain_v1/langchain/embeddings/cache.py @@ -49,10 +49,10 @@ def _make_default_key_encoder(namespace: str, algorithm: str) -> Callable[[str], Args: namespace: Prefix that segregates keys from different embedding models. algorithm: - * ``'sha1'`` - fast but not collision-resistant - * ``'blake2b'`` - cryptographically strong, faster than SHA-1 - * ``'sha256'`` - cryptographically strong, slower than SHA-1 - * ``'sha512'`` - cryptographically strong, slower than SHA-1 + * `'sha1'` - fast but not collision-resistant + * `'blake2b'` - cryptographically strong, faster than SHA-1 + * `'sha256'` - cryptographically strong, slower than SHA-1 + * `'sha512'` - cryptographically strong, slower than SHA-1 Returns: A function that encodes a key using the specified algorithm. @@ -235,7 +235,7 @@ class CacheBackedEmbeddings(Embeddings): """Embed query text. 
By default, this method does not cache queries. To enable caching, set the - ``cache_query`` parameter to `True` when initializing the embedder. + `cache_query` parameter to `True` when initializing the embedder. Args: text: The text to embed. @@ -258,7 +258,7 @@ class CacheBackedEmbeddings(Embeddings): """Embed query text. By default, this method does not cache queries. To enable caching, set the - ``cache_query`` parameter to `True` when initializing the embedder. + `cache_query` parameter to `True` when initializing the embedder. Args: text: The text to embed. diff --git a/libs/langchain_v1/langchain/tools/tool_node.py b/libs/langchain_v1/langchain/tools/tool_node.py index c8b40576a26..d856aef94a1 100644 --- a/libs/langchain_v1/langchain/tools/tool_node.py +++ b/libs/langchain_v1/langchain/tools/tool_node.py @@ -138,8 +138,8 @@ with potentially modified requests each time. Each call to execute is independent and stateless. Note: - When implementing middleware for ``create_agent``, use - ``AgentMiddleware.wrap_tool_call`` which provides properly typed + When implementing middleware for `create_agent`, use + `AgentMiddleware.wrap_tool_call` which provides properly typed state parameter for better type safety. Examples: @@ -391,25 +391,25 @@ class ToolNode(RunnableCallable): Input Formats: 1. Graph state with `messages` key that has a list of messages: - - Common representation for agentic workflows - - Supports custom messages key via ``messages_key`` parameter + - Common representation for agentic workflows + - Supports custom messages key via `messages_key` parameter - 2. **Message List**: ``[AIMessage(..., tool_calls=[...])]`` - - List of messages with tool calls in the last AIMessage + 2. **Message List**: `[AIMessage(..., tool_calls=[...])]` + - List of messages with tool calls in the last AIMessage - 3. **Direct Tool Calls**: ``[{"name": "tool", "args": {...}, "id": "1", "type": "tool_call"}]`` - - Bypasses message parsing for direct tool execution - - For programmatic tool invocation and testing + 3. **Direct Tool Calls**: `[{"name": "tool", "args": {...}, "id": "1", "type": "tool_call"}]` + - Bypasses message parsing for direct tool execution + - For programmatic tool invocation and testing Output Formats: Output format depends on input type and tool behavior: **For Regular tools**: - - Dict input → ``{"messages": [ToolMessage(...)]}`` - - List input → ``[ToolMessage(...)]`` + - Dict input → `{"messages": [ToolMessage(...)]}` + - List input → `[ToolMessage(...)]` **For Command tools**: - - Returns ``[Command(...)]`` or mixed list with regular tool outputs + - Returns `[Command(...)]` or mixed list with regular tool outputs - Commands can update state, trigger navigation, or send messages Args: @@ -424,17 +424,17 @@ class ToolNode(RunnableCallable): Supports multiple strategies: - **True**: Catch all errors and return a ToolMessage with the default - error template containing the exception details. + error template containing the exception details. - **str**: Catch all errors and return a ToolMessage with this custom - error message string. + error message string. - **type[Exception]**: Only catch exceptions with the specified type and - return the default error message for it. + return the default error message for it. - **tuple[type[Exception], ...]**: Only catch exceptions with the specified - types and return default error messages for them. + types and return default error messages for them. 
- **Callable[..., str]**: Catch exceptions matching the callable's signature - and return the string result of calling it with the exception. + and return the string result of calling it with the exception. - **False**: Disable error handling entirely, allowing exceptions to - propagate. + propagate. Defaults to a callable that: - catches tool invocation errors (due to invalid arguments provided by the model) and returns a descriptive error message @@ -1294,7 +1294,7 @@ class InjectedState(InjectedToolArg): node.invoke(state) ``` - ```pycon + ```python [ ToolMessage(content="not enough messages", name="state_tool", tool_call_id="1"), ToolMessage(content="bar2", name="foo_tool", tool_call_id="2"), @@ -1303,12 +1303,12 @@ class InjectedState(InjectedToolArg): Note: - InjectedState arguments are automatically excluded from tool schemas - presented to language models + presented to language models - ToolNode handles the injection process during execution - Tools can mix regular arguments (controlled by the model) with injected - arguments (controlled by the system) + arguments (controlled by the system) - State injection occurs after the model generates tool calls but before - tool execution + tool execution """ def __init__(self, field: str | None = None) -> None: @@ -1383,7 +1383,7 @@ class InjectedStore(InjectedToolArg): Note: - InjectedStore arguments are automatically excluded from tool schemas - presented to language models + presented to language models - The store instance is automatically injected by ToolNode during execution - Tools can access namespaced storage using the store's get/put methods - Store injection requires the graph to be compiled with a store instance diff --git a/libs/langchain_v1/tests/unit_tests/agents/test_response_format.py b/libs/langchain_v1/tests/unit_tests/agents/test_response_format.py index 89f2a6aef5d..a7963ced16f 100644 --- a/libs/langchain_v1/tests/unit_tests/agents/test_response_format.py +++ b/libs/langchain_v1/tests/unit_tests/agents/test_response_format.py @@ -693,7 +693,7 @@ class TestDynamicModelWithResponseFormat: def test_middleware_model_swap_provider_to_tool_strategy(self) -> None: """Test that strategy resolution is deferred until after middleware modifies the model. - Verifies that when a raw schema is provided, ``_supports_provider_strategy`` is called + Verifies that when a raw schema is provided, `_supports_provider_strategy` is called on the middleware-modified model (not the original), ensuring the correct strategy is selected based on the final model's capabilities. """ diff --git a/libs/langchain_v1/tests/unit_tests/chat_models/test_chat_models.py b/libs/langchain_v1/tests/unit_tests/chat_models/test_chat_models.py index 5b53f3d7360..295002a9b2f 100644 --- a/libs/langchain_v1/tests/unit_tests/chat_models/test_chat_models.py +++ b/libs/langchain_v1/tests/unit_tests/chat_models/test_chat_models.py @@ -198,8 +198,8 @@ def test_configurable_with_default() -> None: Verifies that a configurable chat model initialized with default parameters: - Has access to all standard runnable methods (`invoke`, `stream`, etc.) - - Provides immediate access to non-configurable methods (e.g. ``get_num_tokens``) - - Supports model switching through runtime configuration using ``config_prefix`` + - Provides immediate access to non-configurable methods (e.g. 
`get_num_tokens`) + - Supports model switching through runtime configuration using `config_prefix` - Maintains proper model identity and attributes when reconfigured - Can be used in chains with different model providers via configuration diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py index 6f69327df38..31bf2ae9f97 100644 --- a/libs/partners/anthropic/langchain_anthropic/chat_models.py +++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py @@ -569,7 +569,7 @@ class ChatAnthropic(BaseChatModel): list of the latest models. Setup: - Install `langchain-anthropic` and set environment variable ``ANTHROPIC_API_KEY``. + Install `langchain-anthropic` and set environment variable `ANTHROPIC_API_KEY`. .. code-block:: bash @@ -578,9 +578,9 @@ class ChatAnthropic(BaseChatModel): Key init args — completion params: model: str - Name of Anthropic model to use. e.g. ``'claude-3-7-sonnet-20250219'``. + Name of Anthropic model to use. e.g. `'claude-3-7-sonnet-20250219'`. temperature: float - Sampling temperature. Ranges from ``0.0`` to ``1.0``. + Sampling temperature. Ranges from `0.0` to `1.0`. max_tokens: int Max number of tokens to generate. @@ -589,12 +589,12 @@ class ChatAnthropic(BaseChatModel): Timeout for requests. anthropic_proxy: str | None Proxy to use for the Anthropic clients, will be used for every API call. - If not passed in will be read from env var ``ANTHROPIC_PROXY``. + If not passed in will be read from env var `ANTHROPIC_PROXY`. max_retries: int Max number of retries if a request fails. api_key: str | None Anthropic API key. If not passed in will be read from env var - ``ANTHROPIC_API_KEY``. + `ANTHROPIC_API_KEY`. base_url: str | None Base URL for API requests. Only specify if using a proxy or service emulator. @@ -618,7 +618,7 @@ class ChatAnthropic(BaseChatModel): ) **NOTE**: Any param which is not explicitly supported will be passed directly to the - ``anthropic.Anthropic.messages.create(...)`` API every time to the model is + `anthropic.Anthropic.messages.create(...)` API every time to the model is invoked. For example: .. code-block:: python @@ -773,7 +773,7 @@ class ChatAnthropic(BaseChatModel): }, ] - See ``ChatAnthropic.bind_tools()`` method for more. + See `ChatAnthropic.bind_tools()` method for more. Structured output: .. code-block:: python @@ -802,7 +802,7 @@ class ChatAnthropic(BaseChatModel): rating=None, ) - See ``ChatAnthropic.with_structured_output()`` for more. + See `ChatAnthropic.with_structured_output()` for more. Image input: See [multimodal guides](https://python.langchain.com/docs/how_to/multimodal_inputs/) @@ -976,7 +976,7 @@ class ChatAnthropic(BaseChatModel): feature that lets Claude attach context to its answers based on source documents supplied by the user. When [document content blocks](https://docs.anthropic.com/en/docs/build-with-claude/citations#document-types) - with ``"citations": {"enabled": True}`` are included in a query, Claude may + with `"citations": {"enabled": True}` are included in a query, Claude may generate citations in its response. .. code-block:: python @@ -1068,8 +1068,8 @@ class ChatAnthropic(BaseChatModel): {"input_tokens": 25, "output_tokens": 11, "total_tokens": 36} - These can be disabled by setting ``stream_usage=False`` in the stream method, - or by setting ``stream_usage=False`` when initializing ChatAnthropic. 
+ These can be disabled by setting `stream_usage=False` in the stream method, + or by setting `stream_usage=False` when initializing ChatAnthropic. Prompt caching: Prompt caching reduces processing time and costs for repetitive tasks or prompts @@ -1129,7 +1129,7 @@ class ChatAnthropic(BaseChatModel): ??? note "Extended caching" The cache lifetime is 5 minutes by default. If this is too short, you can - apply one hour caching by setting ``ttl`` to ``'1h'``. + apply one hour caching by setting `ttl` to `'1h'`. .. code-block:: python @@ -1152,7 +1152,7 @@ class ChatAnthropic(BaseChatModel): response = llm.invoke(messages) - Details of cached token counts will be included on the ``InputTokenDetails`` + Details of cached token counts will be included on the `InputTokenDetails` of response's `usage_metadata`: .. code-block:: python @@ -1451,8 +1451,8 @@ class ChatAnthropic(BaseChatModel): """Base URL for API requests. Only specify if using a proxy or service emulator. If a value isn't passed in, will attempt to read the value first from - ``ANTHROPIC_API_URL`` and if that is not set, ``ANTHROPIC_BASE_URL``. - If neither are set, the default value of ``https://api.anthropic.com`` will + `ANTHROPIC_API_URL` and if that is not set, `ANTHROPIC_BASE_URL`. + If neither are set, the default value of `https://api.anthropic.com` will be used. """ @@ -1460,14 +1460,14 @@ class ChatAnthropic(BaseChatModel): alias="api_key", default_factory=secret_from_env("ANTHROPIC_API_KEY", default=""), ) - """Automatically read from env var ``ANTHROPIC_API_KEY`` if not provided.""" + """Automatically read from env var `ANTHROPIC_API_KEY` if not provided.""" anthropic_proxy: str | None = Field( default_factory=from_env("ANTHROPIC_PROXY", default=None) ) """Proxy to use for the Anthropic clients, will be used for every API call. - If not provided, will attempt to read from the ``ANTHROPIC_PROXY`` environment + If not provided, will attempt to read from the `ANTHROPIC_PROXY` environment variable.""" default_headers: Mapping[str, str] | None = None @@ -1477,7 +1477,7 @@ class ChatAnthropic(BaseChatModel): """List of beta features to enable. If specified, invocations will be routed through client.beta.messages.create. - Example: ``betas=["mcp-client-2025-04-04"]`` + Example: `betas=["mcp-client-2025-04-04"]` """ model_kwargs: dict[str, Any] = Field(default_factory=dict) @@ -1492,13 +1492,13 @@ class ChatAnthropic(BaseChatModel): thinking: dict[str, Any] | None = Field(default=None) """Parameters for Claude reasoning, - e.g., ``{"type": "enabled", "budget_tokens": 10_000}``""" + e.g., `{"type": "enabled", "budget_tokens": 10_000}`""" mcp_servers: list[dict[str, Any]] | None = None """List of MCP servers to use for the request. - Example: ``mcp_servers=[{"type": "url", "url": "https://mcp.example.com/mcp", - "name": "example-mcp"}]`` + Example: `mcp_servers=[{"type": "url", "url": "https://mcp.example.com/mcp", + "name": "example-mcp"}]` """ context_management: dict[str, Any] | None = None @@ -1978,7 +1978,7 @@ class ChatAnthropic(BaseChatModel): ) - Example — force specific tool call with tool_choice ``''``: + Example — force specific tool call with tool_choice `''`: .. code-block:: python @@ -2183,7 +2183,7 @@ class ChatAnthropic(BaseChatModel): - a `TypedDict` class, - or a Pydantic class. 
- If ``schema`` is a Pydantic class then the model output will be a + If `schema` is a Pydantic class then the model output will be a Pydantic instance of that class, and the model-generated fields will be validated by the Pydantic class. Otherwise the model output will be a dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool` @@ -2195,22 +2195,22 @@ class ChatAnthropic(BaseChatModel): then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys ``raw``, ``parsed``, and ``parsing_error``. + with keys `raw`, `parsed`, and `parsing_error`. kwargs: Additional keyword arguments are ignored. Returns: A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`. - If ``include_raw`` is `False` and ``schema`` is a Pydantic class, Runnable outputs - an instance of ``schema`` (i.e., a Pydantic object). + If `include_raw` is `False` and `schema` is a Pydantic class, Runnable outputs + an instance of `schema` (i.e., a Pydantic object). - Otherwise, if ``include_raw`` is `False` then Runnable outputs a dict. + Otherwise, if `include_raw` is `False` then Runnable outputs a dict. - If ``include_raw`` is True, then Runnable outputs a dict with keys: + If `include_raw` is True, then Runnable outputs a dict with keys: - - ``'raw'``: BaseMessage - - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``'parsing_error'``: BaseException | None + - `'raw'`: BaseMessage + - `'parsed'`: None if there was a parsing error, otherwise the type depends on the `schema` as described above. + - `'parsing_error'`: BaseException | None Example: Pydantic schema (include_raw=False): @@ -2347,7 +2347,7 @@ class ChatAnthropic(BaseChatModel): tools: If provided, sequence of dict, BaseModel, function, or BaseTools to be converted to tool schemas. kwargs: Additional keyword arguments are passed to the Anthropic - ``messages.count_tokens`` method. + `messages.count_tokens` method. Basic usage: diff --git a/libs/partners/anthropic/langchain_anthropic/llms.py b/libs/partners/anthropic/langchain_anthropic/llms.py index 6c7f19e8e13..0398202597a 100644 --- a/libs/partners/anthropic/langchain_anthropic/llms.py +++ b/libs/partners/anthropic/langchain_anthropic/llms.py @@ -59,7 +59,7 @@ class _AnthropicCommon(BaseLanguageModel): """Base URL for API requests. Only specify if using a proxy or service emulator. If a value isn't passed in, will attempt to read the value from - ``ANTHROPIC_API_URL``. If not set, the default value ``https://api.anthropic.com`` + `ANTHROPIC_API_URL`. If not set, the default value `https://api.anthropic.com` will be used. """ @@ -67,7 +67,7 @@ class _AnthropicCommon(BaseLanguageModel): alias="api_key", default_factory=secret_from_env("ANTHROPIC_API_KEY", default=""), ) - """Automatically read from env var ``ANTHROPIC_API_KEY`` if not provided.""" + """Automatically read from env var `ANTHROPIC_API_KEY` if not provided.""" HUMAN_PROMPT: str | None = None AI_PROMPT: str | None = None @@ -129,7 +129,7 @@ class _AnthropicCommon(BaseLanguageModel): class AnthropicLLM(LLM, _AnthropicCommon): """Anthropic large language model. 
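A short sketch of the `with_structured_output(..., include_raw=True)` return contract described above; the `Joke` schema is illustrative:

.. code-block:: python

    from pydantic import BaseModel, Field

    from langchain_anthropic import ChatAnthropic


    class Joke(BaseModel):
        setup: str = Field(description="The setup of the joke")
        punchline: str = Field(description="The punchline of the joke")


    llm = ChatAnthropic(model="claude-3-7-sonnet-20250219")
    structured_llm = llm.with_structured_output(Joke, include_raw=True)

    result = structured_llm.invoke("Tell me a joke about cats")
    result["raw"]            # the underlying AIMessage
    result["parsed"]         # a Joke instance, or None if parsing failed
    result["parsing_error"]  # None, or the exception raised while parsing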
- To use, you should have the environment variable ``ANTHROPIC_API_KEY`` + To use, you should have the environment variable `ANTHROPIC_API_KEY` set with your API key, or pass it as a named parameter to the constructor. Example: @@ -293,7 +293,7 @@ class AnthropicLLM(LLM, _AnthropicCommon): return response.content[0].text def convert_prompt(self, prompt: PromptValue) -> str: - """Convert a ``PromptValue`` to a string.""" + """Convert a `PromptValue` to a string.""" return prompt.to_string() async def _acall( diff --git a/libs/partners/chroma/langchain_chroma/vectorstores.py b/libs/partners/chroma/langchain_chroma/vectorstores.py index da9d2de74a5..e0819cae0a8 100644 --- a/libs/partners/chroma/langchain_chroma/vectorstores.py +++ b/libs/partners/chroma/langchain_chroma/vectorstores.py @@ -150,7 +150,7 @@ class Chroma(VectorStore): """Chroma vector store integration. Setup: - Install ``chromadb``, ``langchain-chroma`` packages: + Install `chromadb`, `langchain-chroma` packages: .. code-block:: bash diff --git a/libs/partners/deepseek/langchain_deepseek/chat_models.py b/libs/partners/deepseek/langchain_deepseek/chat_models.py index 1215b246d18..8512b7dbc8a 100644 --- a/libs/partners/deepseek/langchain_deepseek/chat_models.py +++ b/libs/partners/deepseek/langchain_deepseek/chat_models.py @@ -30,7 +30,7 @@ class ChatDeepSeek(BaseChatOpenAI): """DeepSeek chat model integration to access models hosted in DeepSeek's API. Setup: - Install `langchain-deepseek` and set environment variable ``DEEPSEEK_API_KEY``. + Install `langchain-deepseek` and set environment variable `DEEPSEEK_API_KEY`. .. code-block:: bash @@ -126,7 +126,7 @@ class ChatDeepSeek(BaseChatOpenAI): ai_msg = llm_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?") ai_msg.tool_calls - See ``ChatDeepSeek.bind_tools()`` method for more. + See `ChatDeepSeek.bind_tools()` method for more. Structured output: .. code-block:: python @@ -147,7 +147,7 @@ class ChatDeepSeek(BaseChatOpenAI): structured_llm = llm.with_structured_output(Joke) structured_llm.invoke("Tell me a joke about cats") - See ``ChatDeepSeek.with_structured_output()`` for more. + See `ChatDeepSeek.with_structured_output()` for more. Token usage: .. code-block:: python @@ -385,7 +385,7 @@ class ChatDeepSeek(BaseChatOpenAI): - a `TypedDict` class (support added in 0.1.20), - or a Pydantic class. - If ``schema`` is a Pydantic class then the model output will be a + If `schema` is a Pydantic class then the model output will be a Pydantic instance of that class, and the model-generated fields will be validated by the Pydantic class. Otherwise the model output will be a dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool` @@ -394,13 +394,13 @@ class ChatDeepSeek(BaseChatOpenAI): method: The method for steering model generation, one of: - - ``'function_calling'``: + - `'function_calling'`: Uses DeepSeek's [tool-calling features](https://api-docs.deepseek.com/guides/function_calling). - - ``'json_mode'``: + - `'json_mode'`: Uses DeepSeek's [JSON mode feature](https://api-docs.deepseek.com/guides/json_mode). !!! warning "Behavior changed in 0.1.3" - Added support for ``'json_mode'``. + Added support for `'json_mode'`. include_raw: If `False` then only the parsed structured output is returned. If @@ -408,7 +408,7 @@ class ChatDeepSeek(BaseChatOpenAI): then both the raw model response (a BaseMessage) and the parsed model response will be returned. 
If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. + with keys `'raw'`, `'parsed'`, and `'parsing_error'`. strict: Whether to enable strict schema adherence when generating the function @@ -422,14 +422,14 @@ class ChatDeepSeek(BaseChatOpenAI): Returns: A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`. - If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs - an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict. + If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs + an instance of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is False then Runnable outputs a dict. - If ``include_raw`` is True, then Runnable outputs a dict with keys: + If `include_raw` is True, then Runnable outputs a dict with keys: - - ``'raw'``: BaseMessage - - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``'parsing_error'``: BaseException | None + - `'raw'`: BaseMessage + - `'parsed'`: None if there was a parsing error, otherwise the type depends on the `schema` as described above. + - `'parsing_error'`: BaseException | None """ # noqa: E501 # Some applications require that incompatible parameters (e.g., unsupported diff --git a/libs/partners/exa/langchain_exa/tools.py b/libs/partners/exa/langchain_exa/tools.py index 0e2cdab2d3d..eeb43cdd546 100644 --- a/libs/partners/exa/langchain_exa/tools.py +++ b/libs/partners/exa/langchain_exa/tools.py @@ -22,7 +22,7 @@ class ExaSearchResults(BaseTool): # type: ignore[override] r"""Exa Search tool. Setup: - Install ``langchain-exa`` and set environment variable ``EXA_API_KEY``. + Install `langchain-exa` and set environment variable `EXA_API_KEY`. .. code-block:: bash diff --git a/libs/partners/fireworks/langchain_fireworks/chat_models.py b/libs/partners/fireworks/langchain_fireworks/chat_models.py index 13842070d0e..36021dc87ce 100644 --- a/libs/partners/fireworks/langchain_fireworks/chat_models.py +++ b/libs/partners/fireworks/langchain_fireworks/chat_models.py @@ -259,7 +259,7 @@ class ChatFireworks(BaseChatModel): """`Fireworks` Chat large language models API. To use, you should have the - environment variable ``FIREWORKS_API_KEY`` set with your API key. + environment variable `FIREWORKS_API_KEY` set with your API key. Any parameters that are valid to be passed to the fireworks.create call can be passed in, even if not explicitly saved on this class. @@ -320,7 +320,7 @@ class ChatFireworks(BaseChatModel): ) """Fireworks API key. - Automatically read from env variable ``FIREWORKS_API_KEY`` if not provided. + Automatically read from env variable `FIREWORKS_API_KEY` if not provided. """ fireworks_api_base: str | None = Field( @@ -331,8 +331,8 @@ class ChatFireworks(BaseChatModel): request_timeout: float | tuple[float, float] | Any | None = Field( default=None, alias="timeout" ) - """Timeout for requests to Fireworks completion API. Can be ``float``, - ``httpx.Timeout`` or `None`.""" + """Timeout for requests to Fireworks completion API. Can be `float`, + `httpx.Timeout` or `None`.""" streaming: bool = False """Whether to stream the results or not.""" n: int = 1 @@ -680,7 +680,7 @@ class ChatFireworks(BaseChatModel): - a `TypedDict` class (support added in 0.1.7), - or a Pydantic class. 
- If ``schema`` is a Pydantic class then the model output will be a + If `schema` is a Pydantic class then the model output will be a Pydantic instance of that class, and the model-generated fields will be validated by the Pydantic class. Otherwise the model output will be a dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool` @@ -692,15 +692,15 @@ class ChatFireworks(BaseChatModel): method: The method for steering model generation, one of: - - ``'function_calling'``: + - `'function_calling'`: Uses Fireworks's [tool-calling features](https://docs.fireworks.ai/guides/function-calling). - - ``'json_schema'``: + - `'json_schema'`: Uses Fireworks's [structured output feature](https://docs.fireworks.ai/structured-responses/structured-response-formatting). - - ``'json_mode'``: + - `'json_mode'`: Uses Fireworks's [JSON mode feature](https://docs.fireworks.ai/structured-responses/structured-response-formatting). !!! warning "Behavior changed in 0.2.8" - Added support for ``'json_schema'``. + Added support for `'json_schema'`. include_raw: If `False` then only the parsed structured output is returned. If @@ -708,7 +708,7 @@ class ChatFireworks(BaseChatModel): then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. + with keys `'raw'`, `'parsed'`, and `'parsing_error'`. kwargs: Any additional parameters to pass to the @@ -717,16 +717,16 @@ class ChatFireworks(BaseChatModel): Returns: A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`. - If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs - an instance of ``schema`` (i.e., a Pydantic object). + If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs + an instance of `schema` (i.e., a Pydantic object). - Otherwise, if ``include_raw`` is False then Runnable outputs a dict. + Otherwise, if `include_raw` is False then Runnable outputs a dict. - If ``include_raw`` is True, then Runnable outputs a dict with keys: + If `include_raw` is True, then Runnable outputs a dict with keys: - - ``'raw'``: BaseMessage - - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``'parsing_error'``: BaseException | None + - `'raw'`: BaseMessage + - `'parsed'`: None if there was a parsing error, otherwise the type depends on the `schema` as described above. + - `'parsing_error'`: BaseException | None Example: schema=Pydantic class, method="function_calling", include_raw=False: diff --git a/libs/partners/fireworks/langchain_fireworks/embeddings.py b/libs/partners/fireworks/langchain_fireworks/embeddings.py index 40617dd6eb5..6389cab663e 100644 --- a/libs/partners/fireworks/langchain_fireworks/embeddings.py +++ b/libs/partners/fireworks/langchain_fireworks/embeddings.py @@ -10,8 +10,8 @@ class FireworksEmbeddings(BaseModel, Embeddings): Setup: - Install ``langchain_fireworks`` and set environment variable - ``FIREWORKS_API_KEY``. + Install `langchain_fireworks` and set environment variable + `FIREWORKS_API_KEY`. .. code-block:: bash @@ -78,7 +78,7 @@ class FireworksEmbeddings(BaseModel, Embeddings): ) """Fireworks API key. - Automatically read from env variable ``FIREWORKS_API_KEY`` if not provided. 
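A rough sketch of steering `ChatFireworks` structured output with the `'json_schema'` method described above; the model identifier is an assumption, not taken from the patch:

.. code-block:: python

    from pydantic import BaseModel

    from langchain_fireworks import ChatFireworks


    class AnswerWithJustification(BaseModel):
        answer: str
        justification: str


    # Illustrative model id; any Fireworks model with structured-response
    # support should work here.
    llm = ChatFireworks(
        model="accounts/fireworks/models/llama-v3p1-70b-instruct",
        temperature=0,
    )
    structured_llm = llm.with_structured_output(
        AnswerWithJustification, method="json_schema"
    )

    structured_llm.invoke("What weighs more, a pound of bricks or a pound of feathers?")
    # -> AnswerWithJustification(answer=..., justification=...)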
+ Automatically read from env variable `FIREWORKS_API_KEY` if not provided. """ model: str = "nomic-ai/nomic-embed-text-v1.5" diff --git a/libs/partners/fireworks/langchain_fireworks/llms.py b/libs/partners/fireworks/langchain_fireworks/llms.py index 2d52a92edb6..d2cd1764d67 100644 --- a/libs/partners/fireworks/langchain_fireworks/llms.py +++ b/libs/partners/fireworks/langchain_fireworks/llms.py @@ -25,8 +25,8 @@ class Fireworks(LLM): """LLM models from `Fireworks`. To use, you'll need an [API key](https://fireworks.ai). This can be passed in as - init param ``fireworks_api_key`` or set as environment variable - ``FIREWORKS_API_KEY``. + init param `fireworks_api_key` or set as environment variable + `FIREWORKS_API_KEY`. [Fireworks AI API reference](https://readme.fireworks.ai/) @@ -52,7 +52,7 @@ class Fireworks(LLM): ) """Fireworks API key. - Automatically read from env variable ``FIREWORKS_API_KEY`` if not provided. + Automatically read from env variable `FIREWORKS_API_KEY` if not provided. """ model: str """Model name. [(Available models)](https://readme.fireworks.ai/)""" @@ -66,7 +66,7 @@ class Fireworks(LLM): randomness in the output. """ model_kwargs: dict[str, Any] = Field(default_factory=dict) - """Holds any model parameters valid for ``create`` call not explicitly specified.""" + """Holds any model parameters valid for `create` call not explicitly specified.""" top_k: int | None = None """Used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their diff --git a/libs/partners/groq/langchain_groq/chat_models.py b/libs/partners/groq/langchain_groq/chat_models.py index 5ccaf22f13b..50ee753eebe 100644 --- a/libs/partners/groq/langchain_groq/chat_models.py +++ b/libs/partners/groq/langchain_groq/chat_models.py @@ -64,14 +64,14 @@ class ChatGroq(BaseChatModel): r"""Groq Chat large language models API. To use, you should have the - environment variable ``GROQ_API_KEY`` set with your API key. + environment variable `GROQ_API_KEY` set with your API key. Any parameters that are valid to be passed to the groq.create call can be passed in, even if not explicitly saved on this class. Setup: Install `langchain-groq` and set environment variable - ``GROQ_API_KEY``. + `GROQ_API_KEY`. .. code-block:: bash @@ -80,23 +80,23 @@ class ChatGroq(BaseChatModel): Key init args — completion params: model: str - Name of Groq model to use, e.g. ``llama-3.1-8b-instant``. + Name of Groq model to use, e.g. `llama-3.1-8b-instant`. temperature: float - Sampling temperature. Ranges from ``0.0`` to ``1.0``. + Sampling temperature. Ranges from `0.0` to `1.0`. max_tokens: int | None Max number of tokens to generate. reasoning_format: Literal["parsed", "raw", "hidden] | None - The format for reasoning output. Groq will default to ``raw`` if left + The format for reasoning output. Groq will default to `raw` if left undefined. - - ``'parsed'``: Separates reasoning into a dedicated field while keeping the - response concise. Reasoning will be returned in the - ``additional_kwargs.reasoning_content`` field of the response. - - ``'raw'``: Includes reasoning within think tags (e.g. - ``{reasoning_content}``). - - ``'hidden'``: Returns only the final answer content. Note: this only - supresses reasoning content in the response; the model will still perform - reasoning unless overridden in ``reasoning_effort``. + - `'parsed'`: Separates reasoning into a dedicated field while keeping the + response concise. 
Reasoning will be returned in the + `additional_kwargs.reasoning_content` field of the response. + - `'raw'`: Includes reasoning within think tags (e.g. + `{reasoning_content}`). + - `'hidden'`: Returns only the final answer content. Note: this only + supresses reasoning content in the response; the model will still perform + reasoning unless overridden in `reasoning_effort`. See the [Groq documentation](https://console.groq.com/docs/reasoning#reasoning) for more details and a list of supported models. @@ -110,7 +110,7 @@ class ChatGroq(BaseChatModel): max_retries: int Max number of retries. api_key: str | None - Groq API key. If not passed in will be read from env var ``GROQ_API_KEY``. + Groq API key. If not passed in will be read from env var `GROQ_API_KEY`. base_url: str | None Base URL path for API requests, leave blank if not using a proxy or service emulator. @@ -201,16 +201,16 @@ class ChatGroq(BaseChatModel): .. code-block:: python - AIMessage(content='The English sentence "I love programming" can - be translated to French as "J\'aime programmer". The word - "programming" is translated as "programmer" in French. I hope - this helps! Let me know if you have any other questions.', - response_metadata={'token_usage': {'completion_tokens': 53, - 'prompt_tokens': 28, 'total_tokens': 81, 'completion_time': - 0.083623752, 'prompt_time': 0.007365126, 'queue_time': None, - 'total_time': 0.090988878}, 'model_name': 'llama-3.1-8b-instant', - 'system_fingerprint': 'fp_c5f20b5bb1', 'finish_reason': 'stop', - 'logprobs': None}, id='run-897f3391-1bea-42e2-82e0-686e2367bcf8-0') + AIMessage(content='The English sentence "I love programming" can + be translated to French as "J\'aime programmer". The word + "programming" is translated as "programmer" in French. I hope + this helps! Let me know if you have any other questions.', + response_metadata={'token_usage': {'completion_tokens': 53, + 'prompt_tokens': 28, 'total_tokens': 81, 'completion_time': + 0.083623752, 'prompt_time': 0.007365126, 'queue_time': None, + 'total_time': 0.090988878}, 'model_name': 'llama-3.1-8b-instant', + 'system_fingerprint': 'fp_c5f20b5bb1', 'finish_reason': 'stop', + 'logprobs': None}, id='run-897f3391-1bea-42e2-82e0-686e2367bcf8-0') Tool calling: .. code-block:: python @@ -244,7 +244,7 @@ class ChatGroq(BaseChatModel): } ] - See ``ChatGroq.bind_tools()`` method for more. + See `ChatGroq.bind_tools()` method for more. Structured output: .. code-block:: python @@ -273,7 +273,7 @@ class ChatGroq(BaseChatModel): rating=None, ) - See ``ChatGroq.with_structured_output()`` for more. + See `ChatGroq.with_structured_output()` for more. Response metadata: .. code-block:: python @@ -312,14 +312,14 @@ class ChatGroq(BaseChatModel): reasoning_format: Literal["parsed", "raw", "hidden"] | None = Field(default=None) """The format for reasoning output. Groq will default to raw if left undefined. - - ``'parsed'``: Separates reasoning into a dedicated field while keeping the - response concise. Reasoning will be returned in the - ``additional_kwargs.reasoning_content`` field of the response. - - ``'raw'``: Includes reasoning within think tags (e.g. - ``{reasoning_content}``). - - ``'hidden'``: Returns only the final answer content. Note: this only supresses - reasoning content in the response; the model will still perform reasoning unless - overridden in ``reasoning_effort``. + - `'parsed'`: Separates reasoning into a dedicated field while keeping the + response concise. 
Reasoning will be returned in the + `additional_kwargs.reasoning_content` field of the response. + - `'raw'`: Includes reasoning within think tags (e.g. + `{reasoning_content}`). + - `'hidden'`: Returns only the final answer content. Note: this only supresses + reasoning content in the response; the model will still perform reasoning unless + overridden in `reasoning_effort`. See the [Groq documentation](https://console.groq.com/docs/reasoning#reasoning) for more details and a list of supported models. @@ -337,7 +337,7 @@ class ChatGroq(BaseChatModel): groq_api_key: SecretStr | None = Field( alias="api_key", default_factory=secret_from_env("GROQ_API_KEY", default=None) ) - """Automatically inferred from env var ``GROQ_API_KEY`` if not provided.""" + """Automatically inferred from env var `GROQ_API_KEY` if not provided.""" groq_api_base: str | None = Field( alias="base_url", default_factory=from_env("GROQ_API_BASE", default=None) ) @@ -348,7 +348,7 @@ class ChatGroq(BaseChatModel): request_timeout: float | tuple[float, float] | Any | None = Field( default=None, alias="timeout" ) - """Timeout for requests to Groq completion API. Can be float, ``httpx.Timeout`` or + """Timeout for requests to Groq completion API. Can be float, `httpx.Timeout` or None.""" max_retries: int = 2 """Maximum number of retries to make when generating.""" @@ -362,12 +362,12 @@ class ChatGroq(BaseChatModel): """Optional parameter that you can include to specify the service tier you'd like to use for requests. - - ``'on_demand'``: Default. - - ``'flex'``: On-demand processing when capacity is available, with rapid timeouts - if resources are constrained. Provides balance between performance and reliability - for workloads that don't require guaranteed processing. - - `'auto'`: Uses on-demand rate limits, then falls back to ``'flex'`` if those - limits are exceeded + - `'on_demand'`: Default. + - `'flex'`: On-demand processing when capacity is available, with rapid timeouts + if resources are constrained. Provides balance between performance and + reliability for workloads that don't require guaranteed processing. + - `'auto'`: Uses on-demand rate limits, then falls back to `'flex'` if those + limits are exceeded See the [Groq documentation](https://console.groq.com/docs/flex-processing) for more details and a list of service tiers and descriptions. @@ -377,10 +377,10 @@ class ChatGroq(BaseChatModel): # Configure a custom httpx client. See the # [httpx documentation](https://www.python-httpx.org/api/#client) for more details. http_client: Any | None = None - """Optional ``httpx.Client``.""" + """Optional `httpx.Client`.""" http_async_client: Any | None = None - """Optional ``httpx.AsyncClient``. Only used for async invocations. Must specify - ``http_client`` as well if you'd like a custom client for sync invocations.""" + """Optional `httpx.AsyncClient`. Only used for async invocations. Must specify + `http_client` as well if you'd like a custom client for sync invocations.""" model_config = ConfigDict( populate_by_name=True, @@ -832,7 +832,7 @@ class ChatGroq(BaseChatModel): - a `TypedDict` class (supported added in 0.1.9), - or a Pydantic class. - If ``schema`` is a Pydantic class then the model output will be a + If `schema` is a Pydantic class then the model output will be a Pydantic instance of that class, and the model-generated fields will be validated by the Pydantic class. Otherwise the model output will be a dict and will not be validated. 
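A minimal sketch of `reasoning_format='parsed'` as described above: the final answer stays in `content` while the reasoning is surfaced separately. The model name is illustrative; consult the linked Groq reasoning docs for models that actually support it.

.. code-block:: python

    from langchain_groq import ChatGroq

    # Illustrative reasoning-capable model name.
    llm = ChatGroq(model="deepseek-r1-distill-llama-70b", reasoning_format="parsed")

    msg = llm.invoke("What is 17 * 24?")
    msg.content                                      # concise final answer
    msg.additional_kwargs.get("reasoning_content")   # reasoning, returned separately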
See `langchain_core.utils.function_calling.convert_to_openai_tool` @@ -844,19 +844,19 @@ class ChatGroq(BaseChatModel): !!! warning "Behavior changed in 0.3.8" Added support for Groq's dedicated structured output feature via - ``method="json_schema"``. + `method="json_schema"`. method: The method for steering model generation, one of: - - ``'function_calling'``: + - `'function_calling'`: Uses Groq's tool-calling [API](https://console.groq.com/docs/tool-use) - - ``'json_schema'``: + - `'json_schema'`: Uses Groq's [Structured Output API](https://console.groq.com/docs/structured-outputs). - Supported for a subset of models, including ``openai/gpt-oss``, - ``moonshotai/kimi-k2-instruct-0905``, and some ``meta-llama/llama-4`` + Supported for a subset of models, including `openai/gpt-oss`, + `moonshotai/kimi-k2-instruct-0905`, and some `meta-llama/llama-4` models. See [docs](https://console.groq.com/docs/structured-outputs) for details. - - ``'json_mode'``: + - `'json_mode'`: Uses Groq's [JSON mode](https://console.groq.com/docs/structured-outputs#json-object-mode). Note that if using JSON mode then you must include instructions for formatting the output into the desired schema into the model call @@ -865,18 +865,18 @@ class ChatGroq(BaseChatModel): support which methods [here](https://console.groq.com/docs/structured-outputs). method: - The method for steering model generation, either ``'function_calling'`` - or ``'json_mode'``. If ``'function_calling'`` then the schema will be converted + The method for steering model generation, either `'function_calling'` + or `'json_mode'`. If `'function_calling'` then the schema will be converted to an OpenAI function and the returned model will make use of the - function-calling API. If ``'json_mode'`` then JSON mode will be used. + function-calling API. If `'json_mode'` then JSON mode will be used. !!! note - If using ``'json_mode'`` then you must include instructions for formatting + If using `'json_mode'` then you must include instructions for formatting the output into the desired schema into the model call. (either via the prompt itself or in the system message/prompt/instructions). !!! warning - ``'json_mode'`` does not support streaming responses stop sequences. + `'json_mode'` does not support streaming responses stop sequences. include_raw: If `False` then only the parsed structured output is returned. If @@ -884,7 +884,7 @@ class ChatGroq(BaseChatModel): then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. + with keys `'raw'`, `'parsed'`, and `'parsing_error'`. kwargs: Any additional parameters to pass to the @@ -893,16 +893,16 @@ class ChatGroq(BaseChatModel): Returns: A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`. - If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs - an instance of ``schema`` (i.e., a Pydantic object). + If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs + an instance of `schema` (i.e., a Pydantic object). - Otherwise, if ``include_raw`` is False then Runnable outputs a dict. + Otherwise, if `include_raw` is False then Runnable outputs a dict. 
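A small sketch of the `'json_schema'` structured-output path described above; the model name is illustrative (the docstring only names the `openai/gpt-oss` family and a few others as supported):

.. code-block:: python

    from pydantic import BaseModel

    from langchain_groq import ChatGroq


    class Joke(BaseModel):
        setup: str
        punchline: str


    # Illustrative model; 'json_schema' is limited to a subset of Groq models.
    llm = ChatGroq(model="openai/gpt-oss-20b")
    structured_llm = llm.with_structured_output(Joke, method="json_schema")

    structured_llm.invoke("Tell me a joke about cats")
    # -> Joke(setup=..., punchline=...)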
- If ``include_raw`` is True, then Runnable outputs a dict with keys: + If `include_raw` is True, then Runnable outputs a dict with keys: - - ``'raw'``: BaseMessage - - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``'parsing_error'``: BaseException | None + - `'raw'`: BaseMessage + - `'parsed'`: None if there was a parsing error, otherwise the type depends on the `schema` as described above. + - `'parsing_error'`: BaseException | None Example: schema=Pydantic class, method="function_calling", include_raw=False: diff --git a/libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py b/libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py index 2d6ff6b11c8..639b6bffbaa 100644 --- a/libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py +++ b/libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py @@ -889,9 +889,9 @@ class ChatHuggingFace(BaseChatModel): method: The method for steering model generation, one of: - - ``'function_calling'``: uses tool-calling features. - - ``'json_schema'``: uses dedicated structured output features. - - ``'json_mode'``: uses JSON mode. + - `'function_calling'`: uses tool-calling features. + - `'json_schema'`: uses dedicated structured output features. + - `'json_mode'`: uses JSON mode. include_raw: If `False` then only the parsed structured output is returned. If @@ -899,7 +899,7 @@ class ChatHuggingFace(BaseChatModel): then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. + with keys `'raw'`, `'parsed'`, and `'parsing_error'`. kwargs: Additional parameters to pass to the underlying LLM's @@ -909,16 +909,16 @@ class ChatHuggingFace(BaseChatModel): Returns: A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`. - If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs - an instance of ``schema`` (i.e., a Pydantic object). + If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs + an instance of `schema` (i.e., a Pydantic object). - Otherwise, if ``include_raw`` is False then Runnable outputs a dict. + Otherwise, if `include_raw` is False then Runnable outputs a dict. - If ``include_raw`` is True, then Runnable outputs a dict with keys: + If `include_raw` is True, then Runnable outputs a dict with keys: - - ``'raw'``: BaseMessage - - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``'parsing_error'``: BaseException | None + - `'raw'`: BaseMessage + - `'parsed'`: None if there was a parsing error, otherwise the type depends on the `schema` as described above. 
+ - `'parsing_error'`: BaseException | None """ # noqa: E501 _ = kwargs.pop("strict", None) diff --git a/libs/partners/huggingface/langchain_huggingface/embeddings/huggingface.py b/libs/partners/huggingface/langchain_huggingface/embeddings/huggingface.py index 179c99d6901..ab19422f629 100644 --- a/libs/partners/huggingface/langchain_huggingface/embeddings/huggingface.py +++ b/libs/partners/huggingface/langchain_huggingface/embeddings/huggingface.py @@ -20,7 +20,7 @@ _MIN_OPTIMUM_VERSION = "1.22" class HuggingFaceEmbeddings(BaseModel, Embeddings): """HuggingFace sentence_transformers embedding models. - To use, you should have the ``sentence_transformers`` python package installed. + To use, you should have the `sentence_transformers` python package installed. Example: .. code-block:: python diff --git a/libs/partners/huggingface/langchain_huggingface/embeddings/huggingface_endpoint.py b/libs/partners/huggingface/langchain_huggingface/embeddings/huggingface_endpoint.py index dc2b2584879..4fad630c73c 100644 --- a/libs/partners/huggingface/langchain_huggingface/embeddings/huggingface_endpoint.py +++ b/libs/partners/huggingface/langchain_huggingface/embeddings/huggingface_endpoint.py @@ -15,8 +15,8 @@ VALID_TASKS = ("feature-extraction",) class HuggingFaceEndpointEmbeddings(BaseModel, Embeddings): """HuggingFaceHub embedding models. - To use, you should have the ``huggingface_hub`` python package installed, and the - environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass + To use, you should have the `huggingface_hub` python package installed, and the + environment variable `HUGGINGFACEHUB_API_TOKEN` set with your API token, or pass it as a named parameter to the constructor. Example: @@ -39,7 +39,7 @@ class HuggingFaceEndpointEmbeddings(BaseModel, Embeddings): """Model name to use.""" provider: str | None = None """Name of the provider to use for inference with the model specified in - ``repo_id``. e.g. "sambanova". if not specified, defaults to HF Inference API. + `repo_id`. e.g. "sambanova". if not specified, defaults to HF Inference API. available providers can be found in the [huggingface_hub documentation](https://huggingface.co/docs/huggingface_hub/guides/inference#supported-providers-and-tasks).""" repo_id: str | None = None """Huggingfacehub repository id, for backward compatibility.""" diff --git a/libs/partners/huggingface/langchain_huggingface/llms/huggingface_endpoint.py b/libs/partners/huggingface/langchain_huggingface/llms/huggingface_endpoint.py index cdc7423039c..eb3720833a4 100644 --- a/libs/partners/huggingface/langchain_huggingface/llms/huggingface_endpoint.py +++ b/libs/partners/huggingface/langchain_huggingface/llms/huggingface_endpoint.py @@ -29,8 +29,8 @@ VALID_TASKS = ( class HuggingFaceEndpoint(LLM): """Hugging Face Endpoint. This works with any model that supports text generation (i.e. text completion) task. - To use this class, you should have installed the ``huggingface_hub`` package, and - the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, + To use this class, you should have installed the `huggingface_hub` package, and + the environment variable `HUGGINGFACEHUB_API_TOKEN` set with your API token, or given as a named parameter to the constructor. 
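A minimal sketch of the `HuggingFaceEndpointEmbeddings` usage implied above, assuming `huggingface_hub` is installed and `HUGGINGFACEHUB_API_TOKEN` is set; the model id is illustrative:

.. code-block:: python

    from langchain_huggingface import HuggingFaceEndpointEmbeddings

    # Illustrative model id. A `provider` (e.g. "sambanova") may also be
    # passed to route inference to a specific provider instead of the
    # default HF Inference API.
    embeddings = HuggingFaceEndpointEmbeddings(
        model="sentence-transformers/all-MiniLM-L6-v2"
    )

    embeddings.embed_query("What is the capital of France?")
    embeddings.embed_documents(["Document 1...", "Document 2..."])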
Example: diff --git a/libs/partners/huggingface/langchain_huggingface/llms/huggingface_pipeline.py b/libs/partners/huggingface/langchain_huggingface/llms/huggingface_pipeline.py index c4d1fedcfd8..1bd278fb221 100644 --- a/libs/partners/huggingface/langchain_huggingface/llms/huggingface_pipeline.py +++ b/libs/partners/huggingface/langchain_huggingface/llms/huggingface_pipeline.py @@ -37,7 +37,7 @@ logger = logging.getLogger(__name__) class HuggingFacePipeline(BaseLLM): """HuggingFace Pipeline API. - To use, you should have the ``transformers`` python package installed. + To use, you should have the `transformers` python package installed. Only supports `text-generation`, `text2text-generation`, `image-text-to-text`, `summarization` and `translation` for now. diff --git a/libs/partners/mistralai/langchain_mistralai/chat_models.py b/libs/partners/mistralai/langchain_mistralai/chat_models.py index d63d2fef21e..9cdadbece5e 100644 --- a/libs/partners/mistralai/langchain_mistralai/chat_models.py +++ b/libs/partners/mistralai/langchain_mistralai/chat_models.py @@ -406,8 +406,8 @@ class ChatMistralAI(BaseChatModel): max_tokens: int | None = None top_p: float = 1 """Decode using nucleus sampling: consider the smallest set of tokens whose - probability sum is at least ``top_p``. Must be in the closed interval - ``[0.0, 1.0]``.""" + probability sum is at least `top_p`. Must be in the closed interval + `[0.0, 1.0]`.""" random_seed: int | None = None safe_mode: bool | None = None streaming: bool = False @@ -710,7 +710,7 @@ class ChatMistralAI(BaseChatModel): (if any), or a dict of the form: {"type": "function", "function": {"name": <>}}. kwargs: Any additional parameters are passed directly to - ``self.bind(**kwargs)``. + `self.bind(**kwargs)`. """ formatted_tools = [convert_to_openai_tool(tool) for tool in tools] @@ -752,7 +752,7 @@ class ChatMistralAI(BaseChatModel): - a `TypedDict` class (support added in 0.1.12), - or a Pydantic class. - If ``schema`` is a Pydantic class then the model output will be a + If `schema` is a Pydantic class then the model output will be a Pydantic instance of that class, and the model-generated fields will be validated by the Pydantic class. Otherwise the model output will be a dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool` @@ -764,13 +764,13 @@ class ChatMistralAI(BaseChatModel): method: The method for steering model generation, one of: - - ``'function_calling'``: + - `'function_calling'`: Uses Mistral's [function-calling feature](https://docs.mistral.ai/capabilities/function_calling/). - - ``'json_schema'``: + - `'json_schema'`: Uses Mistral's [structured output feature](https://docs.mistral.ai/capabilities/structured-output/custom_structured_output/). - - ``'json_mode'``: + - `'json_mode'`: Uses Mistral's [JSON mode](https://docs.mistral.ai/capabilities/structured-output/json_mode/). Note that if using JSON mode then you @@ -786,10 +786,10 @@ class ChatMistralAI(BaseChatModel): then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. + with keys `'raw'`, `'parsed'`, and `'parsing_error'`. kwargs: Any additional parameters are passed directly to - ``self.bind(**kwargs)``. This is useful for passing in + `self.bind(**kwargs)`. 
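A sketch of forcing a specific tool via the `tool_choice` dict form documented above for `ChatMistralAI.bind_tools`; the model name and the `get_weather` tool are illustrative:

.. code-block:: python

    from langchain_core.tools import tool

    from langchain_mistralai import ChatMistralAI


    @tool
    def get_weather(city: str) -> str:
        """Get the current weather for a city."""
        return f"It is sunny in {city}."


    # Illustrative model name.
    llm = ChatMistralAI(model="mistral-large-latest")

    # Force a call to `get_weather` using the documented tool_choice dict form.
    llm_with_tools = llm.bind_tools(
        [get_weather],
        tool_choice={"type": "function", "function": {"name": "get_weather"}},
    )

    ai_msg = llm_with_tools.invoke("What's the weather in Paris?")
    ai_msg.tool_calls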
This is useful for passing in parameters such as `tool_choice` or `tools` to control which tool the model should call, or to pass in parameters such as `stop` to control when the model should stop generating output. @@ -797,15 +797,15 @@ class ChatMistralAI(BaseChatModel): Returns: A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`. - If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs - an instance of ``schema`` (i.e., a Pydantic object). + If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs + an instance of `schema` (i.e., a Pydantic object). - Otherwise, if ``include_raw`` is False then Runnable outputs a dict. + Otherwise, if `include_raw` is False then Runnable outputs a dict. - If ``include_raw`` is True, then Runnable outputs a dict with keys: - - ``'raw'``: BaseMessage - - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``'parsing_error'``: BaseException | None + If `include_raw` is True, then Runnable outputs a dict with keys: + - `'raw'`: BaseMessage + - `'parsed'`: None if there was a parsing error, otherwise the type depends on the `schema` as described above. + - `'parsing_error'`: BaseException | None Example: schema=Pydantic class, method="function_calling", include_raw=False: .. code-block:: python diff --git a/libs/partners/mistralai/langchain_mistralai/embeddings.py b/libs/partners/mistralai/langchain_mistralai/embeddings.py index 4c41edba113..2b150aef6a0 100644 --- a/libs/partners/mistralai/langchain_mistralai/embeddings.py +++ b/libs/partners/mistralai/langchain_mistralai/embeddings.py @@ -41,8 +41,8 @@ class MistralAIEmbeddings(BaseModel, Embeddings): """MistralAI embedding model integration. Setup: - Install ``langchain_mistralai`` and set environment variable - ``MISTRAL_API_KEY``. + Install `langchain_mistralai` and set environment variable + `MISTRAL_API_KEY`. .. code-block:: bash @@ -56,7 +56,7 @@ class MistralAIEmbeddings(BaseModel, Embeddings): Key init args — client params: api_key: SecretStr | None The API key for the MistralAI API. If not provided, it will be read from the - environment variable ``MISTRAL_API_KEY``. + environment variable `MISTRAL_API_KEY`. max_retries: int The number of times to retry a request if it fails. timeout: int diff --git a/libs/partners/nomic/langchain_nomic/embeddings.py b/libs/partners/nomic/langchain_nomic/embeddings.py index 86f940dc2c0..a4a3537ca68 100644 --- a/libs/partners/nomic/langchain_nomic/embeddings.py +++ b/libs/partners/nomic/langchain_nomic/embeddings.py @@ -69,16 +69,16 @@ class NomicEmbeddings(Embeddings): Args: model: model name - nomic_api_key: optionally, set the Nomic API key. Uses the ``NOMIC_API_KEY`` + nomic_api_key: optionally, set the Nomic API key. Uses the `NOMIC_API_KEY` environment variable by default. dimensionality: The embedding dimension, for use with Matryoshka-capable models. Defaults to full-size. - inference_mode: How to generate embeddings. One of ``'remote'``, ``'local'`` - (Embed4All), or ``'dynamic'`` (automatic). Defaults to ``'remote'``. + inference_mode: How to generate embeddings. One of `'remote'`, `'local'` + (Embed4All), or `'dynamic'` (automatic). Defaults to `'remote'`. device: The device to use for local embeddings. Choices include - ``'cpu'``, ``'gpu'``, ``'nvidia'``, ``'amd'``, or a specific device - name. See the docstring for ``GPT4All.__init__`` for more info. - Typically defaults to ``'cpu'``. 
Do not use on macOS. + `'cpu'`, `'gpu'`, `'nvidia'`, `'amd'`, or a specific device + name. See the docstring for `GPT4All.__init__` for more info. + Typically defaults to `'cpu'`. Do not use on macOS. vision_model: The vision model to use for image embeddings. """ @@ -96,8 +96,8 @@ class NomicEmbeddings(Embeddings): Args: texts: list of texts to embed - task_type: the task type to use when embedding. One of ``'search_query'``, - ``'search_document'``, ``'classification'``, ``'clustering'`` + task_type: the task type to use when embedding. One of `'search_query'`, + `'search_document'`, `'classification'`, `'clustering'` """ output = embed.text( diff --git a/libs/partners/ollama/langchain_ollama/__init__.py b/libs/partners/ollama/langchain_ollama/__init__.py index 8382ffce694..15f8a13090a 100644 --- a/libs/partners/ollama/langchain_ollama/__init__.py +++ b/libs/partners/ollama/langchain_ollama/__init__.py @@ -4,7 +4,7 @@ Provides infrastructure for interacting with the [Ollama](https://ollama.com/) service. !!! note - **Newly added in 0.3.4:** ``validate_model_on_init`` param on all models. + **Newly added in 0.3.4:** `validate_model_on_init` param on all models. This parameter allows you to validate the model exists in Ollama locally on initialization. If set to `True`, it will raise an error if the model does not exist locally. This is useful for ensuring that the model is available before diff --git a/libs/partners/ollama/langchain_ollama/_utils.py b/libs/partners/ollama/langchain_ollama/_utils.py index 301d6975d24..c64e2cdbfa5 100644 --- a/libs/partners/ollama/langchain_ollama/_utils.py +++ b/libs/partners/ollama/langchain_ollama/_utils.py @@ -58,10 +58,10 @@ def parse_url_with_auth( url: The URL to parse. Returns: - A tuple of ``(cleaned_url, headers_dict)`` where: - - ``cleaned_url`` is the URL without authentication credentials if any were + A tuple of `(cleaned_url, headers_dict)` where: + - `cleaned_url` is the URL without authentication credentials if any were found. Otherwise, returns the original URL. - - ``headers_dict`` contains Authorization header if credentials were found. + - `headers_dict` contains Authorization header if credentials were found. """ if not url: return None, None @@ -106,7 +106,7 @@ def merge_auth_headers( Args: client_kwargs: The client kwargs dict to update. - auth_headers: Headers to merge (typically from ``parse_url_with_auth``). + auth_headers: Headers to merge (typically from `parse_url_with_auth`). """ if auth_headers: headers = client_kwargs.get("headers", {}) diff --git a/libs/partners/ollama/langchain_ollama/chat_models.py b/libs/partners/ollama/langchain_ollama/chat_models.py index ce236f2552a..8a0ff824683 100644 --- a/libs/partners/ollama/langchain_ollama/chat_models.py +++ b/libs/partners/ollama/langchain_ollama/chat_models.py @@ -120,8 +120,8 @@ def _parse_json_string( ) -> Any: """Attempt to parse a JSON string for tool calling. - It first tries to use the standard ``json.loads``. If that fails, it falls - back to ``ast.literal_eval`` to safely parse Python literals, which is more + It first tries to use the standard `json.loads`. If that fails, it falls + back to `ast.literal_eval` to safely parse Python literals, which is more robust against models using single quotes or containing apostrophes. Args: @@ -133,7 +133,7 @@ def _parse_json_string( The parsed JSON string or Python literal. Raises: - OutputParserException: If the string is invalid and ``skip=False``. + OutputParserException: If the string is invalid and `skip=False`. 
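The fallback behaviour described for `_parse_json_string` above — strict JSON first, then Python-literal parsing — can be sketched roughly as follows. The helper name is hypothetical and the real implementation also handles the `skip` flag and raises `OutputParserException`:

.. code-block:: python

    import ast
    import json
    from typing import Any


    def parse_tool_arguments(raw: str) -> Any:
        """Best-effort parse of a tool-call argument string (illustrative)."""
        try:
            # Strict JSON first.
            return json.loads(raw)
        except json.JSONDecodeError:
            # Fall back to Python literals, which tolerates single quotes,
            # e.g. "{'city': 'Paris'}" as emitted by some models.
            return ast.literal_eval(raw)


    parse_tool_arguments('{"city": "Paris"}')   # -> {'city': 'Paris'}
    parse_tool_arguments("{'city': 'Paris'}")   # -> {'city': 'Paris'}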
""" try: return json.loads(json_string) @@ -274,17 +274,17 @@ class ChatOllama(BaseChatModel): [supported models](https://ollama.com/search?c=thinking). - `True`: Enables reasoning mode. The model's reasoning process will be - captured and returned separately in the ``additional_kwargs`` of the - response message, under ``reasoning_content``. The main response - content will not include the reasoning tags. + captured and returned separately in the `additional_kwargs` of the + response message, under `reasoning_content`. The main response + content will not include the reasoning tags. - `False`: Disables reasoning mode. The model will not perform any reasoning, - and the response will not include any reasoning content. + and the response will not include any reasoning content. - `None` (Default): The model will use its default reasoning behavior. Note - however, if the model's default behavior *is* to perform reasoning, think tags - (```` and ````) will be present within the main response content - unless you set ``reasoning`` to `True`. + however, if the model's default behavior *is* to perform reasoning, think tags + (`` and ``) will be present within the main response content + unless you set `reasoning` to `True`. temperature: float - Sampling temperature. Ranges from ``0.0`` to ``1.0``. + Sampling temperature. Ranges from `0.0` to `1.0`. num_predict: int | None Max number of tokens to generate. @@ -481,15 +481,15 @@ class ChatOllama(BaseChatModel): Thinking / Reasoning: You can enable reasoning mode for models that support it by setting - the ``reasoning`` parameter to `True` in either the constructor or + the `reasoning` parameter to `True` in either the constructor or the `invoke`/`stream` methods. This will enable the model to think through the problem and return the reasoning process separately in the - ``additional_kwargs`` of the response message, under ``reasoning_content``. + `additional_kwargs` of the response message, under `reasoning_content`. - If ``reasoning`` is set to `None`, the model will use its default reasoning + If `reasoning` is set to `None`, the model will use its default reasoning behavior, and any reasoning content will *not* be captured under the - ``reasoning_content`` key, but will be present within the main response content - as think tags (```` and ````). + `reasoning_content` key, but will be present within the main response content + as think tags (`` and ``). !!! note This feature is only available for [models that support reasoning](https://ollama.com/search?c=thinking). @@ -527,19 +527,19 @@ class ChatOllama(BaseChatModel): """Controls the reasoning/thinking mode for [supported models](https://ollama.com/search?c=thinking). - `True`: Enables reasoning mode. The model's reasoning process will be - captured and returned separately in the ``additional_kwargs`` of the - response message, under ``reasoning_content``. The main response - content will not include the reasoning tags. + captured and returned separately in the `additional_kwargs` of the + response message, under `reasoning_content`. The main response + content will not include the reasoning tags. - `False`: Disables reasoning mode. The model will not perform any reasoning, - and the response will not include any reasoning content. + and the response will not include any reasoning content. - `None` (Default): The model will use its default reasoning behavior. 
Note - however, if the model's default behavior *is* to perform reasoning, think tags - ()```` and ````) will be present within the main response content - unless you set ``reasoning`` to `True`. - - `str`: e.g. `'low'`, ``'medium'``, `'high'`. Enables reasoning with a custom - intensity level. Currently, this is only supported ``gpt-oss``. See the - [Ollama docs](https://github.com/ollama/ollama-python/blob/da79e987f0ac0a4986bf396f043b36ef840370bc/ollama/_types.py#L210) - for more information. + however, if the model's default behavior *is* to perform reasoning, think tags + (`` and ``) will be present within the main response content + unless you set `reasoning` to `True`. + - `str`: e.g. `'low'`, `'medium'`, `'high'`. Enables reasoning with a custom + intensity level. Currently, this is only supported `gpt-oss`. See the + [Ollama docs](https://github.com/ollama/ollama-python/blob/da79e987f0ac0a4986bf396f043b36ef840370bc/ollama/_types.py#L210) + for more information. """ validate_model_on_init: bool = False @@ -560,7 +560,7 @@ class ChatOllama(BaseChatModel): A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. - (Default: ``0.1``) + (Default: `0.1`) """ mirostat_tau: float | None = None @@ -568,13 +568,13 @@ class ChatOllama(BaseChatModel): A lower value will result in more focused and coherent text. - (Default: ``5.0``) + (Default: `5.0`) """ num_ctx: int | None = None """Sets the size of the context window used to generate the next token. - (Default: ``2048``) + (Default: `2048`) """ num_gpu: int | None = None @@ -594,20 +594,20 @@ class ChatOllama(BaseChatModel): num_predict: int | None = None """Maximum number of tokens to predict when generating text. - (Default: ``128``, ``-1`` = infinite generation, ``-2`` = fill context) + (Default: `128`, `-1` = infinite generation, `-2` = fill context) """ repeat_last_n: int | None = None """Sets how far back for the model to look back to prevent repetition. - (Default: ``64``, `0` = disabled, ``-1`` = ``num_ctx``) + (Default: `64`, `0` = disabled, `-1` = `num_ctx`) """ repeat_penalty: float | None = None """Sets how strongly to penalize repetitions. - A higher value (e.g., ``1.5``) will penalize repetitions more strongly, while a - lower value (e.g., ``0.9``) will be more lenient. (Default: ``1.1``) + A higher value (e.g., `1.5`) will penalize repetitions more strongly, while a + lower value (e.g., `0.9`) will be more lenient. (Default: `1.1`) """ temperature: float | None = None @@ -615,7 +615,7 @@ class ChatOllama(BaseChatModel): Increasing the temperature will make the model answer more creatively. - (Default: ``0.8``) + (Default: `0.8`) """ seed: int | None = None @@ -633,7 +633,7 @@ class ChatOllama(BaseChatModel): Used to reduce the impact of less probable tokens from the output. - A higher value (e.g., ``2.0``) will reduce the impact more, while a value of ``1.0`` + A higher value (e.g., `2.0`) will reduce the impact more, while a value of `1.0` disables this setting. (Default: `1`) @@ -642,23 +642,23 @@ class ChatOllama(BaseChatModel): top_k: int | None = None """Reduces the probability of generating nonsense. - A higher value (e.g. ``100``) will give more diverse answers, while a lower value - (e.g. ``10``) will be more conservative. + A higher value (e.g. `100`) will give more diverse answers, while a lower value + (e.g. `10`) will be more conservative. - (Default: ``40``) + (Default: `40`) """ top_p: float | None = None """Works together with top-k. 
- A higher value (e.g., ``0.95``) will lead to more diverse text, while a lower value - (e.g., ``0.5``) will generate more focused and conservative text. + A higher value (e.g., `0.95`) will lead to more diverse text, while a lower value + (e.g., `0.5`) will generate more focused and conservative text. - (Default: ``0.9``) + (Default: `0.9`) """ format: Literal["", "json"] | JsonSchemaValue | None = None - """Specify the format of the output (options: ``'json'``, JSON schema).""" + """Specify the format of the output (options: `'json'`, JSON schema).""" keep_alive: int | str | None = None """How long the model will stay loaded into memory.""" @@ -690,7 +690,7 @@ class ChatOllama(BaseChatModel): These arguments are passed to both synchronous and async clients. - Use ``sync_client_kwargs`` and ``async_client_kwargs`` to pass different arguments + Use `sync_client_kwargs` and `async_client_kwargs` to pass different arguments to synchronous and asynchronous clients. """ @@ -1242,7 +1242,7 @@ class ChatOllama(BaseChatModel): tool_choice: If provided, which tool for model to call. **This parameter is currently ignored as it is not supported by Ollama.** kwargs: Any additional parameters are passed directly to - ``self.bind(**kwargs)``. + `self.bind(**kwargs)`. """ formatted_tools = [convert_to_openai_tool(tool) for tool in tools] return super().bind(tools=formatted_tools, **kwargs) @@ -1265,7 +1265,7 @@ class ChatOllama(BaseChatModel): - a `TypedDict` class - an OpenAI function/tool schema. - If ``schema`` is a Pydantic class then the model output will be a + If `schema` is a Pydantic class then the model output will be a Pydantic instance of that class, and the model-generated fields will be validated by the Pydantic class. Otherwise the model output will be a dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool` @@ -1274,12 +1274,12 @@ class ChatOllama(BaseChatModel): method: The method for steering model generation, one of: - - ``'json_schema'``: + - `'json_schema'`: Uses Ollama's [structured output API](https://ollama.com/blog/structured-outputs) - - ``'function_calling'``: + - `'function_calling'`: Uses Ollama's tool-calling API - - ``'json_mode'``: - Specifies ``format='json'``. Note that if using JSON mode then you + - `'json_mode'`: + Specifies `format='json'`. Note that if using JSON mode then you must include instructions for formatting the output into the desired schema into the model call. @@ -1289,26 +1289,26 @@ class ChatOllama(BaseChatModel): then both the raw model response (a `BaseMessage`) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. + with keys `'raw'`, `'parsed'`, and `'parsing_error'`. kwargs: Additional keyword args aren't supported. Returns: A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`. - If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict. + If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs an instance of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is False then Runnable outputs a dict. 
- If ``include_raw`` is True, then Runnable outputs a dict with keys: + If `include_raw` is True, then Runnable outputs a dict with keys: - - ``'raw'``: `BaseMessage` - - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``'parsing_error'``: BaseException | None + - `'raw'`: `BaseMessage` + - `'parsed'`: None if there was a parsing error, otherwise the type depends on the `schema` as described above. + - `'parsing_error'`: BaseException | None !!! warning "Behavior changed in 0.2.2" - Added support for structured output API via ``format`` parameter. + Added support for structured output API via `format` parameter. !!! warning "Behavior changed in 0.3.0" - Updated default ``method`` to ``'json_schema'``. + Updated default `method` to `'json_schema'`. ??? note "Example: `schema=Pydantic` class, `method='json_schema'`, `include_raw=False`" diff --git a/libs/partners/ollama/langchain_ollama/embeddings.py b/libs/partners/ollama/langchain_ollama/embeddings.py index 103690efa5e..534569f591b 100644 --- a/libs/partners/ollama/langchain_ollama/embeddings.py +++ b/libs/partners/ollama/langchain_ollama/embeddings.py @@ -23,7 +23,7 @@ class OllamaEmbeddings(BaseModel, Embeddings): You can view a list of available models via [the model library](https://ollama.com/library). - To fetch a model from the Ollama model library use ``ollama pull ``. + To fetch a model from the Ollama model library use `ollama pull `. For example, to pull the llama3 model: @@ -34,11 +34,11 @@ class OllamaEmbeddings(BaseModel, Embeddings): This will download the default tagged version of the model. Typically, the default points to the latest, smallest sized-parameter model. - * On Mac, the models will be downloaded to ``~/.ollama/models`` - * On Linux (or WSL), the models will be stored at ``/usr/share/ollama/.ollama/models`` + * On Mac, the models will be downloaded to `~/.ollama/models` + * On Linux (or WSL), the models will be stored at `/usr/share/ollama/.ollama/models` You can specify the exact version of the model of interest - as such ``ollama pull vicuna:13b-v1.5-16k-q4_0``. + as such `ollama pull vicuna:13b-v1.5-16k-q4_0`. To view pulled models: @@ -155,7 +155,7 @@ class OllamaEmbeddings(BaseModel, Embeddings): These arguments are passed to both synchronous and async clients. - Use ``sync_client_kwargs`` and ``async_client_kwargs`` to pass different arguments + Use `sync_client_kwargs` and `async_client_kwargs` to pass different arguments to synchronous and asynchronous clients. """ @@ -189,16 +189,16 @@ class OllamaEmbeddings(BaseModel, Embeddings): """Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make - the algorithm more responsive. (Default: ``0.1``)""" + the algorithm more responsive. (Default: `0.1`)""" mirostat_tau: float | None = None """Controls the balance between coherence and diversity of the output. A lower value will result in more focused and - coherent text. (Default: ``5.0``)""" + coherent text. (Default: `5.0`)""" num_ctx: int | None = None """Sets the size of the context window used to generate the - next token. (Default: ``2048``) """ + next token. (Default: `2048`) """ num_gpu: int | None = None """The number of GPUs to use. 
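A small sketch of `OllamaEmbeddings` with one of the sampling/context parameters documented above; it assumes the model has already been pulled locally (e.g. `ollama pull llama3`) and the `num_ctx` value is illustrative:

.. code-block:: python

    from langchain_ollama import OllamaEmbeddings

    embed = OllamaEmbeddings(model="llama3", num_ctx=4096)

    vector = embed.embed_query("What is the meaning of life?")
    vectors = embed.embed_documents(["Document 1...", "Document 2..."])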
On macOS it defaults to `1` to @@ -206,7 +206,7 @@ class OllamaEmbeddings(BaseModel, Embeddings): keep_alive: int | None = None """Controls how long the model will stay loaded into memory - following the request (default: ``5m``) + following the request (default: `5m`) """ num_thread: int | None = None @@ -217,34 +217,34 @@ class OllamaEmbeddings(BaseModel, Embeddings): repeat_last_n: int | None = None """Sets how far back for the model to look back to prevent - repetition. (Default: ``64``, `0` = disabled, ``-1`` = ``num_ctx``)""" + repetition. (Default: `64`, `0` = disabled, `-1` = `num_ctx`)""" repeat_penalty: float | None = None - """Sets how strongly to penalize repetitions. A higher value (e.g., ``1.5``) - will penalize repetitions more strongly, while a lower value (e.g., ``0.9``) - will be more lenient. (Default: ``1.1``)""" + """Sets how strongly to penalize repetitions. A higher value (e.g., `1.5`) + will penalize repetitions more strongly, while a lower value (e.g., `0.9`) + will be more lenient. (Default: `1.1`)""" temperature: float | None = None """The temperature of the model. Increasing the temperature will - make the model answer more creatively. (Default: ``0.8``)""" + make the model answer more creatively. (Default: `0.8`)""" stop: list[str] | None = None """Sets the stop tokens to use.""" tfs_z: float | None = None """Tail free sampling is used to reduce the impact of less probable - tokens from the output. A higher value (e.g., ``2.0``) will reduce the - impact more, while a value of ``1.0`` disables this setting. (default: `1`)""" + tokens from the output. A higher value (e.g., `2.0`) will reduce the + impact more, while a value of `1.0` disables this setting. (default: `1`)""" top_k: int | None = None - """Reduces the probability of generating nonsense. A higher value (e.g. ``100``) - will give more diverse answers, while a lower value (e.g. ``10``) - will be more conservative. (Default: ``40``)""" + """Reduces the probability of generating nonsense. A higher value (e.g. `100`) + will give more diverse answers, while a lower value (e.g. `10`) + will be more conservative. (Default: `40`)""" top_p: float | None = None - """Works together with top-k. A higher value (e.g., ``0.95``) will lead - to more diverse text, while a lower value (e.g., ``0.5``) will - generate more focused and conservative text. (Default: ``0.9``)""" + """Works together with top-k. A higher value (e.g., `0.95`) will lead + to more diverse text, while a lower value (e.g., `0.5`) will + generate more focused and conservative text. (Default: `0.9`)""" model_config = ConfigDict( extra="forbid", diff --git a/libs/partners/ollama/langchain_ollama/llms.py b/libs/partners/ollama/langchain_ollama/llms.py index 79bb35b80d7..c68fbd06999 100644 --- a/libs/partners/ollama/langchain_ollama/llms.py +++ b/libs/partners/ollama/langchain_ollama/llms.py @@ -28,7 +28,7 @@ class OllamaLLM(BaseLLM): pip install -U langchain-ollama # Visit https://ollama.com/download to download and install Ollama - # (Linux users): start the server with ``ollama serve`` + # (Linux users): start the server with `ollama serve` Download a model to use: @@ -38,7 +38,7 @@ class OllamaLLM(BaseLLM): Key init args — generation params: model: str - Name of the Ollama model to use (e.g. ``'llama4'``). + Name of the Ollama model to use (e.g. `'llama4'`). temperature: float | None Sampling temperature. Higher values make output more creative. 
num_predict: int | None @@ -117,14 +117,14 @@ class OllamaLLM(BaseLLM): [supported models](https://ollama.com/search?c=thinking). - `True`: Enables reasoning mode. The model's reasoning process will be - captured and returned separately in the ``additional_kwargs`` of the - response message, under ``reasoning_content``. The main response - content will not include the reasoning tags. + captured and returned separately in the `additional_kwargs` of the + response message, under `reasoning_content`. The main response + content will not include the reasoning tags. - `False`: Disables reasoning mode. The model will not perform any reasoning, - and the response will not include any reasoning content. + and the response will not include any reasoning content. - `None` (Default): The model will use its default reasoning behavior. If - the model performs reasoning, the ``<think>`` and ``</think>`` tags will - be present directly within the main response content. + the model performs reasoning, the `<think>` and `</think>` tags will + be present directly within the main response content.""" validate_model_on_init: bool = False """Whether to validate the model exists in ollama locally on initialization. @@ -140,16 +140,16 @@ class OllamaLLM(BaseLLM): """Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make - the algorithm more responsive. (Default: ``0.1``)""" + the algorithm more responsive. (Default: `0.1`)""" mirostat_tau: float | None = None """Controls the balance between coherence and diversity of the output. A lower value will result in more focused and - coherent text. (Default: ``5.0``)""" + coherent text. (Default: `5.0`)""" num_ctx: int | None = None """Sets the size of the context window used to generate the - next token. (Default: ``2048``)""" + next token. (Default: `2048`)""" num_gpu: int | None = None """The number of GPUs to use. On macOS it defaults to `1` to @@ -163,20 +163,20 @@ class OllamaLLM(BaseLLM): num_predict: int | None = None """Maximum number of tokens to predict when generating text. - (Default: ``128``, ``-1`` = infinite generation, ``-2`` = fill context)""" + (Default: `128`, `-1` = infinite generation, `-2` = fill context)""" repeat_last_n: int | None = None """Sets how far back for the model to look back to prevent - repetition. (Default: ``64``, `0` = disabled, ``-1`` = ``num_ctx``)""" + repetition. (Default: `64`, `0` = disabled, `-1` = `num_ctx`)""" repeat_penalty: float | None = None - """Sets how strongly to penalize repetitions. A higher value (e.g., ``1.5``) - will penalize repetitions more strongly, while a lower value (e.g., ``0.9``) - will be more lenient. (Default: ``1.1``)""" + """Sets how strongly to penalize repetitions. A higher value (e.g., `1.5`) + will penalize repetitions more strongly, while a lower value (e.g., `0.9`) + will be more lenient. (Default: `1.1`)""" temperature: float | None = None """The temperature of the model. Increasing the temperature will - make the model answer more creatively. (Default: ``0.8``)""" + make the model answer more creatively. (Default: `0.8`)""" seed: int | None = None """Sets the random number seed to use for generation. Setting this @@ -188,21 +188,21 @@ class OllamaLLM(BaseLLM): tfs_z: float | None = None """Tail free sampling is used to reduce the impact of less probable - tokens from the output. A higher value (e.g., ``2.0``) will reduce the + tokens from the output.
A higher value (e.g., `2.0`) will reduce the impact more, while a value of 1.0 disables this setting. (default: `1`)""" top_k: int | None = None - """Reduces the probability of generating nonsense. A higher value (e.g. ``100``) - will give more diverse answers, while a lower value (e.g. ``10``) - will be more conservative. (Default: ``40``)""" + """Reduces the probability of generating nonsense. A higher value (e.g. `100`) + will give more diverse answers, while a lower value (e.g. `10`) + will be more conservative. (Default: `40`)""" top_p: float | None = None - """Works together with top-k. A higher value (e.g., ``0.95``) will lead - to more diverse text, while a lower value (e.g., ``0.5``) will - generate more focused and conservative text. (Default: ``0.9``)""" + """Works together with top-k. A higher value (e.g., `0.95`) will lead + to more diverse text, while a lower value (e.g., `0.5`) will + generate more focused and conservative text. (Default: `0.9`)""" format: Literal["", "json"] = "" - """Specify the format of the output (options: ``'json'``)""" + """Specify the format of the output (options: `'json'`)""" keep_alive: int | str | None = None """How long the model will stay loaded into memory.""" @@ -234,7 +234,7 @@ class OllamaLLM(BaseLLM): These arguments are passed to both synchronous and async clients. - Use ``sync_client_kwargs`` and ``async_client_kwargs`` to pass different arguments + Use `sync_client_kwargs` and `async_client_kwargs` to pass different arguments to synchronous and asynchronous clients. """ diff --git a/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_reasoning.py b/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_reasoning.py index 01e21b64473..6ea05a626fa 100644 --- a/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_reasoning.py +++ b/libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_reasoning.py @@ -13,7 +13,7 @@ REASONING_MODEL_NAME = "deepseek-r1:1.5b" @pytest.mark.parametrize("model", [REASONING_MODEL_NAME]) @pytest.mark.parametrize("use_async", [False, True]) async def test_stream_no_reasoning(model: str, use_async: bool) -> None: - """Test streaming with ``reasoning=False``.""" + """Test streaming with `reasoning=False`.""" llm = ChatOllama(model=model, num_ctx=2**12, reasoning=False) messages = [ { @@ -46,7 +46,7 @@ async def test_stream_no_reasoning(model: str, use_async: bool) -> None: @pytest.mark.parametrize("model", [REASONING_MODEL_NAME]) @pytest.mark.parametrize("use_async", [False, True]) async def test_stream_reasoning_none(model: str, use_async: bool) -> None: - """Test streaming with ``reasoning=None``.""" + """Test streaming with `reasoning=None`.""" llm = ChatOllama(model=model, num_ctx=2**12, reasoning=None) messages = [ { @@ -81,7 +81,7 @@ async def test_stream_reasoning_none(model: str, use_async: bool) -> None: @pytest.mark.parametrize("model", [REASONING_MODEL_NAME]) @pytest.mark.parametrize("use_async", [False, True]) async def test_reasoning_stream(model: str, use_async: bool) -> None: - """Test streaming with ``reasoning=True``.""" + """Test streaming with `reasoning=True`.""" llm = ChatOllama(model=model, num_ctx=2**12, reasoning=True) messages = [ { @@ -125,7 +125,7 @@ async def test_reasoning_stream(model: str, use_async: bool) -> None: @pytest.mark.parametrize("model", [REASONING_MODEL_NAME]) @pytest.mark.parametrize("use_async", [False, True]) async def test_invoke_no_reasoning(model: str, use_async: bool) -> None: - """Test 
invoke with ``reasoning=False``.""" + """Test invoke with `reasoning=False`.""" llm = ChatOllama(model=model, num_ctx=2**12, reasoning=False) message = HumanMessage(content=SAMPLE) if use_async: @@ -141,7 +141,7 @@ async def test_invoke_no_reasoning(model: str, use_async: bool) -> None: @pytest.mark.parametrize("model", [REASONING_MODEL_NAME]) @pytest.mark.parametrize("use_async", [False, True]) async def test_invoke_reasoning_none(model: str, use_async: bool) -> None: - """Test invoke with ``reasoning=None``.""" + """Test invoke with `reasoning=None`.""" llm = ChatOllama(model=model, num_ctx=2**12, reasoning=None) message = HumanMessage(content=SAMPLE) if use_async: @@ -159,7 +159,7 @@ async def test_invoke_reasoning_none(model: str, use_async: bool) -> None: @pytest.mark.parametrize("model", [REASONING_MODEL_NAME]) @pytest.mark.parametrize("use_async", [False, True]) async def test_reasoning_invoke(model: str, use_async: bool) -> None: - """Test invoke with ``reasoning=True``.""" + """Test invoke with `reasoning=True`.""" llm = ChatOllama(model=model, num_ctx=2**12, reasoning=True) message = HumanMessage(content=SAMPLE) if use_async: @@ -185,15 +185,15 @@ async def test_reasoning_invoke(model: str, use_async: bool) -> None: @pytest.mark.parametrize("model", [REASONING_MODEL_NAME]) def test_think_tag_stripping_necessity(model: str) -> None: - """Test that demonstrates why ``_strip_think_tags`` is necessary. + """Test that demonstrates why `_strip_think_tags` is necessary. DeepSeek R1 models include reasoning/thinking as their default behavior. - When ``reasoning=False`` is set, the user explicitly wants no reasoning content, + When `reasoning=False` is set, the user explicitly wants no reasoning content, but Ollama cannot disable thinking at the API level for these models. - Therefore, post-processing is required to strip the ``<think>`` tags. + Therefore, post-processing is required to strip the `<think>` tags. This test documents the specific behavior that necessitates the - ``_strip_think_tags`` function in the chat_models.py implementation. + `_strip_think_tags` function in the chat_models.py implementation. """ # Test with reasoning=None (default behavior - should include think tags) llm_default = ChatOllama(model=model, reasoning=None, num_ctx=2**12) diff --git a/libs/partners/ollama/tests/unit_tests/test_chat_models.py b/libs/partners/ollama/tests/unit_tests/test_chat_models.py index 8719fdc3bd4..666d4877953 100644 --- a/libs/partners/ollama/tests/unit_tests/test_chat_models.py +++ b/libs/partners/ollama/tests/unit_tests/test_chat_models.py @@ -74,7 +74,7 @@ def test__parse_arguments_from_tool_call() -> None: def test__parse_arguments_from_tool_call_with_function_name_metadata() -> None: """Test that functionName metadata is filtered out from tool arguments. - Some models may include metadata like ``functionName`` in the arguments + Some models may include metadata like `functionName` in the arguments that just echoes the function name. This should be filtered out for no-argument tools to return an empty dictionary. """ diff --git a/libs/partners/openai/langchain_openai/chat_models/_client_utils.py b/libs/partners/openai/langchain_openai/chat_models/_client_utils.py index d77fedb4e61..3eba5c7309b 100644 --- a/libs/partners/openai/langchain_openai/chat_models/_client_utils.py +++ b/libs/partners/openai/langchain_openai/chat_models/_client_utils.py @@ -84,7 +84,7 @@ def _get_default_httpx_client( ) -> _SyncHttpxClientWrapper: """Get default httpx client.
- Uses cached client unless timeout is ``httpx.Timeout``, which is not hashable. + Uses cached client unless timeout is `httpx.Timeout`, which is not hashable. """ try: hash(timeout) @@ -99,7 +99,7 @@ def _get_default_async_httpx_client( ) -> _AsyncHttpxClientWrapper: """Get default httpx client. - Uses cached client unless timeout is ``httpx.Timeout``, which is not hashable. + Uses cached client unless timeout is `httpx.Timeout`, which is not hashable. """ try: hash(timeout) diff --git a/libs/partners/openai/langchain_openai/chat_models/_compat.py b/libs/partners/openai/langchain_openai/chat_models/_compat.py index 0c664ac0bad..a762da564ce 100644 --- a/libs/partners/openai/langchain_openai/chat_models/_compat.py +++ b/libs/partners/openai/langchain_openai/chat_models/_compat.py @@ -1,11 +1,11 @@ -"""Converts between AIMessage output formats, governed by ``output_version``. +"""Converts between AIMessage output formats, governed by `output_version`. -``output_version`` is an attribute on ChatOpenAI. +`output_version` is an attribute on ChatOpenAI. -Supported values are `None`, ``'v0'``, and ``'responses/v1'``. +Supported values are `None`, `'v0'`, and `'responses/v1'`. -``'v0'`` corresponds to the format as of ``ChatOpenAI`` v0.3. For the Responses API, it -stores reasoning and tool outputs in ``AIMessage.additional_kwargs``: +`'v0'` corresponds to the format as of `ChatOpenAI` v0.3. For the Responses API, it +stores reasoning and tool outputs in `AIMessage.additional_kwargs`: .. code-block:: python @@ -32,7 +32,7 @@ stores reasoning and tool outputs in ``AIMessage.additional_kwargs``: id="msg_123", ) -``'responses/v1'`` is only applicable to the Responses API. It retains information +`'responses/v1'` is only applicable to the Responses API. It retains information about response item sequencing and accommodates multiple reasoning items by representing these items in the content sequence: diff --git a/libs/partners/openai/langchain_openai/chat_models/azure.py b/libs/partners/openai/langchain_openai/chat_models/azure.py index f2c411efe9b..6bd18f14e3a 100644 --- a/libs/partners/openai/langchain_openai/chat_models/azure.py +++ b/libs/partners/openai/langchain_openai/chat_models/azure.py @@ -39,7 +39,7 @@ class AzureChatOpenAI(BaseChatOpenAI): to create your Azure OpenAI deployment. Then install `langchain-openai` and set environment variables - ``AZURE_OPENAI_API_KEY`` and ``AZURE_OPENAI_ENDPOINT``: + `AZURE_OPENAI_API_KEY` and `AZURE_OPENAI_ENDPOINT`: .. code-block:: bash @@ -68,13 +68,13 @@ class AzureChatOpenAI(BaseChatOpenAI): Max number of retries. organization: str | None OpenAI organization ID. If not passed in will be read from env - var ``OPENAI_ORG_ID``. + var `OPENAI_ORG_ID`. model: str | None The name of the underlying OpenAI model. Used for tracing and token - counting. Does not affect completion. E.g. ``'gpt-4'``, ``'gpt-35-turbo'``, etc. + counting. Does not affect completion. E.g. `'gpt-4'`, `'gpt-35-turbo'`, etc. model_version: str | None The version of the underlying OpenAI model. Used for tracing and token - counting. Does not affect completion. E.g., ``'0125'``, ``'0125-preview'``, etc. + counting. Does not affect completion. E.g., `'0125'`, `'0125-preview'`, etc. See full list of supported init args and their descriptions in the params section. @@ -98,7 +98,7 @@ class AzureChatOpenAI(BaseChatOpenAI): !!! 
note Any param which is not explicitly supported will be passed directly to the - ``openai.AzureOpenAI.chat.completions.create(...)`` API every time to the model is + `openai.AzureOpenAI.chat.completions.create(...)` API every time the model is invoked. For example: @@ -314,7 +314,7 @@ class AzureChatOpenAI(BaseChatOpenAI): rating=None, ) - See ``AzureChatOpenAI.with_structured_output()`` for more. + See `AzureChatOpenAI.with_structured_output()` for more. JSON mode: .. code-block:: python @@ -467,14 +467,14 @@ class AzureChatOpenAI(BaseChatOpenAI): ) """Your Azure endpoint, including the resource. - Automatically inferred from env var ``AZURE_OPENAI_ENDPOINT`` if not provided. + Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided. - Example: ``https://example-resource.azure.openai.com/`` + Example: `https://example-resource.azure.openai.com/` """ deployment_name: str | None = Field(default=None, alias="azure_deployment") """A model deployment. - If given sets the base client URL to include ``/deployments/{azure_deployment}`` + If given, sets the base client URL to include `/deployments/{azure_deployment}` !!! note This means you won't be able to use non-deployment endpoints. @@ -483,7 +483,7 @@ class AzureChatOpenAI(BaseChatOpenAI): alias="api_version", default_factory=from_env("OPENAI_API_VERSION", default=None), ) - """Automatically inferred from env var ``OPENAI_API_VERSION`` if not provided.""" + """Automatically inferred from env var `OPENAI_API_VERSION` if not provided.""" # Check OPENAI_API_KEY for backwards compatibility. # TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using # other forms of azure credentials. @@ -493,13 +493,13 @@ class AzureChatOpenAI(BaseChatOpenAI): ["AZURE_OPENAI_API_KEY", "OPENAI_API_KEY"], default=None ), ) - """Automatically inferred from env var ``AZURE_OPENAI_API_KEY`` if not provided.""" + """Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided.""" azure_ad_token: SecretStr | None = Field( default_factory=secret_from_env("AZURE_OPENAI_AD_TOKEN", default=None) ) """Your Azure Active Directory token. - Automatically inferred from env var ``AZURE_OPENAI_AD_TOKEN`` if not provided. + Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided. For more, see [this page](https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id). """ @@ -507,7 +507,7 @@ class AzureChatOpenAI(BaseChatOpenAI): """A function that returns an Azure Active Directory token. Will be invoked on every sync request. For async requests, - will be invoked if ``azure_ad_async_token_provider`` is not provided. + will be invoked if `azure_ad_async_token_provider` is not provided. """ azure_ad_async_token_provider: Callable[[], Awaitable[str]] | None = None """A function that returns an Azure Active Directory token. @@ -517,7 +517,7 @@ class AzureChatOpenAI(BaseChatOpenAI): """ model_version: str = "" - """The version of the model (e.g. ``'0125'`` for ``'gpt-3.5-0125'``). + """The version of the model (e.g. `'0125'` for `'gpt-3.5-0125'`). Azure OpenAI doesn't return model version with the response by default so it must be manually specified if you want to use this information downstream, e.g.
when @@ -532,15 +532,15 @@ class AzureChatOpenAI(BaseChatOpenAI): openai_api_type: str | None = Field( default_factory=from_env("OPENAI_API_TYPE", default="azure") ) - """Legacy, for ``openai<1.0.0`` support.""" + """Legacy, for `openai<1.0.0` support.""" validate_base_url: bool = True - """If legacy arg ``openai_api_base`` is passed in, try to infer if it is a - ``base_url`` or ``azure_endpoint`` and update client params accordingly. + """If legacy arg `openai_api_base` is passed in, try to infer if it is a + `base_url` or `azure_endpoint` and update client params accordingly. """ model_name: str | None = Field(default=None, alias="model") # type: ignore[assignment] - """Name of the deployed OpenAI model, e.g. ``'gpt-4o'``, ``'gpt-35-turbo'``, etc. + """Name of the deployed OpenAI model, e.g. `'gpt-4o'`, `'gpt-35-turbo'`, etc. Distinct from the Azure deployment name, which is set by the Azure user. Used for tracing and token counting. @@ -553,12 +553,12 @@ class AzureChatOpenAI(BaseChatOpenAI): """Parameters of the OpenAI client or chat.completions endpoint that should be disabled for the given model. - Should be specified as ``{"param": None | ['val1', 'val2']}`` where the key is the + Should be specified as `{"param": None | ['val1', 'val2']}` where the key is the parameter and the value is either None, meaning that parameter should never be used, or it's a list of disabled values for the parameter. - For example, older models may not support the ``'parallel_tool_calls'`` parameter at - all, in which case ``disabled_params={"parallel_tool_calls: None}`` can ben passed + For example, older models may not support the `'parallel_tool_calls'` parameter at + all, in which case `disabled_params={"parallel_tool_calls": None}` can be passed in. If a parameter is disabled then it will not be used by default in any methods, e.g. @@ -567,8 +567,8 @@ class AzureChatOpenAI(BaseChatOpenAI): However this does not prevent a user from directly passed in the parameter during invocation. - By default, unless ``model_name="gpt-4o"`` is specified, then - ``'parallel_tools_calls'`` will be disabled. + By default, unless `model_name="gpt-4o"` is specified, then + `'parallel_tool_calls'` will be disabled. """ max_tokens: int | None = Field(default=None, alias="max_completion_tokens") # type: ignore[assignment] @@ -842,7 +842,7 @@ class AzureChatOpenAI(BaseChatOpenAI): - or a Pydantic class, - an OpenAI function/tool schema. - If ``schema`` is a Pydantic class then the model output will be a + If `schema` is a Pydantic class then the model output will be a Pydantic instance of that class, and the model-generated fields will be validated by the Pydantic class. Otherwise the model output will be a dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool` @@ -851,14 +851,14 @@ class AzureChatOpenAI(BaseChatOpenAI): method: The method for steering model generation, one of: - - ``'json_schema'``: + - `'json_schema'`: Uses OpenAI's [Structured Output API](https://platform.openai.com/docs/guides/structured-outputs). - Supported for ``'gpt-4o-mini'``, ``'gpt-4o-2024-08-06'``, ``'o1'``, and later + Supported for `'gpt-4o-mini'`, `'gpt-4o-2024-08-06'`, `'o1'`, and later models.
- - ``'function_calling'``: + - `'function_calling'`: Uses OpenAI's tool-calling (formerly called function calling) [API](https://platform.openai.com/docs/guides/function-calling) - - ``'json_mode'``: + - `'json_mode'`: Uses OpenAI's [JSON mode](https://platform.openai.com/docs/guides/structured-outputs/json-mode). Note that if using JSON mode then you must include instructions for formatting the output into the desired schema into the model call @@ -872,7 +872,7 @@ class AzureChatOpenAI(BaseChatOpenAI): then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. + with keys `'raw'`, `'parsed'`, and `'parsing_error'`. strict: - True: @@ -882,23 +882,23 @@ class AzureChatOpenAI(BaseChatOpenAI): Input schema will not be validated and model output will not be validated. - None: - ``strict`` argument will not be passed to the model. + `strict` argument will not be passed to the model. - If schema is specified via TypedDict or JSON schema, ``strict`` is not - enabled by default. Pass ``strict=True`` to enable it. + If schema is specified via TypedDict or JSON schema, `strict` is not + enabled by default. Pass `strict=True` to enable it. !!! note - ``strict`` can only be non-null if ``method`` is ``'json_schema'`` - or ``'function_calling'``. + `strict` can only be non-null if `method` is `'json_schema'` + or `'function_calling'`. tools: A list of tool-like objects to bind to the chat model. Requires that: - - ``method`` is ``'json_schema'`` (default). - - ``strict=True`` - - ``include_raw=True`` + - `method` is `'json_schema'` (default). + - `strict=True` + - `include_raw=True` If a model elects to call a - tool, the resulting `AIMessage` in ``'raw'`` will include tool calls. + tool, the resulting `AIMessage` in `'raw'` will include tool calls. ??? example @@ -940,35 +940,35 @@ class AzureChatOpenAI(BaseChatOpenAI): Returns: A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`. - If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs - an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict. + If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs + an instance of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is False then Runnable outputs a dict. - If ``include_raw`` is True, then Runnable outputs a dict with keys: + If `include_raw` is True, then Runnable outputs a dict with keys: - - ``'raw'``: BaseMessage - - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``'parsing_error'``: BaseException | None + - `'raw'`: BaseMessage + - `'parsed'`: None if there was a parsing error, otherwise the type depends on the `schema` as described above. + - `'parsing_error'`: BaseException | None !!! warning "Behavior changed in 0.1.20" - Added support for TypedDict class ``schema``. + Added support for TypedDict class `schema`. !!! warning "Behavior changed in 0.1.21" - Support for ``strict`` argument added. - Support for ``method="json_schema"`` added. + Support for `strict` argument added. + Support for `method="json_schema"` added. !!! warning "Behavior changed in 0.3.0" - ``method`` default changed from "function_calling" to "json_schema". 
+ `method` default changed from "function_calling" to "json_schema". !!! warning "Behavior changed in 0.3.12" Support for `tools` added. !!! warning "Behavior changed in 0.3.21" - Pass ``kwargs`` through to the model. + Pass `kwargs` through to the model. ??? note "Example: `schema=Pydantic` class, `method='json_schema'`, `include_raw=False`, `strict=True`" Note, OpenAI has a number of restrictions on what types of schemas can be - provided if ``strict`` = True. When using Pydantic, our model cannot + provided if `strict` = True. When using Pydantic, our model cannot specify any Field metadata (like min/max constraints) and fields cannot have default values. diff --git a/libs/partners/openai/langchain_openai/embeddings/azure.py b/libs/partners/openai/langchain_openai/embeddings/azure.py index f97bb0be18a..9d3cbd16400 100644 --- a/libs/partners/openai/langchain_openai/embeddings/azure.py +++ b/libs/partners/openai/langchain_openai/embeddings/azure.py @@ -91,7 +91,7 @@ class AzureOpenAIEmbeddings(OpenAIEmbeddings): # type: ignore[override] .. code-block:: python vector = await embed.aembed_query(input_text) - print(vector[:3]) + print(vector[:3]) # multiple: # await embed.aembed_documents(input_texts) @@ -107,7 +107,7 @@ class AzureOpenAIEmbeddings(OpenAIEmbeddings): # type: ignore[override] ) """Your Azure endpoint, including the resource. - Automatically inferred from env var ``AZURE_OPENAI_ENDPOINT`` if not provided. + Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided. Example: `https://example-resource.azure.openai.com/` """ @@ -129,14 +129,14 @@ class AzureOpenAIEmbeddings(OpenAIEmbeddings): # type: ignore[override] ["AZURE_OPENAI_API_KEY", "OPENAI_API_KEY"], default=None ), ) - """Automatically inferred from env var ``AZURE_OPENAI_API_KEY`` if not provided.""" + """Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided.""" openai_api_version: str | None = Field( default_factory=from_env("OPENAI_API_VERSION", default="2023-05-15"), alias="api_version", ) - """Automatically inferred from env var ``OPENAI_API_VERSION`` if not provided. + """Automatically inferred from env var `OPENAI_API_VERSION` if not provided. - Set to ``'2023-05-15'`` by default if env variable ``OPENAI_API_VERSION`` is not + Set to `'2023-05-15'` by default if env variable `OPENAI_API_VERSION` is not set. """ azure_ad_token: SecretStr | None = Field( @@ -144,7 +144,7 @@ class AzureOpenAIEmbeddings(OpenAIEmbeddings): # type: ignore[override] ) """Your Azure Active Directory token. - Automatically inferred from env var ``AZURE_OPENAI_AD_TOKEN`` if not provided. + Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided. [For more, see this page.](https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id) """ @@ -152,7 +152,7 @@ class AzureOpenAIEmbeddings(OpenAIEmbeddings): # type: ignore[override] """A function that returns an Azure Active Directory token. Will be invoked on every sync request. For async requests, - will be invoked if ``azure_ad_async_token_provider`` is not provided. + will be invoked if `azure_ad_async_token_provider` is not provided. """ azure_ad_async_token_provider: Callable[[], Awaitable[str]] | None = None """A function that returns an Azure Active Directory token. 
diff --git a/libs/partners/openai/langchain_openai/embeddings/base.py b/libs/partners/openai/langchain_openai/embeddings/base.py index 4d7b46e4e9d..27817a39d5c 100644 --- a/libs/partners/openai/langchain_openai/embeddings/base.py +++ b/libs/partners/openai/langchain_openai/embeddings/base.py @@ -82,7 +82,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings): """OpenAI embedding model integration. Setup: - Install ``langchain_openai`` and set environment variable ``OPENAI_API_KEY``. + Install `langchain_openai` and set environment variable `OPENAI_API_KEY`. .. code-block:: bash @@ -94,14 +94,14 @@ class OpenAIEmbeddings(BaseModel, Embeddings): Name of OpenAI model to use. dimensions: int | None = None The number of dimensions the resulting output embeddings should have. - Only supported in ``'text-embedding-3'`` and later models. + Only supported in `'text-embedding-3'` and later models. Key init args — client params: api_key: SecretStr | None = None OpenAI API key. organization: str | None = None OpenAI organization ID. If not passed in will be read - from env var ``OPENAI_ORG_ID``. + from env var `OPENAI_ORG_ID`. max_retries: int = 2 Maximum number of retries to make when generating. request_timeout: float | Tuple[float, float] | Any | None = None @@ -196,14 +196,14 @@ class OpenAIEmbeddings(BaseModel, Embeddings): openai_api_key: SecretStr | None = Field( alias="api_key", default_factory=secret_from_env("OPENAI_API_KEY", default=None) ) - """Automatically inferred from env var ``OPENAI_API_KEY`` if not provided.""" + """Automatically inferred from env var `OPENAI_API_KEY` if not provided.""" openai_organization: str | None = Field( alias="organization", default_factory=from_env( ["OPENAI_ORG_ID", "OPENAI_ORGANIZATION"], default=None ), ) - """Automatically inferred from env var ``OPENAI_ORG_ID`` if not provided.""" + """Automatically inferred from env var `OPENAI_ORG_ID` if not provided.""" allowed_special: Literal["all"] | set[str] | None = None disallowed_special: Literal["all"] | set[str] | Sequence[str] | None = None chunk_size: int = 1000 @@ -213,12 +213,12 @@ class OpenAIEmbeddings(BaseModel, Embeddings): request_timeout: float | tuple[float, float] | Any | None = Field( default=None, alias="timeout" ) - """Timeout for requests to OpenAI completion API. Can be float, ``httpx.Timeout`` or + """Timeout for requests to OpenAI completion API. Can be float, `httpx.Timeout` or None.""" headers: Any = None tiktoken_enabled: bool = True """Set this to False for non-OpenAI implementations of the embeddings API, e.g. - the ``--extensions openai`` extension for ``text-generation-webui``""" + the `--extensions openai` extension for `text-generation-webui`""" tiktoken_model_name: str | None = None """The model name to pass to tiktoken when using this class. Tiktoken is used to count the number of tokens in documents to constrain @@ -245,13 +245,13 @@ class OpenAIEmbeddings(BaseModel, Embeddings): retry_max_seconds: int = 20 """Max number of seconds to wait between retries""" http_client: Any | None = None - """Optional ``httpx.Client``. Only used for sync invocations. Must specify - ``http_async_client`` as well if you'd like a custom client for async + """Optional `httpx.Client`. Only used for sync invocations. Must specify + `http_async_client` as well if you'd like a custom client for async invocations. """ http_async_client: Any | None = None - """Optional ``httpx.AsyncClient``. Only used for async invocations. 
Must specify - ``http_client`` as well if you'd like a custom client for sync invocations.""" + """Optional `httpx.AsyncClient`. Only used for async invocations. Must specify + `http_client` as well if you'd like a custom client for sync invocations.""" check_embedding_ctx_length: bool = True """Whether to check the token length of inputs and automatically split inputs longer than embedding_ctx_length.""" diff --git a/libs/partners/openai/langchain_openai/llms/azure.py b/libs/partners/openai/langchain_openai/llms/azure.py index 8fd9bc6d36c..97cc219dbcc 100644 --- a/libs/partners/openai/langchain_openai/llms/azure.py +++ b/libs/partners/openai/langchain_openai/llms/azure.py @@ -21,7 +21,7 @@ class AzureOpenAI(BaseOpenAI): """Azure-specific OpenAI large language models. To use, you should have the `openai` python package installed, and the - environment variable ``OPENAI_API_KEY`` set with your API key. + environment variable `OPENAI_API_KEY` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. @@ -40,9 +40,9 @@ class AzureOpenAI(BaseOpenAI): ) """Your Azure endpoint, including the resource. - Automatically inferred from env var ``AZURE_OPENAI_ENDPOINT`` if not provided. + Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided. - Example: ``'https://example-resource.azure.openai.com/'`` + Example: `'https://example-resource.azure.openai.com/'` """ deployment_name: str | None = Field(default=None, alias="azure_deployment") """A model deployment. @@ -57,7 +57,7 @@ class AzureOpenAI(BaseOpenAI): alias="api_version", default_factory=from_env("OPENAI_API_VERSION", default=None), ) - """Automatically inferred from env var ``OPENAI_API_VERSION`` if not provided.""" + """Automatically inferred from env var `OPENAI_API_VERSION` if not provided.""" # Check OPENAI_KEY for backwards compatibility. # TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using # other forms of azure credentials. @@ -72,7 +72,7 @@ class AzureOpenAI(BaseOpenAI): ) """Your Azure Active Directory token. - Automatically inferred from env var ``AZURE_OPENAI_AD_TOKEN`` if not provided. + Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided. `For more, see this page <https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id>.`__ """ @@ -80,7 +80,7 @@ class AzureOpenAI(BaseOpenAI): """A function that returns an Azure Active Directory token. Will be invoked on every sync request. For async requests, - will be invoked if ``azure_ad_async_token_provider`` is not provided. + will be invoked if `azure_ad_async_token_provider` is not provided. """ azure_ad_async_token_provider: Callable[[], Awaitable[str]] | None = None """A function that returns an Azure Active Directory token. @@ -90,7 +90,7 @@ class AzureOpenAI(BaseOpenAI): openai_api_type: str | None = Field( default_factory=from_env("OPENAI_API_TYPE", default="azure") ) - """Legacy, for ``openai<1.0.0`` support.""" + """Legacy, for `openai<1.0.0` support.""" validate_base_url: bool = True """For backwards compatibility. If legacy val openai_api_base is passed in, try to infer if it is a base_url or azure_endpoint and update accordingly.
diff --git a/libs/partners/openai/langchain_openai/llms/base.py b/libs/partners/openai/langchain_openai/llms/base.py index 271aea9ad4e..b7d92c10aed 100644 --- a/libs/partners/openai/langchain_openai/llms/base.py +++ b/libs/partners/openai/langchain_openai/llms/base.py @@ -54,7 +54,7 @@ class BaseOpenAI(BaseLLM): """Base OpenAI large language model class. Setup: - Install `langchain-openai` and set environment variable ``OPENAI_API_KEY``. + Install `langchain-openai` and set environment variable `OPENAI_API_KEY`. .. code-block:: bash @@ -90,13 +90,13 @@ class BaseOpenAI(BaseLLM): Key init args — client params: openai_api_key: SecretStr | None OpenAI API key. If not passed in will be read from env var - ``OPENAI_API_KEY``. + `OPENAI_API_KEY`. openai_api_base: str | None Base URL path for API requests, leave blank if not using a proxy or service emulator. openai_organization: str | None OpenAI organization ID. If not passed in will be read from env - var ``OPENAI_ORG_ID``. + var `OPENAI_ORG_ID`. request_timeout: Union[float, tuple[float, float], Any, None] Timeout for requests to OpenAI completion API. max_retries: int @@ -191,7 +191,7 @@ class BaseOpenAI(BaseLLM): openai_api_key: SecretStr | None = Field( alias="api_key", default_factory=secret_from_env("OPENAI_API_KEY", default=None) ) - """Automatically inferred from env var ``OPENAI_API_KEY`` if not provided.""" + """Automatically inferred from env var `OPENAI_API_KEY` if not provided.""" openai_api_base: str | None = Field( alias="base_url", default_factory=from_env("OPENAI_API_BASE", default=None) ) @@ -203,7 +203,7 @@ class BaseOpenAI(BaseLLM): ["OPENAI_ORG_ID", "OPENAI_ORGANIZATION"], default=None ), ) - """Automatically inferred from env var ``OPENAI_ORG_ID`` if not provided.""" + """Automatically inferred from env var `OPENAI_ORG_ID` if not provided.""" # to support explicit proxy for OpenAI openai_proxy: str | None = Field( default_factory=from_env("OPENAI_PROXY", default=None) @@ -213,7 +213,7 @@ class BaseOpenAI(BaseLLM): request_timeout: float | tuple[float, float] | Any | None = Field( default=None, alias="timeout" ) - """Timeout for requests to OpenAI completion API. Can be float, ``httpx.Timeout`` or + """Timeout for requests to OpenAI completion API. Can be float, `httpx.Timeout` or None.""" logit_bias: dict[str, float] | None = None """Adjust the probability of specific tokens being generated.""" @@ -245,13 +245,13 @@ class BaseOpenAI(BaseLLM): # Configure a custom httpx client. See the # [httpx documentation](https://www.python-httpx.org/api/#client) for more details. http_client: Any | None = None - """Optional ``httpx.Client``. Only used for sync invocations. Must specify - ``http_async_client`` as well if you'd like a custom client for async + """Optional `httpx.Client`. Only used for sync invocations. Must specify + `http_async_client` as well if you'd like a custom client for async invocations. """ http_async_client: Any | None = None - """Optional ``httpx.AsyncClient``. Only used for async invocations. Must specify - ``http_client`` as well if you'd like a custom client for sync invocations.""" + """Optional `httpx.AsyncClient`. Only used for async invocations. Must specify + `http_client` as well if you'd like a custom client for sync invocations.""" extra_body: Mapping[str, Any] | None = None """Optional additional JSON properties to include in the request parameters when making requests to OpenAI compatible APIs, such as vLLM.""" @@ -704,7 +704,7 @@ class OpenAI(BaseOpenAI): """OpenAI completion model integration. 
Setup: - Install `langchain-openai` and set environment variable ``OPENAI_API_KEY``. + Install `langchain-openai` and set environment variable `OPENAI_API_KEY`. .. code-block:: bash @@ -722,7 +722,7 @@ class OpenAI(BaseOpenAI): Whether to return logprobs. stream_options: Dict Configure streaming outputs, like whether to return token usage when - streaming (``{"include_usage": True}``). + streaming (`{"include_usage": True}`). Key init args — client params: timeout: Union[float, Tuple[float, float], Any, None] @@ -730,13 +730,13 @@ class OpenAI(BaseOpenAI): max_retries: int Max number of retries. api_key: str | None - OpenAI API key. If not passed in will be read from env var ``OPENAI_API_KEY``. + OpenAI API key. If not passed in will be read from env var `OPENAI_API_KEY`. base_url: str | None Base URL for API requests. Only specify if using a proxy or service emulator. organization: str | None OpenAI organization ID. If not passed in will be read from env - var ``OPENAI_ORG_ID``. + var `OPENAI_ORG_ID`. See full list of supported init args and their descriptions in the params section. diff --git a/libs/partners/openai/langchain_openai/tools/custom_tool.py b/libs/partners/openai/langchain_openai/tools/custom_tool.py index 743a9bd21f5..02410a0e2ec 100644 --- a/libs/partners/openai/langchain_openai/tools/custom_tool.py +++ b/libs/partners/openai/langchain_openai/tools/custom_tool.py @@ -51,7 +51,7 @@ def custom_tool(*args: Any, **kwargs: Any) -> Any: step["messages"][-1].pretty_print() You can also specify a format for a corresponding context-free grammar using the - ``format`` kwarg: + `format` kwarg: .. code-block:: python diff --git a/libs/partners/perplexity/langchain_perplexity/chat_models.py b/libs/partners/perplexity/langchain_perplexity/chat_models.py index 81ca00929ec..e8cefba3383 100644 --- a/libs/partners/perplexity/langchain_perplexity/chat_models.py +++ b/libs/partners/perplexity/langchain_perplexity/chat_models.py @@ -63,7 +63,7 @@ class ChatPerplexity(BaseChatModel): """`Perplexity AI` Chat models API. Setup: - To use, you should have the environment variable ``PPLX_API_KEY`` set to your API key. + To use, you should have the environment variable `PPLX_API_KEY` set to your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. @@ -416,7 +416,7 @@ class ChatPerplexity(BaseChatModel): method: The method for steering model generation, currently only support: - - ``'json_schema'``: Use the JSON Schema to parse the model output + - `'json_schema'`: Use the JSON Schema to parse the model output include_raw: @@ -425,7 +425,7 @@ class ChatPerplexity(BaseChatModel): then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. + with keys `'raw'`, `'parsed'`, and `'parsing_error'`. strict: Unsupported: whether to enable strict schema adherence when generating @@ -437,14 +437,14 @@ class ChatPerplexity(BaseChatModel): Returns: A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`. - If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs - an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict. 
+ If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs + an instance of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is False then Runnable outputs a dict. - If ``include_raw`` is True, then Runnable outputs a dict with keys: + If `include_raw` is True, then Runnable outputs a dict with keys: - - ``'raw'``: BaseMessage - - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``'parsing_error'``: BaseException | None + - `'raw'`: BaseMessage + - `'parsed'`: None if there was a parsing error, otherwise the type depends on the `schema` as described above. + - `'parsing_error'`: BaseException | None """ # noqa: E501 if method in ("function_calling", "json_mode"): diff --git a/libs/partners/qdrant/langchain_qdrant/qdrant.py b/libs/partners/qdrant/langchain_qdrant/qdrant.py index 42ba86977cd..58ec8ad6ca5 100644 --- a/libs/partners/qdrant/langchain_qdrant/qdrant.py +++ b/libs/partners/qdrant/langchain_qdrant/qdrant.py @@ -40,7 +40,7 @@ class QdrantVectorStore(VectorStore): """Qdrant vector store integration. Setup: - Install ``langchain-qdrant`` package. + Install `langchain-qdrant` package. .. code-block:: bash @@ -270,7 +270,7 @@ class QdrantVectorStore(VectorStore): """Get the Qdrant client instance that is being used. Returns: - QdrantClient: An instance of ``QdrantClient``. + QdrantClient: An instance of `QdrantClient`. """ return self._client @@ -280,7 +280,7 @@ class QdrantVectorStore(VectorStore): """Get the dense embeddings instance that is being used. Returns: - Embeddings: An instance of ``Embeddings``, or None for SPARSE mode. + Embeddings: An instance of `Embeddings`, or None for SPARSE mode. """ return self._embeddings @@ -330,7 +330,7 @@ class QdrantVectorStore(VectorStore): ValueError: If sparse embeddings are `None`. Returns: - SparseEmbeddings: An instance of ``SparseEmbeddings``. + SparseEmbeddings: An instance of `SparseEmbeddings`. """ if self._sparse_embeddings is None: @@ -376,7 +376,7 @@ class QdrantVectorStore(VectorStore): validate_collection_config: bool = True, # noqa: FBT001, FBT002 **kwargs: Any, ) -> QdrantVectorStore: - """Construct an instance of ``QdrantVectorStore`` from a list of texts. + """Construct an instance of `QdrantVectorStore` from a list of texts. This is a user-friendly interface that: 1. Creates embeddings, one for each text @@ -463,10 +463,10 @@ class QdrantVectorStore(VectorStore): validate_collection_config: bool = True, # noqa: FBT001, FBT002 **kwargs: Any, ) -> QdrantVectorStore: - """Construct ``QdrantVectorStore`` from existing collection without adding data. + """Construct `QdrantVectorStore` from existing collection without adding data. Returns: - QdrantVectorStore: A new instance of ``QdrantVectorStore``. + QdrantVectorStore: A new instance of `QdrantVectorStore`. 
""" client = QdrantClient( location=location, @@ -1011,7 +1011,7 @@ class QdrantVectorStore(VectorStore): @staticmethod def _cosine_relevance_score_fn(distance: float) -> float: - """Normalize the distance to a score on a scale ``[0, 1]``.""" + """Normalize the distance to a score on a scale `[0, 1]`.""" return (distance + 1.0) / 2.0 def _select_relevance_score_fn(self) -> Callable[[float], float]: diff --git a/libs/partners/qdrant/langchain_qdrant/vectorstores.py b/libs/partners/qdrant/langchain_qdrant/vectorstores.py index 33ff8228ad0..4bc465cf39d 100644 --- a/libs/partners/qdrant/langchain_qdrant/vectorstores.py +++ b/libs/partners/qdrant/langchain_qdrant/vectorstores.py @@ -47,8 +47,8 @@ def sync_call_fallback(method: Callable) -> Callable: except NotImplementedError: # If the async method is not implemented, call the synchronous method # by removing the first letter from the method name. For example, - # if the async method is called ``aadd_texts``, the synchronous method - # will be called ``aad_texts``. + # if the async method is called `aadd_texts`, the synchronous method + # will be called `aad_texts`. return await run_in_executor( None, getattr(self, method.__name__[1:]), *args, **kwargs ) @@ -163,7 +163,7 @@ class Qdrant(VectorStore): uuid-like strings. batch_size: How many vectors upload per-request. - Default: ``64`` + Default: `64` **kwargs: Additional keyword arguments. Returns: @@ -200,7 +200,7 @@ class Qdrant(VectorStore): uuid-like strings. batch_size: How many vectors upload per-request. - Default: ``64`` + Default: `64` **kwargs: Additional keyword arguments. Returns: diff --git a/libs/partners/xai/langchain_xai/chat_models.py b/libs/partners/xai/langchain_xai/chat_models.py index 1e63af25166..2bf53caedc0 100644 --- a/libs/partners/xai/langchain_xai/chat_models.py +++ b/libs/partners/xai/langchain_xai/chat_models.py @@ -166,7 +166,7 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] [Certain xAI models](https://docs.x.ai/docs/models#model-pricing) support reasoning, which allows the model to provide reasoning content along with the response. - If provided, reasoning content is returned under the ``additional_kwargs`` field of the + If provided, reasoning content is returned under the `additional_kwargs` field of the AIMessage or AIMessageChunk. If supported, reasoning effort can be specified in the model constructor's `extra_body` @@ -181,13 +181,13 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] ) !!! note - As of 2025-07-10, ``reasoning_content`` is only returned in Grok 3 models, such as + As of 2025-07-10, `reasoning_content` is only returned in Grok 3 models, such as [Grok 3 Mini](https://docs.x.ai/docs/models/grok-3-mini). !!! note Note that in [Grok 4](https://docs.x.ai/docs/models/grok-4-0709), as of 2025-07-10, - reasoning is not exposed in ``reasoning_content`` (other than initial ``'Thinking...'`` text), - reasoning cannot be disabled, and the ``reasoning_effort`` cannot be specified. + reasoning is not exposed in `reasoning_content` (other than initial `'Thinking...'` text), + reasoning cannot be disabled, and the `reasoning_effort` cannot be specified. Tool calling / function calling: .. code-block:: python @@ -240,7 +240,7 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] llm = ChatXAI(model="grok-4", extra_body={"tool_choice": "none"}) - To require that the model always calls a tool / function, set `tool_choice` to ``'required'``: + To require that the model always calls a tool / function, set `tool_choice` to `'required'`: .. 
code-block:: python @@ -409,7 +409,7 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] xai_api_base: str = Field(default="https://api.x.ai/v1/") """Base URL path for API requests.""" search_parameters: dict[str, Any] | None = None - """Parameters for search requests. Example: ``{"mode": "auto"}``.""" + """Parameters for search requests. Example: `{"mode": "auto"}`.""" openai_api_key: SecretStr | None = None openai_api_base: str | None = None @@ -422,7 +422,7 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] def lc_secrets(self) -> dict[str, str]: """A map of constructor argument names to secret ids. - For example, ``{"xai_api_key": "XAI_API_KEY"}`` + For example, `{"xai_api_key": "XAI_API_KEY"}` """ return {"xai_api_key": "XAI_API_KEY"} @@ -594,7 +594,7 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] - a `TypedDict` class (support added in 0.1.20), - or a Pydantic class. - If ``schema`` is a Pydantic class then the model output will be a + If `schema` is a Pydantic class then the model output will be a Pydantic instance of that class, and the model-generated fields will be validated by the Pydantic class. Otherwise the model output will be a dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool` @@ -603,11 +603,11 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] method: The method for steering model generation, one of: - - ``'function_calling'``: + - `'function_calling'`: Uses xAI's [tool-calling features](https://docs.x.ai/docs/guides/function-calling). - - ``'json_schema'``: + - `'json_schema'`: Uses xAI's [structured output feature](https://docs.x.ai/docs/guides/structured-outputs). - - ``'json_mode'``: + - `'json_mode'`: Uses xAI's JSON mode feature. include_raw: @@ -616,7 +616,7 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. + with keys `'raw'`, `'parsed'`, and `'parsing_error'`. strict: - `True`: @@ -626,20 +626,20 @@ class ChatXAI(BaseChatOpenAI): # type: ignore[override] Input schema will not be validated and model output will not be validated. - `None`: - ``strict`` argument will not be passed to the model. + `strict` argument will not be passed to the model. kwargs: Additional keyword args aren't supported. Returns: A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`. - If ``include_raw`` is `False` and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is `False` then Runnable outputs a dict. + If `include_raw` is `False` and `schema` is a Pydantic class, Runnable outputs an instance of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is `False` then Runnable outputs a dict. - If ``include_raw`` is `True`, then Runnable outputs a dict with keys: + If `include_raw` is `True`, then Runnable outputs a dict with keys: - - ``'raw'``: BaseMessage - - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``'parsing_error'``: BaseException | None + - `'raw'`: BaseMessage + - `'parsed'`: None if there was a parsing error, otherwise the type depends on the `schema` as described above. 
+ - `'parsing_error'`: BaseException | None """ # noqa: E501 # Some applications require that incompatible parameters (e.g., unsupported diff --git a/libs/standard-tests/langchain_tests/conftest.py b/libs/standard-tests/langchain_tests/conftest.py index 2c017a9f2c0..5a6b3458979 100644 --- a/libs/standard-tests/langchain_tests/conftest.py +++ b/libs/standard-tests/langchain_tests/conftest.py @@ -50,7 +50,7 @@ class CustomSerializer: class CustomPersister: - """A custom persister for VCR that uses the ``CustomSerializer``.""" + """A custom persister for VCR that uses the `CustomSerializer`.""" @classmethod def load_cassette( @@ -100,7 +100,7 @@ _BASE_FILTER_HEADERS = [ def _base_vcr_config() -> dict: """Return VCR configuration that every cassette will receive. - (Anything permitted by ``vcr.VCR(**kwargs)`` can be put here.) + (Anything permitted by `vcr.VCR(**kwargs)` can be put here.) """ return { "record_mode": "once", diff --git a/libs/standard-tests/langchain_tests/integration_tests/embeddings.py b/libs/standard-tests/langchain_tests/integration_tests/embeddings.py index eb5763d527b..b4cbe39826e 100644 --- a/libs/standard-tests/langchain_tests/integration_tests/embeddings.py +++ b/libs/standard-tests/langchain_tests/integration_tests/embeddings.py @@ -8,9 +8,9 @@ from langchain_tests.unit_tests.embeddings import EmbeddingsTests class EmbeddingsIntegrationTests(EmbeddingsTests): """Base class for embeddings integration tests. - Test subclasses must implement the ``embeddings_class`` property to specify the + Test subclasses must implement the `embeddings_class` property to specify the embeddings model to be tested. You can also override the - ``embedding_model_params`` property to specify initialization parameters. + `embedding_model_params` property to specify initialization parameters. Example: @@ -45,8 +45,8 @@ class EmbeddingsIntegrationTests(EmbeddingsTests): If this test fails, check that: - 1. The model will generate a list of floats when calling ``.embed_query`` - on a string. + 1. The model will generate a list of floats when calling `.embed_query` + on a string. 2. The length of the list is consistent across different inputs. """ embedding_1 = model.embed_query("foo") @@ -67,7 +67,7 @@ class EmbeddingsIntegrationTests(EmbeddingsTests): If this test fails, check that: 1. The model will generate a list of lists of floats when calling - ``.embed_documents`` on a list of strings. + `.embed_documents` on a list of strings. 2. The length of each list is the same. """ documents = ["foo", "bar", "baz"] @@ -86,8 +86,8 @@ class EmbeddingsIntegrationTests(EmbeddingsTests): If this test fails, check that: - 1. The model will generate a list of floats when calling ``.aembed_query`` - on a string. + 1. The model will generate a list of floats when calling `.aembed_query` + on a string. 2. The length of the list is consistent across different inputs. """ embedding_1 = await model.aembed_query("foo") @@ -108,7 +108,7 @@ class EmbeddingsIntegrationTests(EmbeddingsTests): If this test fails, check that: 1. The model will generate a list of lists of floats when calling - ``.aembed_documents`` on a list of strings. + `.aembed_documents` on a list of strings. 2. The length of each list is the same. 
""" documents = ["foo", "bar", "baz"] diff --git a/libs/standard-tests/langchain_tests/unit_tests/embeddings.py b/libs/standard-tests/langchain_tests/unit_tests/embeddings.py index 13d2c8ca0b9..50e95f428c9 100644 --- a/libs/standard-tests/langchain_tests/unit_tests/embeddings.py +++ b/libs/standard-tests/langchain_tests/unit_tests/embeddings.py @@ -33,9 +33,9 @@ class EmbeddingsTests(BaseStandardTests): class EmbeddingsUnitTests(EmbeddingsTests): """Base class for embeddings unit tests. - Test subclasses must implement the ``embeddings_class`` property to specify the + Test subclasses must implement the `embeddings_class` property to specify the embeddings model to be tested. You can also override the - ``embedding_model_params`` property to specify initialization parameters. + `embedding_model_params` property to specify initialization parameters. Example: @@ -59,7 +59,7 @@ class EmbeddingsUnitTests(EmbeddingsTests): return {"model": "model-001"} !!! note - API references for individual test methods include troubleshooting tips. + API references for individual test methods include troubleshooting tips. Testing initialization from environment variables Overriding the `init_from_env_params` property will enable additional tests @@ -99,7 +99,7 @@ class EmbeddingsUnitTests(EmbeddingsTests): ??? note "Troubleshooting" - If this test fails, ensure that ``embedding_model_params`` is specified + If this test fails, ensure that `embedding_model_params` is specified and the model can be initialized from those params. """ model = self.embeddings_class(**self.embedding_model_params) diff --git a/libs/text-splitters/langchain_text_splitters/html.py b/libs/text-splitters/langchain_text_splitters/html.py index daa4e46553f..44835f4864a 100644 --- a/libs/text-splitters/langchain_text_splitters/html.py +++ b/libs/text-splitters/langchain_text_splitters/html.py @@ -616,7 +616,7 @@ class HTMLSemanticPreservingSplitter(BaseDocumentTransformer): denylist_tags: These tags will be removed from the HTML. preserve_parent_metadata: Whether to pass through parent document metadata to split documents when calling - ``transform_documents/atransform_documents()``. + `transform_documents/atransform_documents()`. keep_separator: Whether separators should be at the beginning of a chunk, at the end, or not at all. """ diff --git a/libs/text-splitters/langchain_text_splitters/json.py b/libs/text-splitters/langchain_text_splitters/json.py index 3a195e5fab3..5e5b39f7d08 100644 --- a/libs/text-splitters/langchain_text_splitters/json.py +++ b/libs/text-splitters/langchain_text_splitters/json.py @@ -21,7 +21,7 @@ class RecursiveJsonSplitter: max_chunk_size: int = 2000 """The maximum size for each chunk. Defaults to 2000.""" min_chunk_size: int = 1800 - """The minimum size for each chunk, derived from ``max_chunk_size`` if not + """The minimum size for each chunk, derived from `max_chunk_size` if not explicitly provided.""" def __init__( @@ -30,8 +30,8 @@ class RecursiveJsonSplitter: """Initialize the chunk size configuration for text processing. This constructor sets up the maximum and minimum chunk sizes, ensuring that - the ``min_chunk_size`` defaults to a value slightly smaller than the - ``max_chunk_size`` if not explicitly provided. + the `min_chunk_size` defaults to a value slightly smaller than the + `max_chunk_size` if not explicitly provided. Args: max_chunk_size: The maximum size for a chunk. Defaults to 2000. 
diff --git a/libs/text-splitters/langchain_text_splitters/markdown.py b/libs/text-splitters/langchain_text_splitters/markdown.py index c654e4ca098..957f364c8a1 100644 --- a/libs/text-splitters/langchain_text_splitters/markdown.py +++ b/libs/text-splitters/langchain_text_splitters/markdown.py @@ -296,7 +296,7 @@ class ExperimentalMarkdownSyntaxTextSplitter: * Splits out code blocks and includes the language in the "Code" metadata key. * Splits text on horizontal rules (`---`) as well. * Defaults to sensible splitting behavior, which can be overridden using the - ``headers_to_split_on`` parameter. + `headers_to_split_on` parameter. Example:
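The `ExperimentalMarkdownSyntaxTextSplitter` hunk above notes that the default splitting behavior can be overridden through `headers_to_split_on`. As an editorial sketch, separate from the class's own example, and assuming the splitter is importable from `langchain_text_splitters` with `split_text` returning `Document` objects carrying header metadata:

```python
from langchain_text_splitters import ExperimentalMarkdownSyntaxTextSplitter

markdown_text = (
    "# Project\n\n"
    "Intro paragraph.\n\n"
    "## Usage\n\n"
    "Some usage notes.\n\n"
    "---\n\n"
    "## Notes\n\n"
    "Trailing section after a horizontal rule.\n"
)

# Override the defaults: split only on H1/H2 and record them under these
# metadata keys; the horizontal rule (---) also produces a split.
splitter = ExperimentalMarkdownSyntaxTextSplitter(
    headers_to_split_on=[("#", "Header 1"), ("##", "Header 2")],
)

docs = splitter.split_text(markdown_text)
for doc in docs:
    print(doc.metadata, repr(doc.page_content[:40]))
```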