From 7d898e3b23fab508dc09534d7ee01e6b6ee04e68 Mon Sep 17 00:00:00 2001
From: Mason Daugherty
Date: Mon, 30 Jun 2025 09:02:41 -0400
Subject: [PATCH] docs: nits
---
 .../langchain_core/language_models/base.py    | 10 +--
 libs/core/langchain_core/runnables/base.py    | 62 ++++++++--------
 .../langchain_core/runnables/configurable.py  |  2 +-
 libs/core/langchain_core/tracers/context.py   |  8 +--
 libs/partners/ollama/langchain_ollama/llms.py | 72 ++++++++++---------
 5 files changed, 82 insertions(+), 72 deletions(-)

diff --git a/libs/core/langchain_core/language_models/base.py b/libs/core/langchain_core/language_models/base.py
index a4d3442d8cc..db6db689dc2 100644
--- a/libs/core/langchain_core/language_models/base.py
+++ b/libs/core/langchain_core/language_models/base.py
@@ -105,10 +105,10 @@ class BaseLanguageModel(
     cache: Union[BaseCache, bool, None] = Field(default=None, exclude=True)
     """Whether to cache the response.

-    * If true, will use the global cache.
-    * If false, will not use a cache
-    * If None, will use the global cache if it's set, otherwise no cache.
-    * If instance of BaseCache, will use the provided cache.
+    * If ``True``, will use the global cache.
+    * If ``False``, will not use a cache.
+    * If ``None``, will use the global cache if it's set, otherwise no cache.
+    * If an instance of ``BaseCache``, will use the provided cache.

     Caching is not currently supported for streaming methods of models.
     """
@@ -374,7 +374,7 @@ class BaseLanguageModel(

         Useful for checking if an input fits in a model's context window.

-        **Note**: the base implementation of get_num_tokens_from_messages ignores
+        .. NOTE:: The base implementation of ``get_num_tokens_from_messages()`` ignores
             tool schemas.

         Args:
diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py
index 185582467fd..8939b236b53 100644
--- a/libs/core/langchain_core/runnables/base.py
+++ b/libs/core/langchain_core/runnables/base.py
@@ -736,8 +736,8 @@ class Runnable(Generic[Input, Output], ABC):
         Args:
             input: The input to the Runnable.
             config: A config to use when invoking the Runnable.
-                The config supports standard keys like 'tags', 'metadata' for tracing
-                purposes, 'max_concurrency' for controlling how much work to do
+                The config supports standard keys like ``tags``, ``metadata`` for tracing
+                purposes, ``max_concurrency`` for controlling how much work to do
                 in parallel, and other keys. Please refer to the RunnableConfig
                 for more details.

@@ -751,10 +751,10 @@ class Runnable(Generic[Input, Output], ABC):
         config: Optional[RunnableConfig] = None,
         **kwargs: Any,
     ) -> Output:
-        """Default implementation of ainvoke, calls invoke from a thread.
+        """Default implementation of ``ainvoke()``, which calls ``invoke()`` from a thread.

         The default implementation allows usage of async code even if
-        the Runnable did not implement a native async version of invoke.
+        the Runnable did not implement a native async version of ``invoke()``.

         Subclasses should override this method if they can run asynchronously.
         """
@@ -768,13 +768,14 @@ class Runnable(Generic[Input, Output], ABC):
         return_exceptions: bool = False,
         **kwargs: Optional[Any],
     ) -> list[Output]:
-        """Default implementation runs invoke in parallel using a thread pool executor.
+        """Default implementation runs ``invoke()`` in parallel using a thread pool
+        executor.

         The default implementation of batch works well for IO bound runnables.
         Subclasses should override this method if they can batch more efficiently;
         e.g., if the underlying Runnable uses an API which supports a batch mode.
-        """
+        """  # noqa: D205
         if not inputs:
             return []

@@ -824,7 +825,7 @@ class Runnable(Generic[Input, Output], ABC):
         return_exceptions: bool = False,
         **kwargs: Optional[Any],
     ) -> Iterator[tuple[int, Union[Output, Exception]]]:
-        """Run invoke in parallel on a list of inputs.
+        """Run ``invoke()`` in parallel on a list of inputs.

         Yields results as they complete.
         """
@@ -875,7 +876,8 @@ class Runnable(Generic[Input, Output], ABC):
         return_exceptions: bool = False,
         **kwargs: Optional[Any],
     ) -> list[Output]:
-        """Default implementation runs ainvoke in parallel using asyncio.gather.
+        """Default implementation runs ``ainvoke()`` in parallel using
+        ``asyncio.gather``.

         The default implementation of batch works well for IO bound runnables.

@@ -885,17 +887,17 @@ class Runnable(Generic[Input, Output], ABC):
         Args:
             inputs: A list of inputs to the Runnable.
             config: A config to use when invoking the Runnable.
-                The config supports standard keys like 'tags', 'metadata' for tracing
-                purposes, 'max_concurrency' for controlling how much work to do
-                in parallel, and other keys. Please refer to the RunnableConfig
-                for more details. Defaults to None.
+                The config supports standard keys like ``tags``, ``metadata`` for
+                tracing purposes, ``max_concurrency`` for controlling how much work to
+                do in parallel, and other keys. Please refer to the RunnableConfig
+                for more details. Defaults to ``None``.
             return_exceptions: Whether to return exceptions instead of raising them.
-                Defaults to False.
+                Defaults to ``False``.
             kwargs: Additional keyword arguments to pass to the Runnable.

         Returns:
             A list of outputs from the Runnable.
-        """
+        """  # noqa: D205
         if not inputs:
             return []

@@ -943,19 +945,19 @@ class Runnable(Generic[Input, Output], ABC):
         return_exceptions: bool = False,
         **kwargs: Optional[Any],
     ) -> AsyncIterator[tuple[int, Union[Output, Exception]]]:
-        """Run ainvoke in parallel on a list of inputs.
+        """Run ``ainvoke()`` in parallel on a list of inputs.

         Yields results as they complete.

         Args:
             inputs: A list of inputs to the Runnable.
             config: A config to use when invoking the Runnable.
-                The config supports standard keys like 'tags', 'metadata' for tracing
-                purposes, 'max_concurrency' for controlling how much work to do
-                in parallel, and other keys. Please refer to the RunnableConfig
-                for more details. Defaults to None. Defaults to None.
+                The config supports standard keys like ``tags``, ``metadata`` for
+                tracing purposes, ``max_concurrency`` for controlling how much work to
+                do in parallel, and other keys. Please refer to the RunnableConfig
+                for more details. Defaults to ``None``.
             return_exceptions: Whether to return exceptions instead of raising them.
-                Defaults to False.
+                Defaults to ``False``.
             kwargs: Additional keyword arguments to pass to the Runnable.

         Yields:
@@ -1019,13 +1021,13 @@ class Runnable(Generic[Input, Output], ABC):
         config: Optional[RunnableConfig] = None,
         **kwargs: Optional[Any],
     ) -> AsyncIterator[Output]:
-        """Default implementation of astream, which calls ainvoke.
+        """Default implementation of ``astream()``, which calls ``ainvoke()``.

         Subclasses should override this method if they support streaming output.

         Args:
             input: The input to the Runnable.
-            config: The config to use for the Runnable. Defaults to None.
+            config: The config to use for the Runnable. Defaults to ``None``.
             kwargs: Additional keyword arguments to pass to the Runnable.

         Yields:
@@ -1221,7 +1223,7 @@ class Runnable(Generic[Input, Output], ABC):
         In addition to the standard events, users can also dispatch custom events
         (see example below).

-        Custom events will be only be surfaced with in the `v2` version of the API!
+        .. NOTE:: Custom events will only be surfaced in the ``v2`` version of the API.

         A custom event has following format:

@@ -1235,7 +1237,7 @@ class Runnable(Generic[Input, Output], ABC):

         Here are declarations associated with the standard events shown above:

-        `format_docs`:
+        ``format_docs``:

         .. code-block:: python

             def format_docs(docs: list[Document]) -> str:
                 '''Format the docs.'''
                 return ", ".join([doc.page_content for doc in docs])

             format_docs = RunnableLambda(format_docs)

-        `some_tool`:
+        ``some_tool``:

         .. code-block:: python

             @tool
             def some_tool(x: int, y: str) -> dict:
                 '''Some_tool.'''
                 return {"x": x, "y": y}

-        `prompt`:
+        ``prompt``:

         .. code-block:: python

@@ -1354,8 +1356,8 @@ class Runnable(Generic[Input, Output], ABC):
             exclude_types: Exclude events from runnables with matching types.
             exclude_tags: Exclude events from runnables with matching tags.
             kwargs: Additional keyword arguments to pass to the Runnable.
-                These will be passed to astream_log as this implementation
-                of astream_events is built on top of astream_log.
+                These will be passed to ``astream_log`` as this implementation
+                of ``astream_events`` is built on top of ``astream_log``.

         Yields:
             An async stream of StreamEvents.
@@ -2551,9 +2553,9 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
             which: The ConfigurableField instance that will be used to select the
                 alternative.
             default_key: The default key to use if no alternative is selected.
-                Defaults to "default".
+                Defaults to ``'default'``.
             prefix_keys: Whether to prefix the keys with the ConfigurableField id.
-                Defaults to False.
+                Defaults to ``False``.
             **kwargs: A dictionary of keys to Runnable instances or callables that
                 return Runnable instances.
diff --git a/libs/core/langchain_core/runnables/configurable.py b/libs/core/langchain_core/runnables/configurable.py
index 4f2963353f8..f79acb433d9 100644
--- a/libs/core/langchain_core/runnables/configurable.py
+++ b/libs/core/langchain_core/runnables/configurable.py
@@ -544,7 +544,7 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
     """The alternatives to choose from."""

     default_key: str = "default"
-    """The enum value to use for the default option. Defaults to "default"."""
+    """The enum value to use for the default option. Defaults to ``'default'``."""

     prefix_keys: bool
     """Whether to prefix configurable fields of each alternative with a namespace
diff --git a/libs/core/langchain_core/tracers/context.py b/libs/core/langchain_core/tracers/context.py
index 466bc2f9f11..716da4faba0 100644
--- a/libs/core/langchain_core/tracers/context.py
+++ b/libs/core/langchain_core/tracers/context.py
@@ -62,13 +62,13 @@ def tracing_v2_enabled(
     Args:
         project_name (str, optional): The name of the project.
-            Defaults to "default".
+            Defaults to ``'default'``.
         example_id (str or UUID, optional): The ID of the example.
-            Defaults to None.
+            Defaults to ``None``.
         tags (list[str], optional): The tags to add to the run.
-            Defaults to None.
+            Defaults to ``None``.
         client (LangSmithClient, optional): The client of the langsmith.
-            Defaults to None.
+            Defaults to ``None``.

     Yields:
         LangChainTracer: The LangChain tracer.
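
The ``batch()``/``abatch()`` fallback behavior documented above is easiest to see with a trivial Runnable. Below is a minimal sketch, assuming only ``langchain_core`` is installed; the doubling lambda and the inputs are illustrative, not part of this patch.

.. code-block:: python

    import asyncio

    from langchain_core.runnables import RunnableLambda

    double = RunnableLambda(lambda x: x * 2)

    # batch() falls back to running invoke() in a thread pool executor;
    # the max_concurrency config key caps how many inputs run at once.
    print(double.batch([1, 2, 3], config={"max_concurrency": 2}))  # [2, 4, 6]

    # abatch() similarly falls back to ainvoke(), run with asyncio.gather.
    print(asyncio.run(double.abatch([4, 5])))  # [8, 10]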
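Likewise, the custom-events note can be exercised end to end. A minimal sketch, assuming a ``langchain_core`` version that ships ``adispatch_custom_event`` and Python 3.11+ (so the runnable config propagates into the coroutine automatically); the event name and payload are made up for illustration.

.. code-block:: python

    import asyncio

    from langchain_core.callbacks.manager import adispatch_custom_event
    from langchain_core.runnables import RunnableLambda

    async def reflect(x: str) -> str:
        # Must be dispatched from inside a running Runnable so the
        # event is attached to a parent run.
        await adispatch_custom_event("my_event", {"input": x})
        return x

    async def main() -> None:
        # Custom events only surface with version="v2".
        async for event in RunnableLambda(reflect).astream_events("hi", version="v2"):
            if event["event"] == "on_custom_event":
                print(event["name"], event["data"])  # my_event {'input': 'hi'}

    asyncio.run(main())
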
diff --git a/libs/partners/ollama/langchain_ollama/llms.py b/libs/partners/ollama/langchain_ollama/llms.py
index 83330ec5e3d..54c23ede101 100644
--- a/libs/partners/ollama/langchain_ollama/llms.py
+++ b/libs/partners/ollama/langchain_ollama/llms.py
@@ -35,27 +35,30 @@ class OllamaLLM(BaseLLM):
     """Model name to use."""

     mirostat: Optional[int] = None
-    """Enable Mirostat sampling for controlling perplexity.
-    (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)"""
+    """Enable Mirostat sampling for controlling perplexity. (Default: ``0``)
+
+    - ``0`` = Disabled
+    - ``1`` = Mirostat
+    - ``2`` = Mirostat 2.0"""

     mirostat_eta: Optional[float] = None
     """Influences how quickly the algorithm responds to feedback
     from the generated text. A lower learning rate will result in
     slower adjustments, while a higher learning rate will make
-    the algorithm more responsive. (Default: 0.1)"""
+    the algorithm more responsive. (Default: ``0.1``)"""

     mirostat_tau: Optional[float] = None
     """Controls the balance between coherence and diversity
     of the output. A lower value will result in more focused and
-    coherent text. (Default: 5.0)"""
+    coherent text. (Default: ``5.0``)"""

     num_ctx: Optional[int] = None
     """Sets the size of the context window used to generate the
-    next token. (Default: 2048) """
+    next token. (Default: ``2048``)"""

     num_gpu: Optional[int] = None
-    """The number of GPUs to use. On macOS it defaults to 1 to
-    enable metal support, 0 to disable."""
+    """The number of GPUs to use. Defaults to ``1`` on macOS (to enable Metal
+    support). Set to ``0`` to disable."""

     num_thread: Optional[int] = None
     """Sets the number of threads to use during computation.
@@ -65,20 +68,26 @@ class OllamaLLM(BaseLLM):

     num_predict: Optional[int] = None
     """Maximum number of tokens to predict when generating text.
-    (Default: 128, -1 = infinite generation, -2 = fill context)"""
+    (Default: ``128``)
+
+    - ``-1`` = Infinite generation
+    - ``-2`` = Fill context"""

     repeat_last_n: Optional[int] = None
-    """Sets how far back for the model to look back to prevent
-    repetition. (Default: 64, 0 = disabled, -1 = num_ctx)"""
+    """Sets how far back the model looks back to prevent repetition.
+    (Default: ``64``)
+
+    - ``0`` = Disabled
+    - ``-1`` = ``num_ctx``"""

     repeat_penalty: Optional[float] = None
-    """Sets how strongly to penalize repetitions. A higher value (e.g., 1.5)
-    will penalize repetitions more strongly, while a lower value (e.g., 0.9)
-    will be more lenient. (Default: 1.1)"""
+    """Sets how strongly to penalize repetitions. A higher value (e.g., ``1.5``)
+    will penalize repetitions more strongly, while a lower value (e.g., ``0.9``)
+    will be more lenient. (Default: ``1.1``)"""

     temperature: Optional[float] = None
     """The temperature of the model. Increasing the temperature will
-    make the model answer more creatively. (Default: 0.8)"""
+    make the model answer more creatively. (Default: ``0.8``)"""

     seed: Optional[int] = None
     """Sets the random number seed to use for generation. Setting this
@@ -90,21 +99,21 @@ class OllamaLLM(BaseLLM):

     tfs_z: Optional[float] = None
     """Tail free sampling is used to reduce the impact of less probable
-    tokens from the output. A higher value (e.g., 2.0) will reduce the
-    impact more, while a value of 1.0 disables this setting. (default: 1)"""
+    tokens from the output. A higher value (e.g., ``2.0``) will reduce the
+    impact more, while a value of ``1.0`` disables this setting. (Default: ``1``)"""

     top_k: Optional[int] = None
-    """Reduces the probability of generating nonsense. A higher value (e.g. 100)
-    will give more diverse answers, while a lower value (e.g. 10)
-    will be more conservative. (Default: 40)"""
+    """Reduces the probability of generating nonsense. A higher value (e.g.
+    ``100``) will give more diverse answers, while a lower value (e.g. ``10``)
+    will be more conservative. (Default: ``40``)"""

     top_p: Optional[float] = None
-    """Works together with top-k. A higher value (e.g., 0.95) will lead
-    to more diverse text, while a lower value (e.g., 0.5) will
-    generate more focused and conservative text. (Default: 0.9)"""
+    """Works together with top-k. A higher value (e.g., ``0.95``) will lead
+    to more diverse text, while a lower value (e.g., ``0.5``) will
+    generate more focused and conservative text. (Default: ``0.9``)"""

     format: Literal["", "json"] = ""
-    """Specify the format of the output (options: json)"""
+    """Specify the format of the output."""

     keep_alive: Optional[Union[int, str]] = None
     """How long the model will stay loaded into memory."""

@@ -115,21 +124,20 @@ class OllamaLLM(BaseLLM):
     client_kwargs: Optional[dict] = {}
     """Additional kwargs to pass to the httpx clients.
     These arguments are passed to both synchronous and async clients.
-    Use sync_client_kwargs and async_client_kwargs to pass different arguments
-    to synchronous and asynchronous clients.
+    Use ``sync_client_kwargs`` and ``async_client_kwargs`` to pass different
+    arguments to synchronous and asynchronous clients.
     """

     async_client_kwargs: Optional[dict] = {}
-    """Additional kwargs to merge with client_kwargs before passing to the HTTPX
-    AsyncClient.
-
-    For a full list of the params, see the `HTTPX documentation `__.
+    """Additional kwargs to merge with ``client_kwargs`` before
+    passing to the httpx ``AsyncClient``.
+    `Full list of params `__.
     """

     sync_client_kwargs: Optional[dict] = {}
-    """Additional kwargs to merge with client_kwargs before passing to the HTTPX Client.
-
-    For a full list of the params, see the `HTTPX documentation `__.
+    """Additional kwargs to merge with ``client_kwargs`` before
+    passing to the httpx ``Client``.
+    `Full list of params `__.
     """

     _client: Client = PrivateAttr(default=None)  # type: ignore
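
Tying the parameters above together, here is a minimal usage sketch. It assumes a local Ollama server is running and that the model tag used (``llama3``, an illustrative choice) has already been pulled.

.. code-block:: python

    from langchain_ollama import OllamaLLM

    llm = OllamaLLM(
        model="llama3",      # illustrative; any pulled model tag works
        temperature=0.8,     # the documented default
        num_ctx=2048,        # context window size
        repeat_penalty=1.1,
        client_kwargs={"timeout": 30},          # shared by both httpx clients
        async_client_kwargs={"verify": False},  # merged into the AsyncClient only
    )
    print(llm.invoke("Why is the sky blue?"))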