docs: nits

This commit is contained in:
Mason Daugherty 2025-06-30 09:02:41 -04:00
parent 428c276948
commit 7d898e3b23
No known key found for this signature in database
5 changed files with 82 additions and 72 deletions

View File

@ -105,10 +105,10 @@ class BaseLanguageModel(
cache: Union[BaseCache, bool, None] = Field(default=None, exclude=True) cache: Union[BaseCache, bool, None] = Field(default=None, exclude=True)
"""Whether to cache the response. """Whether to cache the response.
* If true, will use the global cache. * If ``true``, will use the global cache.
* If false, will not use a cache * If ``false``, will not use a cache
* If None, will use the global cache if it's set, otherwise no cache. * If ``None``, will use the global cache if it's set, otherwise no cache.
* If instance of BaseCache, will use the provided cache. * If instance of ``BaseCache``, will use the provided cache.
Caching is not currently supported for streaming methods of models. Caching is not currently supported for streaming methods of models.
""" """
@ -374,7 +374,7 @@ class BaseLanguageModel(
Useful for checking if an input fits in a model's context window. Useful for checking if an input fits in a model's context window.
**Note**: the base implementation of get_num_tokens_from_messages ignores .. NOTE:: The base implementation of ``get_num_tokens_from_messages()`` ignores
tool schemas. tool schemas.
Args: Args:

View File

@ -736,8 +736,8 @@ class Runnable(Generic[Input, Output], ABC):
Args: Args:
input: The input to the Runnable. input: The input to the Runnable.
config: A config to use when invoking the Runnable. config: A config to use when invoking the Runnable.
The config supports standard keys like 'tags', 'metadata' for tracing The config supports standard keys like ``tags``, ``metadata`` for tracing
purposes, 'max_concurrency' for controlling how much work to do purposes, ``max_concurrency`` for controlling how much work to do
in parallel, and other keys. Please refer to the RunnableConfig in parallel, and other keys. Please refer to the RunnableConfig
for more details. for more details.
@ -751,10 +751,10 @@ class Runnable(Generic[Input, Output], ABC):
config: Optional[RunnableConfig] = None, config: Optional[RunnableConfig] = None,
**kwargs: Any, **kwargs: Any,
) -> Output: ) -> Output:
"""Default implementation of ainvoke, calls invoke from a thread. """Default implementation of ``ainvoke()``, calls ``invoke()`` from a thread.
The default implementation allows usage of async code even if The default implementation allows usage of async code even if
the Runnable did not implement a native async version of invoke. the Runnable did not implement a native async version of ``invoke()``.
Subclasses should override this method if they can run asynchronously. Subclasses should override this method if they can run asynchronously.
""" """
@ -768,13 +768,14 @@ class Runnable(Generic[Input, Output], ABC):
return_exceptions: bool = False, return_exceptions: bool = False,
**kwargs: Optional[Any], **kwargs: Optional[Any],
) -> list[Output]: ) -> list[Output]:
"""Default implementation runs invoke in parallel using a thread pool executor. """Default implementation runs ``invoke()`` in parallel using a thread pool
executor.
The default implementation of batch works well for IO bound runnables. The default implementation of batch works well for IO bound runnables.
Subclasses should override this method if they can batch more efficiently; Subclasses should override this method if they can batch more efficiently;
e.g., if the underlying Runnable uses an API which supports a batch mode. e.g., if the underlying Runnable uses an API which supports a batch mode.
""" """ # noqa: D205
if not inputs: if not inputs:
return [] return []
@ -824,7 +825,7 @@ class Runnable(Generic[Input, Output], ABC):
return_exceptions: bool = False, return_exceptions: bool = False,
**kwargs: Optional[Any], **kwargs: Optional[Any],
) -> Iterator[tuple[int, Union[Output, Exception]]]: ) -> Iterator[tuple[int, Union[Output, Exception]]]:
"""Run invoke in parallel on a list of inputs. """Run ``invoke()`` in parallel on a list of inputs.
Yields results as they complete. Yields results as they complete.
""" """
@ -875,7 +876,8 @@ class Runnable(Generic[Input, Output], ABC):
return_exceptions: bool = False, return_exceptions: bool = False,
**kwargs: Optional[Any], **kwargs: Optional[Any],
) -> list[Output]: ) -> list[Output]:
"""Default implementation runs ainvoke in parallel using asyncio.gather. """Default implementation runs ``ainvoke()`` in parallel using
``asyncio.gather``.
The default implementation of batch works well for IO bound runnables. The default implementation of batch works well for IO bound runnables.
@ -885,17 +887,17 @@ class Runnable(Generic[Input, Output], ABC):
Args: Args:
inputs: A list of inputs to the Runnable. inputs: A list of inputs to the Runnable.
config: A config to use when invoking the Runnable. config: A config to use when invoking the Runnable.
The config supports standard keys like 'tags', 'metadata' for tracing The config supports standard keys like ``tags``, ``metadata`` for
purposes, 'max_concurrency' for controlling how much work to do tracing purposes, ``max_concurrency`` for controlling how much work to
in parallel, and other keys. Please refer to the RunnableConfig do in parallel, and other keys. Please refer to the RunnableConfig
for more details. Defaults to None. for more details. Defaults to ``None``.
return_exceptions: Whether to return exceptions instead of raising them. return_exceptions: Whether to return exceptions instead of raising them.
Defaults to False. Defaults to ``False``.
kwargs: Additional keyword arguments to pass to the Runnable. kwargs: Additional keyword arguments to pass to the Runnable.
Returns: Returns:
A list of outputs from the Runnable. A list of outputs from the Runnable.
""" """ # noqa: D205
if not inputs: if not inputs:
return [] return []
@ -943,19 +945,19 @@ class Runnable(Generic[Input, Output], ABC):
return_exceptions: bool = False, return_exceptions: bool = False,
**kwargs: Optional[Any], **kwargs: Optional[Any],
) -> AsyncIterator[tuple[int, Union[Output, Exception]]]: ) -> AsyncIterator[tuple[int, Union[Output, Exception]]]:
"""Run ainvoke in parallel on a list of inputs. """Run ``ainvoke()`` in parallel on a list of inputs.
Yields results as they complete. Yields results as they complete.
Args: Args:
inputs: A list of inputs to the Runnable. inputs: A list of inputs to the Runnable.
config: A config to use when invoking the Runnable. config: A config to use when invoking the Runnable.
The config supports standard keys like 'tags', 'metadata' for tracing The config supports standard keys like ``tags``, ``metadata`` for
purposes, 'max_concurrency' for controlling how much work to do tracing purposes, ``max_concurrency`` for controlling how much work to
in parallel, and other keys. Please refer to the RunnableConfig do in parallel, and other keys. Please refer to the RunnableConfig
for more details. Defaults to None. Defaults to None. for more details. Defaults to ``None``.
return_exceptions: Whether to return exceptions instead of raising them. return_exceptions: Whether to return exceptions instead of raising them.
Defaults to False. Defaults to ``False``.
kwargs: Additional keyword arguments to pass to the Runnable. kwargs: Additional keyword arguments to pass to the Runnable.
Yields: Yields:
@ -1019,13 +1021,13 @@ class Runnable(Generic[Input, Output], ABC):
config: Optional[RunnableConfig] = None, config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any], **kwargs: Optional[Any],
) -> AsyncIterator[Output]: ) -> AsyncIterator[Output]:
"""Default implementation of astream, which calls ainvoke. """Default implementation of ``astream()``, which calls ``ainvoke()``.
Subclasses should override this method if they support streaming output. Subclasses should override this method if they support streaming output.
Args: Args:
input: The input to the Runnable. input: The input to the Runnable.
config: The config to use for the Runnable. Defaults to None. config: The config to use for the Runnable. Defaults to ``None``.
kwargs: Additional keyword arguments to pass to the Runnable. kwargs: Additional keyword arguments to pass to the Runnable.
Yields: Yields:
@ -1221,7 +1223,7 @@ class Runnable(Generic[Input, Output], ABC):
In addition to the standard events, users can also dispatch custom events (see example below). In addition to the standard events, users can also dispatch custom events (see example below).
Custom events will only be surfaced within the `v2` version of the API! .. NOTE:: Custom events will only be surfaced within the `v2` version of the API.
A custom event has the following format: A custom event has the following format:
@ -1235,7 +1237,7 @@ class Runnable(Generic[Input, Output], ABC):
Here are declarations associated with the standard events shown above: Here are declarations associated with the standard events shown above:
`format_docs`: ``format_docs``:
.. code-block:: python .. code-block:: python
@ -1245,7 +1247,7 @@ class Runnable(Generic[Input, Output], ABC):
format_docs = RunnableLambda(format_docs) format_docs = RunnableLambda(format_docs)
`some_tool`: ``some_tool``:
.. code-block:: python .. code-block:: python
@ -1254,7 +1256,7 @@ class Runnable(Generic[Input, Output], ABC):
'''Some_tool.''' '''Some_tool.'''
return {"x": x, "y": y} return {"x": x, "y": y}
`prompt`: ``prompt``:
.. code-block:: python .. code-block:: python
@ -1354,8 +1356,8 @@ class Runnable(Generic[Input, Output], ABC):
exclude_types: Exclude events from runnables with matching types. exclude_types: Exclude events from runnables with matching types.
exclude_tags: Exclude events from runnables with matching tags. exclude_tags: Exclude events from runnables with matching tags.
kwargs: Additional keyword arguments to pass to the Runnable. kwargs: Additional keyword arguments to pass to the Runnable.
These will be passed to astream_log as this implementation These will be passed to ``astream_log`` as this implementation
of astream_events is built on top of astream_log. of astream_events is built on top of ``astream_log``.
Yields: Yields:
An async stream of StreamEvents. An async stream of StreamEvents.
@ -2551,9 +2553,9 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
which: The ConfigurableField instance that will be used to select the which: The ConfigurableField instance that will be used to select the
alternative. alternative.
default_key: The default key to use if no alternative is selected. default_key: The default key to use if no alternative is selected.
Defaults to "default". Defaults to ``'default'``.
prefix_keys: Whether to prefix the keys with the ConfigurableField id. prefix_keys: Whether to prefix the keys with the ConfigurableField id.
Defaults to False. Defaults to ``False``.
**kwargs: A dictionary of keys to Runnable instances or callables that **kwargs: A dictionary of keys to Runnable instances or callables that
return Runnable instances. return Runnable instances.

View File

@ -544,7 +544,7 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
"""The alternatives to choose from.""" """The alternatives to choose from."""
default_key: str = "default" default_key: str = "default"
"""The enum value to use for the default option. Defaults to "default".""" """The enum value to use for the default option. Defaults to ``'default'``."""
prefix_keys: bool prefix_keys: bool
"""Whether to prefix configurable fields of each alternative with a namespace """Whether to prefix configurable fields of each alternative with a namespace

View File

@ -62,13 +62,13 @@ def tracing_v2_enabled(
Args: Args:
project_name (str, optional): The name of the project. project_name (str, optional): The name of the project.
Defaults to "default". Defaults to ``'default'``.
example_id (str or UUID, optional): The ID of the example. example_id (str or UUID, optional): The ID of the example.
Defaults to None. Defaults to ``None``.
tags (list[str], optional): The tags to add to the run. tags (list[str], optional): The tags to add to the run.
Defaults to None. Defaults to ``None``.
client (LangSmithClient, optional): The client of the langsmith. client (LangSmithClient, optional): The client of the langsmith.
Defaults to None. Defaults to ``None``.
Yields: Yields:
LangChainTracer: The LangChain tracer. LangChainTracer: The LangChain tracer.

View File

@ -35,27 +35,30 @@ class OllamaLLM(BaseLLM):
"""Model name to use.""" """Model name to use."""
mirostat: Optional[int] = None mirostat: Optional[int] = None
"""Enable Mirostat sampling for controlling perplexity. """Enable Mirostat sampling for controlling perplexity. (Default: ``0``)
(default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)"""
- ``0`` = Disabled
- ``1`` = Mirostat
- ``2`` = Mirostat 2.0"""
mirostat_eta: Optional[float] = None mirostat_eta: Optional[float] = None
"""Influences how quickly the algorithm responds to feedback """Influences how quickly the algorithm responds to feedback
from the generated text. A lower learning rate will result in from the generated text. A lower learning rate will result in
slower adjustments, while a higher learning rate will make slower adjustments, while a higher learning rate will make
the algorithm more responsive. (Default: 0.1)""" the algorithm more responsive. (Default: ``0.1``)"""
mirostat_tau: Optional[float] = None mirostat_tau: Optional[float] = None
"""Controls the balance between coherence and diversity """Controls the balance between coherence and diversity
of the output. A lower value will result in more focused and of the output. A lower value will result in more focused and
coherent text. (Default: 5.0)""" coherent text. (Default: ``5.0``)"""
num_ctx: Optional[int] = None num_ctx: Optional[int] = None
"""Sets the size of the context window used to generate the """Sets the size of the context window used to generate the
next token. (Default: 2048) """ next token. (Default: ``2048``)"""
num_gpu: Optional[int] = None num_gpu: Optional[int] = None
"""The number of GPUs to use. On macOS it defaults to 1 to """The number of GPUs to use. Defaults to ``1`` on macOS (to enable metal
enable metal support, 0 to disable.""" support). Set to ``0`` to disable."""
num_thread: Optional[int] = None num_thread: Optional[int] = None
"""Sets the number of threads to use during computation. """Sets the number of threads to use during computation.
@ -65,20 +68,26 @@ class OllamaLLM(BaseLLM):
num_predict: Optional[int] = None num_predict: Optional[int] = None
"""Maximum number of tokens to predict when generating text. """Maximum number of tokens to predict when generating text.
(Default: 128, -1 = infinite generation, -2 = fill context)""" (Default: ``128``)
- ``-1`` = Infinite generation
- ``-2`` = Fill context"""
repeat_last_n: Optional[int] = None repeat_last_n: Optional[int] = None
"""Sets how far back for the model to look back to prevent """Sets how far back for the model to look back to prevent repetition.
repetition. (Default: 64, 0 = disabled, -1 = num_ctx)""" (Default: ``64``)
- ``0`` = Disabled
- ``-1`` = ``num_ctx``"""
repeat_penalty: Optional[float] = None repeat_penalty: Optional[float] = None
"""Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) """Sets how strongly to penalize repetitions. A higher value (e.g., ``1.5``)
will penalize repetitions more strongly, while a lower value (e.g., 0.9) will penalize repetitions more strongly, while a lower value (e.g., ``0.9``)
will be more lenient. (Default: 1.1)""" will be more lenient. (Default: ``1.1``)"""
temperature: Optional[float] = None temperature: Optional[float] = None
"""The temperature of the model. Increasing the temperature will """The temperature of the model. Increasing the temperature will
make the model answer more creatively. (Default: 0.8)""" make the model answer more creatively. (Default: ``0.8``)"""
seed: Optional[int] = None seed: Optional[int] = None
"""Sets the random number seed to use for generation. Setting this """Sets the random number seed to use for generation. Setting this
@ -90,21 +99,21 @@ class OllamaLLM(BaseLLM):
tfs_z: Optional[float] = None tfs_z: Optional[float] = None
"""Tail free sampling is used to reduce the impact of less probable """Tail free sampling is used to reduce the impact of less probable
tokens from the output. A higher value (e.g., 2.0) will reduce the tokens from the output. A higher value (e.g., ``2.0``) will reduce the
impact more, while a value of 1.0 disables this setting. (default: 1)""" impact more, while a value of 1.0 disables this setting. (Default: ``1``)"""
top_k: Optional[int] = None top_k: Optional[int] = None
"""Reduces the probability of generating nonsense. A higher value (e.g. 100) """Reduces the probability of generating nonsense. A higher value (e.g.
will give more diverse answers, while a lower value (e.g. 10) ``100``) will give more diverse answers, while a lower value (e.g. ``10``)
will be more conservative. (Default: 40)""" will be more conservative. (Default: ``40``)"""
top_p: Optional[float] = None top_p: Optional[float] = None
"""Works together with top-k. A higher value (e.g., 0.95) will lead """Works together with top-k. A higher value (e.g., ``0.95``) will lead
to more diverse text, while a lower value (e.g., 0.5) will to more diverse text, while a lower value (e.g., ``0.5``) will
generate more focused and conservative text. (Default: 0.9)""" generate more focused and conservative text. (Default: ``0.9``)"""
format: Literal["", "json"] = "" format: Literal["", "json"] = ""
"""Specify the format of the output (options: json)""" """Specify the format of the output."""
keep_alive: Optional[Union[int, str]] = None keep_alive: Optional[Union[int, str]] = None
"""How long the model will stay loaded into memory.""" """How long the model will stay loaded into memory."""
@ -115,21 +124,20 @@ class OllamaLLM(BaseLLM):
client_kwargs: Optional[dict] = {} client_kwargs: Optional[dict] = {}
"""Additional kwargs to pass to the httpx clients. """Additional kwargs to pass to the httpx clients.
These arguments are passed to both synchronous and async clients. These arguments are passed to both synchronous and async clients.
Use sync_client_kwargs and async_client_kwargs to pass different arguments Use ``sync_client_kwargs`` and ``async_client_kwargs`` to pass different
to synchronous and asynchronous clients. arguments to synchronous and asynchronous clients.
""" """
async_client_kwargs: Optional[dict] = {} async_client_kwargs: Optional[dict] = {}
"""Additional kwargs to merge with client_kwargs before passing to the HTTPX """Additional kwargs to merge with ``client_kwargs`` before
AsyncClient. passing to the httpx ``AsyncClient``.
`Full list of params. <https://www.python-httpx.org/api/#asyncclient>`__
For a full list of the params, see the `HTTPX documentation <https://www.python-httpx.org/api/#asyncclient>`__.
""" """
sync_client_kwargs: Optional[dict] = {} sync_client_kwargs: Optional[dict] = {}
"""Additional kwargs to merge with client_kwargs before passing to the HTTPX Client. """Additional kwargs to merge with ``client_kwargs`` before
passing to the httpx ``Client``.
For a full list of the params, see the `HTTPX documentation <https://www.python-httpx.org/api/#client>`__. `Full list of params. <https://www.python-httpx.org/api/#client>`__
""" """
_client: Client = PrivateAttr(default=None) # type: ignore _client: Client = PrivateAttr(default=None) # type: ignore