diff --git a/libs/core/langchain_core/callbacks/usage.py b/libs/core/langchain_core/callbacks/usage.py index 8a04b046818..e30d77ba2ce 100644 --- a/libs/core/langchain_core/callbacks/usage.py +++ b/libs/core/langchain_core/callbacks/usage.py @@ -98,7 +98,7 @@ def get_usage_metadata_callback( Args: name (str): The name of the context variable. Defaults to - ``"usage_metadata_callback"``. + ``'usage_metadata_callback'``. Example: .. code-block:: python diff --git a/libs/core/langchain_core/document_loaders/langsmith.py b/libs/core/langchain_core/document_loaders/langsmith.py index 57cac1347c5..b9a5de4fac3 100644 --- a/libs/core/langchain_core/document_loaders/langsmith.py +++ b/libs/core/langchain_core/document_loaders/langsmith.py @@ -61,7 +61,7 @@ class LangSmithLoader(BaseLoader): Args: dataset_id: The ID of the dataset to filter by. Defaults to None. dataset_name: The name of the dataset to filter by. Defaults to None. - content_key: The inputs key to set as Document page content. ``"."`` characters + content_key: The inputs key to set as Document page content. ``'.'`` characters are interpreted as nested keys. E.g. ``content_key="first.second"`` will result in ``Document(page_content=format_content(example.inputs["first"]["second"]))`` diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py index 073db30ed3e..dcd3809bee5 100644 --- a/libs/core/langchain_core/language_models/chat_models.py +++ b/libs/core/langchain_core/language_models/chat_models.py @@ -1367,12 +1367,13 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC): """Model wrapper that returns outputs formatted to match the given schema. Args: - schema: - The output schema. Can be passed in as: - - an OpenAI function/tool schema, - - a JSON Schema, - - a TypedDict class, - - or a Pydantic class. + schema: The output schema. Can be passed in as: + + - an OpenAI function/tool schema, + - a JSON Schema, + - a TypedDict class, + - or a Pydantic class. + If ``schema`` is a Pydantic class then the model output will be a Pydantic instance of that class, and the model-generated fields will be validated by the Pydantic class. Otherwise the model output will be a @@ -1386,7 +1387,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC): then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys "raw", "parsed", and "parsing_error". + with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. Returns: A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`. @@ -1397,9 +1398,10 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC): Otherwise, if ``include_raw`` is False then Runnable outputs a dict. If ``include_raw`` is True, then Runnable outputs a dict with keys: - - ``"raw"``: BaseMessage - - ``"parsed"``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``"parsing_error"``: Optional[BaseException] + + - ``'raw'``: BaseMessage + - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. + - ``'parsing_error'``: Optional[BaseException] Example: Pydantic schema (include_raw=False): .. 
code-block:: python diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py index 56ac81226f9..aa87dc0edd6 100644 --- a/libs/core/langchain_core/runnables/base.py +++ b/libs/core/langchain_core/runnables/base.py @@ -736,10 +736,10 @@ class Runnable(ABC, Generic[Input, Output]): Args: input: The input to the Runnable. config: A config to use when invoking the Runnable. - The config supports standard keys like 'tags', 'metadata' for tracing - purposes, 'max_concurrency' for controlling how much work to do - in parallel, and other keys. Please refer to the RunnableConfig - for more details. + The config supports standard keys like ``'tags'``, ``'metadata'`` for + tracing purposes, ``'max_concurrency'`` for controlling how much work to + do in parallel, and other keys. Please refer to the RunnableConfig + for more details. Defaults to None. Returns: The output of the Runnable. @@ -885,9 +885,9 @@ class Runnable(ABC, Generic[Input, Output]): Args: inputs: A list of inputs to the Runnable. config: A config to use when invoking the Runnable. - The config supports standard keys like 'tags', 'metadata' for tracing - purposes, 'max_concurrency' for controlling how much work to do - in parallel, and other keys. Please refer to the RunnableConfig + The config supports standard keys like ``'tags'``, ``'metadata'`` for + tracing purposes, ``'max_concurrency'`` for controlling how much work to + do in parallel, and other keys. Please refer to the RunnableConfig for more details. Defaults to None. return_exceptions: Whether to return exceptions instead of raising them. Defaults to False. @@ -950,10 +950,10 @@ class Runnable(ABC, Generic[Input, Output]): Args: inputs: A list of inputs to the Runnable. config: A config to use when invoking the Runnable. - The config supports standard keys like 'tags', 'metadata' for tracing - purposes, 'max_concurrency' for controlling how much work to do - in parallel, and other keys. Please refer to the RunnableConfig - for more details. Defaults to None. Defaults to None. + The config supports standard keys like ``'tags'``, ``'metadata'`` for + tracing purposes, ``'max_concurrency'`` for controlling how much work to + do in parallel, and other keys. Please refer to the RunnableConfig + for more details. Defaults to None. return_exceptions: Whether to return exceptions instead of raising them. Defaults to False. kwargs: Additional keyword arguments to pass to the Runnable. @@ -1569,18 +1569,17 @@ class Runnable(ABC, Generic[Input, Output]): ) -> Runnable[Input, Output]: """Bind lifecycle listeners to a Runnable, returning a new Runnable. - on_start: Called before the Runnable starts running, with the Run object. - on_end: Called after the Runnable finishes running, with the Run object. - on_error: Called if the Runnable throws an error, with the Run object. - The Run object contains information about the run, including its id, type, input, output, error, start_time, end_time, and any tags or metadata added to the run. Args: - on_start: Called before the Runnable starts running. Defaults to None. - on_end: Called after the Runnable finishes running. Defaults to None. - on_error: Called if the Runnable throws an error. Defaults to None. + on_start: Called before the Runnable starts running, with the Run object. + Defaults to None. + on_end: Called after the Runnable finishes running, with the Run object. + Defaults to None. + on_error: Called if the Runnable throws an error, with the Run object. + Defaults to None. 
Returns: A new Runnable with the listeners bound. @@ -1636,21 +1635,17 @@ class Runnable(ABC, Generic[Input, Output]): ) -> Runnable[Input, Output]: """Bind async lifecycle listeners to a Runnable, returning a new Runnable. - on_start: Asynchronously called before the Runnable starts running. - on_end: Asynchronously called after the Runnable finishes running. - on_error: Asynchronously called if the Runnable throws an error. - The Run object contains information about the run, including its id, type, input, output, error, start_time, end_time, and any tags or metadata added to the run. Args: - on_start: Asynchronously called before the Runnable starts running. - Defaults to None. - on_end: Asynchronously called after the Runnable finishes running. - Defaults to None. - on_error: Asynchronously called if the Runnable throws an error. - Defaults to None. + on_start: Called asynchronously before the Runnable starts running, + with the Run object. Defaults to None. + on_end: Called asynchronously after the Runnable finishes running, + with the Run object. Defaults to None. + on_error: Called asynchronously if the Runnable throws an error, + with the Run object. Defaults to None. Returns: A new Runnable with the listeners bound. @@ -2550,7 +2545,7 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]): which: The ConfigurableField instance that will be used to select the alternative. default_key: The default key to use if no alternative is selected. - Defaults to "default". + Defaults to ``'default'``. prefix_keys: Whether to prefix the keys with the ConfigurableField id. Defaults to False. **kwargs: A dictionary of keys to Runnable instances or callables that @@ -5215,6 +5210,10 @@ class RunnableEach(RunnableEachBase[Input, Output]): ) -> RunnableEach[Input, Output]: """Bind lifecycle listeners to a Runnable, returning a new Runnable. + The Run object contains information about the run, including its id, + type, input, output, error, start_time, end_time, and any tags or metadata + added to the run. + Args: on_start: Called before the Runnable starts running, with the Run object. Defaults to None. @@ -5225,10 +5224,6 @@ class RunnableEach(RunnableEachBase[Input, Output]): Returns: A new Runnable with the listeners bound. - - The Run object contains information about the run, including its id, - type, input, output, error, start_time, end_time, and any tags or metadata - added to the run. """ return RunnableEach( bound=self.bound.with_listeners( @@ -5245,20 +5240,20 @@ class RunnableEach(RunnableEachBase[Input, Output]): ) -> RunnableEach[Input, Output]: """Bind async lifecycle listeners to a Runnable, returning a new Runnable. - Args: - on_start: Called asynchronously before the Runnable starts running, - with the Run object. Defaults to None. - on_end: Called asynchronously after the Runnable finishes running, - with the Run object. Defaults to None. - on_error: Called asynchronously if the Runnable throws an error, - with the Run object. Defaults to None. - - Returns: - A new Runnable with the listeners bound. - The Run object contains information about the run, including its id, type, input, output, error, start_time, end_time, and any tags or metadata added to the run. + + Args: + on_start: Called asynchronously before the Runnable starts running, + with the Run object. Defaults to None. + on_end: Called asynchronously after the Runnable finishes running, + with the Run object. Defaults to None. 
+ on_error: Called asynchronously if the Runnable throws an error, + with the Run object. Defaults to None. + + Returns: + A new Runnable with the listeners bound. """ return RunnableEach( bound=self.bound.with_alisteners( @@ -5768,6 +5763,10 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): ) -> Runnable[Input, Output]: """Bind lifecycle listeners to a Runnable, returning a new Runnable. + The Run object contains information about the run, including its id, + type, input, output, error, start_time, end_time, and any tags or metadata + added to the run. + Args: on_start: Called before the Runnable starts running, with the Run object. Defaults to None. @@ -5777,9 +5776,7 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): Defaults to None. Returns: - The Runnable object contains information about the run, including its id, - type, input, output, error, start_time, end_time, and any tags or metadata - added to the run. + A new Runnable with the listeners bound. """ from langchain_core.tracers.root_listeners import RootListenersTracer diff --git a/libs/core/langchain_core/runnables/configurable.py b/libs/core/langchain_core/runnables/configurable.py index 4f2963353f8..f79acb433d9 100644 --- a/libs/core/langchain_core/runnables/configurable.py +++ b/libs/core/langchain_core/runnables/configurable.py @@ -544,7 +544,7 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]): """The alternatives to choose from.""" default_key: str = "default" - """The enum value to use for the default option. Defaults to "default".""" + """The enum value to use for the default option. Defaults to ``'default'``.""" prefix_keys: bool """Whether to prefix configurable fields of each alternative with a namespace diff --git a/libs/core/langchain_core/tracers/context.py b/libs/core/langchain_core/tracers/context.py index 466bc2f9f11..58471a09ecc 100644 --- a/libs/core/langchain_core/tracers/context.py +++ b/libs/core/langchain_core/tracers/context.py @@ -62,7 +62,7 @@ def tracing_v2_enabled( Args: project_name (str, optional): The name of the project. - Defaults to "default". + Defaults to ``'default'``. example_id (str or UUID, optional): The ID of the example. Defaults to None. tags (list[str], optional): The tags to add to the run. diff --git a/libs/langchain/langchain/pydantic_v1/__init__.py b/libs/langchain/langchain/pydantic_v1/__init__.py index 2cabc6622be..e372ca8105f 100644 --- a/libs/langchain/langchain/pydantic_v1/__init__.py +++ b/libs/langchain/langchain/pydantic_v1/__init__.py @@ -7,7 +7,7 @@ from langchain_core._api import warn_deprecated # attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules. # # This hack is done for the following reasons: -# * Langchain will attempt to remain compatible with both pydantic v1 and v2 since +# * LangChain will attempt to remain compatible with both pydantic v1 and v2 since # both dependencies and dependents may be stuck on either version of v1 or v2. # * Creating namespaces for pydantic v1 and v2 should allow us to write code that # unambiguously uses either v1 or v2 API. 
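The listener hunks above all document the same Run object contract. As a concrete illustration of that contract, here is a minimal sketch of attaching lifecycle listeners to a Runnable (the lambda and print statements are illustrative, not taken from the diff):

.. code-block:: python

    from langchain_core.runnables import RunnableLambda

    def log_start(run) -> None:
        # The Run object carries the run's id, type, input, output, error,
        # start_time, end_time, and any tags or metadata added to the run.
        print(f"{run.name} started at {run.start_time}")

    def log_end(run) -> None:
        print(f"{run.name} ended at {run.end_time}")

    chain = RunnableLambda(lambda x: x + 1).with_listeners(
        on_start=log_start,
        on_end=log_end,
    )
    chain.invoke(2)  # fires log_start, then log_end, each with the populated Run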
diff --git a/libs/langchain/tests/README.md b/libs/langchain/tests/README.md index 881c196edf4..533abea5323 100644 --- a/libs/langchain/tests/README.md +++ b/libs/langchain/tests/README.md @@ -1,3 +1,3 @@ -# Langchain Tests +# LangChain Tests [This guide has moved to the docs](https://python.langchain.com/docs/contributing/testing) diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py index ea055d349ce..41b51322086 100644 --- a/libs/partners/anthropic/langchain_anthropic/chat_models.py +++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py @@ -982,7 +982,7 @@ class ChatAnthropic(BaseChatModel): .. versionadded:: 0.3.15 The cache lifetime is 5 minutes by default. If this is too short, you can - apply one hour caching by enabling the ``"extended-cache-ttl-2025-04-11"`` + apply one hour caching by enabling the ``'extended-cache-ttl-2025-04-11'`` beta header: .. code-block:: python @@ -1593,8 +1593,8 @@ class ChatAnthropic(BaseChatModel): tool_choice: Which tool to require the model to call. Options are: - name of the tool as a string or as dict ``{"type": "tool", "name": "<>"}``: calls corresponding tool; - - ``"auto"``, ``{"type: "auto"}``, or ``None``: automatically selects a tool (including no tool); - - ``"any"`` or ``{"type: "any"}``: force at least one tool to be called; + - ``'auto'``, ``{"type": "auto"}``, or ``None``: automatically selects a tool (including no tool); + - ``'any'`` or ``{"type": "any"}``: force at least one tool to be called; parallel_tool_calls: Set to ``False`` to disable parallel tool use. Defaults to ``None`` (no specification, which allows parallel tool use). @@ -1801,9 +1801,10 @@ class ChatAnthropic(BaseChatModel): Otherwise, if ``include_raw`` is ``False`` then Runnable outputs a dict. If ``include_raw`` is True, then Runnable outputs a dict with keys: - - ``raw``: BaseMessage - - ``parsed``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``parsing_error``: Optional[BaseException] + + - ``'raw'``: BaseMessage + - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. + - ``'parsing_error'``: Optional[BaseException] Example: Pydantic schema (include_raw=False): diff --git a/libs/partners/chroma/langchain_chroma/vectorstores.py b/libs/partners/chroma/langchain_chroma/vectorstores.py index 693f4a10cd3..9c84f57fc46 100644 --- a/libs/partners/chroma/langchain_chroma/vectorstores.py +++ b/libs/partners/chroma/langchain_chroma/vectorstores.py @@ -888,14 +888,14 @@ class Chroma(VectorStore): Args: uri (str): URI of the image to search for. - k (int, optional): Number of results to return. Defaults to DEFAULT_K. + k (int, optional): Number of results to return. Defaults to ``DEFAULT_K``. filter (Optional[Dict[str, str]], optional): Filter by metadata. **kwargs (Any): Additional arguments to pass to function. Returns: List of Images most similar to the provided image. - Each element in list is a Langchain Document Object. + Each element in the list is a LangChain Document Object. The page content is b64 encoded image, metadata is default or as defined by user. @@ -938,7 +938,7 @@ class Chroma(VectorStore): Returns: List[Tuple[Document, float]]: List of tuples containing documents similar to the query image and their similarity scores. - 0th element in each tuple is a Langchain Document Object. + 0th element in each tuple is a LangChain Document Object. 
The page content is b64 encoded img, metadata is default or defined by user. Raises: diff --git a/libs/partners/deepseek/langchain_deepseek/chat_models.py b/libs/partners/deepseek/langchain_deepseek/chat_models.py index 8bf4d0516a7..785e3a0058a 100644 --- a/libs/partners/deepseek/langchain_deepseek/chat_models.py +++ b/libs/partners/deepseek/langchain_deepseek/chat_models.py @@ -353,8 +353,7 @@ class ChatDeepSeek(BaseChatOpenAI): """Model wrapper that returns outputs formatted to match the given schema. Args: - schema: - The output schema. Can be passed in as: + schema: The output schema. Can be passed in as: - an OpenAI function/tool schema, - a JSON Schema, @@ -370,14 +369,14 @@ class ChatDeepSeek(BaseChatOpenAI): method: The method for steering model generation, one of: - - "function_calling": + - ``'function_calling'``: Uses DeepSeek's `tool-calling features `_. - - "json_mode": + - ``'json_mode'``: Uses DeepSeek's `JSON mode feature `_. .. versionchanged:: 0.1.3 - Added support for ``"json_mode"``. + Added support for ``'json_mode'``. include_raw: If False then only the parsed structured output is returned. If @@ -385,7 +384,7 @@ class ChatDeepSeek(BaseChatOpenAI): then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys "raw", "parsed", and "parsing_error". + with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. strict: Whether to enable strict schema adherence when generating the function @@ -399,13 +398,14 @@ class ChatDeepSeek(BaseChatOpenAI): Returns: A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`. - | If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict. + If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs + an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict. - | If ``include_raw`` is True, then Runnable outputs a dict with keys: + If ``include_raw`` is True, then Runnable outputs a dict with keys: - - "raw": BaseMessage - - "parsed": None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - "parsing_error": Optional[BaseException] + - ``'raw'``: BaseMessage + - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. + - ``'parsing_error'``: Optional[BaseException] """ # noqa: E501 # Some applications require that incompatible parameters (e.g., unsupported diff --git a/libs/partners/fireworks/langchain_fireworks/chat_models.py b/libs/partners/fireworks/langchain_fireworks/chat_models.py index 313fab789f3..79934d346ed 100644 --- a/libs/partners/fireworks/langchain_fireworks/chat_models.py +++ b/libs/partners/fireworks/langchain_fireworks/chat_models.py @@ -302,7 +302,7 @@ class ChatFireworks(BaseChatModel): @classmethod def is_lc_serializable(cls) -> bool: - """Return whether this model can be serialized by Langchain.""" + """Return whether this model can be serialized by LangChain.""" return True client: Any = Field(default=None, exclude=True) #: :meta private: @@ -649,7 +649,7 @@ class ChatFireworks(BaseChatModel): their schema dictionary representation. 
function_call: Which function to require the model to call. Must be the name of the single provided function or - "auto" to automatically determine which function to call + ``'auto'`` to automatically determine which function to call (if any). **kwargs: Any additional parameters to pass to the :class:`~langchain.runnable.Runnable` constructor. @@ -746,12 +746,13 @@ class ChatFireworks(BaseChatModel): """Model wrapper that returns outputs formatted to match the given schema. Args: - schema: - The output schema. Can be passed in as: - - an OpenAI function/tool schema, - - a JSON Schema, - - a TypedDict class (support added in 0.1.7), - - or a Pydantic class. + schema: The output schema. Can be passed in as: + + - an OpenAI function/tool schema, + - a JSON Schema, + - a TypedDict class (support added in 0.1.7), + - or a Pydantic class. + If ``schema`` is a Pydantic class then the model output will be a Pydantic instance of that class, and the model-generated fields will be validated by the Pydantic class. Otherwise the model output will be a @@ -774,7 +775,7 @@ class ChatFireworks(BaseChatModel): .. versionchanged:: 0.2.8 - Added support for ``"json_schema"``. + Added support for ``'json_schema'``. include_raw: If False then only the parsed structured output is returned. If @@ -782,10 +783,11 @@ class ChatFireworks(BaseChatModel): then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys "raw", "parsed", and "parsing_error". + with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. kwargs: - Any additional parameters to pass to the :class:`~langchain.runnable.Runnable` constructor. + Any additional parameters to pass to the + :class:`~langchain.runnable.Runnable` constructor. Returns: A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`. @@ -796,9 +798,10 @@ class ChatFireworks(BaseChatModel): Otherwise, if ``include_raw`` is False then Runnable outputs a dict. If ``include_raw`` is True, then Runnable outputs a dict with keys: - - ``"raw"``: BaseMessage - - ``"parsed"``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``"parsing_error"``: Optional[BaseException] + + - ``'raw'``: BaseMessage + - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. + - ``'parsing_error'``: Optional[BaseException] Example: schema=Pydantic class, method="function_calling", include_raw=False: diff --git a/libs/partners/groq/langchain_groq/chat_models.py b/libs/partners/groq/langchain_groq/chat_models.py index 361e90b77e8..b39112d8848 100644 --- a/libs/partners/groq/langchain_groq/chat_models.py +++ b/libs/partners/groq/langchain_groq/chat_models.py @@ -360,7 +360,7 @@ class ChatGroq(BaseChatModel): request_timeout: Union[float, tuple[float, float], Any, None] = Field( default=None, alias="timeout" ) - """Timeout for requests to Groq completion API. Can be float, httpx.Timeout or + """Timeout for requests to Groq completion API. 
Can be float, ``httpx.Timeout`` or None.""" max_retries: int = 2 """Maximum number of retries to make when generating.""" @@ -486,7 +486,7 @@ class ChatGroq(BaseChatModel): @classmethod def is_lc_serializable(cls) -> bool: - """Return whether this model can be serialized by Langchain.""" + """Return whether this model can be serialized by LangChain.""" return True # @@ -774,7 +774,7 @@ class ChatGroq(BaseChatModel): their schema dictionary representation. function_call: Which function to require the model to call. Must be the name of the single provided function or - "auto" to automatically determine which function to call + ``'auto'`` to automatically determine which function to call (if any). **kwargs: Any additional parameters to pass to :meth:`~langchain_groq.chat_models.ChatGroq.bind`. @@ -869,8 +869,7 @@ class ChatGroq(BaseChatModel): r"""Model wrapper that returns outputs formatted to match the given schema. Args: - schema: - The output schema. Can be passed in as: + schema: The output schema. Can be passed in as: - an OpenAI function/tool schema, - a JSON Schema, @@ -887,6 +886,7 @@ class ChatGroq(BaseChatModel): .. versionchanged:: 0.1.9 Added support for TypedDict class. + method: The method for steering model generation, either ``'function_calling'`` or ``'json_mode'``. If ``'function_calling'`` then the schema will be converted @@ -909,6 +909,7 @@ class ChatGroq(BaseChatModel): response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. + kwargs: Any additional parameters to pass to the :class:`~langchain.runnable.Runnable` constructor. @@ -923,11 +924,12 @@ class ChatGroq(BaseChatModel): If ``include_raw`` is True, then Runnable outputs a dict with keys: - - ``"raw"``: BaseMessage - - ``"parsed"``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``"parsing_error"``: Optional[BaseException] + - ``'raw'``: BaseMessage + - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. + - ``'parsing_error'``: Optional[BaseException] Example: schema=Pydantic class, method="function_calling", include_raw=False: + .. code-block:: python from typing import Optional diff --git a/libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py b/libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py index 7145326bb93..2813ae23289 100644 --- a/libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py +++ b/libs/partners/huggingface/langchain_huggingface/chat_models/huggingface.py @@ -818,7 +818,7 @@ class ChatHuggingFace(BaseChatModel): :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`. tool_choice: Which tool to require the model to call. Must be the name of the single provided function or - "auto" to automatically determine which function to call + ``'auto'`` to automatically determine which function to call (if any), or a dict of the form: {"type": "function", "function": {"name": <>}}. **kwargs: Any additional parameters to pass to the @@ -873,19 +873,19 @@ class ChatHuggingFace(BaseChatModel): """Model wrapper that returns outputs formatted to match the given schema. Args: - schema: - The output schema. Can be passed in as: - - an OpenAI function/tool schema, - - a JSON Schema, - - a typedDict class (support added in 0.1.7), + schema: The output schema. 
Can be passed in as: + + - an OpenAI function/tool schema, + - a JSON Schema, + - a typedDict class (support added in 0.1.7), Pydantic class is currently supported. method: The method for steering model generation, one of: - - "function_calling": uses tool-calling features. - - "json_schema": uses dedicated structured output features. - - "json_mode": uses JSON mode. + - ``'function_calling'``: uses tool-calling features. + - ``'json_schema'``: uses dedicated structured output features. + - ``'json_mode'``: uses JSON mode. include_raw: If False then only the parsed structured output is returned. If @@ -893,7 +893,7 @@ class ChatHuggingFace(BaseChatModel): then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys "raw", "parsed", and "parsing_error". + with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. kwargs: Additional parameters to pass to the underlying LLM's @@ -909,9 +909,10 @@ class ChatHuggingFace(BaseChatModel): Otherwise, if ``include_raw`` is False then Runnable outputs a dict. If ``include_raw`` is True, then Runnable outputs a dict with keys: - - ``"raw"``: BaseMessage - - ``"parsed"``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``"parsing_error"``: Optional[BaseException] + + - ``'raw'``: BaseMessage + - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. + - ``'parsing_error'``: Optional[BaseException] """ # noqa: E501 _ = kwargs.pop("strict", None) diff --git a/libs/partners/mistralai/langchain_mistralai/chat_models.py b/libs/partners/mistralai/langchain_mistralai/chat_models.py index 358f4ea73a4..51f1687c06b 100644 --- a/libs/partners/mistralai/langchain_mistralai/chat_models.py +++ b/libs/partners/mistralai/langchain_mistralai/chat_models.py @@ -292,7 +292,7 @@ def _convert_chunk_to_message_chunk( def _format_tool_call_for_mistral(tool_call: ToolCall) -> dict: - """Format Langchain ToolCall to dict expected by Mistral.""" + """Format LangChain ToolCall to dict expected by Mistral.""" result: dict[str, Any] = { "function": { "name": tool_call["name"], @@ -306,7 +306,7 @@ def _format_tool_call_for_mistral(tool_call: ToolCall) -> dict: def _format_invalid_tool_call_for_mistral(invalid_tool_call: InvalidToolCall) -> dict: - """Format Langchain InvalidToolCall to dict expected by Mistral.""" + """Format LangChain InvalidToolCall to dict expected by Mistral.""" result: dict[str, Any] = { "function": { "name": invalid_tool_call["name"], @@ -707,7 +707,7 @@ class ChatMistralAI(BaseChatModel): :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`. tool_choice: Which tool to require the model to call. Must be the name of the single provided function or - "auto" to automatically determine which function to call + ``'auto'`` to automatically determine which function to call (if any), or a dict of the form: {"type": "function", "function": {"name": <>}}. kwargs: Any additional parameters are passed directly to @@ -746,12 +746,13 @@ class ChatMistralAI(BaseChatModel): r"""Model wrapper that returns outputs formatted to match the given schema. Args: - schema: - The output schema. Can be passed in as: - - an OpenAI function/tool schema, - - a JSON Schema, - - a TypedDict class (support added in 0.1.12), - - or a Pydantic class. + schema: The output schema. 
Can be passed in as: + + - an OpenAI function/tool schema, + - a JSON Schema, + - a TypedDict class (support added in 0.1.12), + - or a Pydantic class. + If ``schema`` is a Pydantic class then the model output will be a Pydantic instance of that class, and the model-generated fields will be validated by the Pydantic class. Otherwise the model output will be a @@ -765,13 +766,13 @@ class ChatMistralAI(BaseChatModel): method: The method for steering model generation, one of: - - "function_calling": + - ``'function_calling'``: Uses Mistral's `function-calling feature `_. - - "json_schema": + - ``'json_schema'``: Uses Mistral's `structured output feature `_. - - "json_mode": + - ``'json_mode'``: Uses Mistral's `JSON mode `_. Note that if using JSON mode then you @@ -788,7 +789,7 @@ class ChatMistralAI(BaseChatModel): then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys "raw", "parsed", and "parsing_error". + with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. kwargs: Any additional parameters are passed directly to ``self.bind(**kwargs)``. This is useful for passing in @@ -805,9 +806,9 @@ class ChatMistralAI(BaseChatModel): Otherwise, if ``include_raw`` is False then Runnable outputs a dict. If ``include_raw`` is True, then Runnable outputs a dict with keys: - - ``"raw"``: BaseMessage - - ``"parsed"``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - ``"parsing_error"``: Optional[BaseException] + - ``'raw'``: BaseMessage + - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. + - ``'parsing_error'``: Optional[BaseException] Example: schema=Pydantic class, method="function_calling", include_raw=False: .. code-block:: python @@ -1073,7 +1074,7 @@ class ChatMistralAI(BaseChatModel): @classmethod def is_lc_serializable(cls) -> bool: - """Return whether this model can be serialized by Langchain.""" + """Return whether this model can be serialized by LangChain.""" return True @classmethod diff --git a/libs/partners/ollama/langchain_ollama/chat_models.py b/libs/partners/ollama/langchain_ollama/chat_models.py index dcecf0fb8d2..fbc36ddd7b6 100644 --- a/libs/partners/ollama/langchain_ollama/chat_models.py +++ b/libs/partners/ollama/langchain_ollama/chat_models.py @@ -1044,8 +1044,7 @@ class ChatOllama(BaseChatModel): """Model wrapper that returns outputs formatted to match the given schema. Args: - schema: - The output schema. Can be passed in as: + schema: The output schema. Can be passed in as: - a Pydantic class, - a JSON schema @@ -1061,11 +1060,11 @@ class ChatOllama(BaseChatModel): method: The method for steering model generation, one of: - - "json_schema": + - ``'json_schema'``: Uses Ollama's `structured output API `__ - - "function_calling": + - ``'function_calling'``: Uses Ollama's tool-calling API - - "json_mode": + - ``'json_mode'``: Specifies ``format="json"``. Note that if using JSON mode then you must include instructions for formatting the output into the desired schema into the model call. @@ -1076,20 +1075,20 @@ class ChatOllama(BaseChatModel): then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. 
The final output is always a dict - with keys "raw", "parsed", and "parsing_error". + with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. kwargs: Additional keyword args aren't supported. Returns: A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`. - | If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict. + If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict. - | If ``include_raw`` is True, then Runnable outputs a dict with keys: + If ``include_raw`` is True, then Runnable outputs a dict with keys: - - "raw": BaseMessage - - "parsed": None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - "parsing_error": Optional[BaseException] + - ``'raw'``: BaseMessage + - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. + - ``'parsing_error'``: Optional[BaseException] .. versionchanged:: 0.2.2 Added support for structured output API via ``format`` parameter. .. versionchanged:: 0.3.0 Updated default ``method`` to ``'json_schema'``. .. dropdown:: Example: schema=Pydantic class, method="json_schema", include_raw=False diff --git a/libs/partners/openai/langchain_openai/chat_models/azure.py b/libs/partners/openai/langchain_openai/chat_models/azure.py index 29aac46e7cd..6327220a73c 100644 --- a/libs/partners/openai/langchain_openai/chat_models/azure.py +++ b/libs/partners/openai/langchain_openai/chat_models/azure.py @@ -42,7 +42,7 @@ class AzureChatOpenAI(BaseChatOpenAI): """Azure OpenAI chat model integration. Setup: Head to the https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart?tabs=command-line%2Cpython-new&pivots=programming-language-python + Head to the Azure `OpenAI quickstart guide <https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart?tabs=command-line%2Cpython-new&pivots=programming-language-python>`__ to create your Azure OpenAI deployment. Then install ``langchain-openai`` and set environment variables @@ -68,8 +68,7 @@ class AzureChatOpenAI(BaseChatOpenAI): Key init args — client params: api_version: str Azure OpenAI REST API version to use (distinct from the version of the - underlying model). See more on the different versions here: - https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning + underlying model). `See more on the different versions <https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning>`__. timeout: Union[float, Tuple[float, float], Any, None] Timeout for requests. max_retries: Optional[int] @@ -104,9 +103,13 @@ class AzureChatOpenAI(BaseChatOpenAI): # other params... ) - **NOTE**: Any param which is not explicitly supported will be passed directly to the - ``openai.AzureOpenAI.chat.completions.create(...)`` API every time to the model is - invoked. For example: + .. note:: + Any param which is not explicitly supported will be passed directly to the + ``openai.AzureOpenAI.chat.completions.create(...)`` API every time the model is + invoked. + + For example: + .. code-block:: python from langchain_openai import AzureChatOpenAI @@ -470,21 +473,23 @@ class AzureChatOpenAI(BaseChatOpenAI): ) """Your Azure endpoint, including the resource. - Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided. 
+ Automatically inferred from env var ``AZURE_OPENAI_ENDPOINT`` if not provided. - Example: `https://example-resource.azure.openai.com/` + Example: ``https://example-resource.azure.openai.com/`` """ deployment_name: Union[str, None] = Field(default=None, alias="azure_deployment") """A model deployment. - If given sets the base client URL to include `/deployments/{azure_deployment}`. - Note: this means you won't be able to use non-deployment endpoints. + If given, sets the base client URL to include ``/deployments/{azure_deployment}``. + + .. note:: + This means you won't be able to use non-deployment endpoints. """ openai_api_version: Optional[str] = Field( alias="api_version", default_factory=from_env("OPENAI_API_VERSION", default=None), ) - """Automatically inferred from env var `OPENAI_API_VERSION` if not provided.""" + """Automatically inferred from env var ``OPENAI_API_VERSION`` if not provided.""" # Check OPENAI_API_KEY for backwards compatibility. # TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using # other forms of azure credentials. @@ -494,22 +499,21 @@ class AzureChatOpenAI(BaseChatOpenAI): ["AZURE_OPENAI_API_KEY", "OPENAI_API_KEY"], default=None ), ) - """Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided.""" + """Automatically inferred from env var ``AZURE_OPENAI_API_KEY`` if not provided.""" azure_ad_token: Optional[SecretStr] = Field( default_factory=secret_from_env("AZURE_OPENAI_AD_TOKEN", default=None) ) """Your Azure Active Directory token. - Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided. + Automatically inferred from env var ``AZURE_OPENAI_AD_TOKEN`` if not provided. - For more: - https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id. + For more, see `this page <https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id>`__. """ azure_ad_token_provider: Union[Callable[[], str], None] = None """A function that returns an Azure Active Directory token. Will be invoked on every sync request. For async requests, - will be invoked if `azure_ad_async_token_provider` is not provided. + will be invoked if ``azure_ad_async_token_provider`` is not provided. """ azure_ad_async_token_provider: Union[Callable[[], Awaitable[str]], None] = None @@ -519,7 +523,7 @@ class AzureChatOpenAI(BaseChatOpenAI): """ model_version: str = "" - """The version of the model (e.g. "0125" for gpt-3.5-0125). + """The version of the model (e.g. ``'0125'`` for ``'gpt-3.5-0125'``). Azure OpenAI doesn't return model version with the response by default so it must be manually specified if you want to use this information downstream, e.g. when @@ -534,18 +538,21 @@ class AzureChatOpenAI(BaseChatOpenAI): openai_api_type: Optional[str] = Field( default_factory=from_env("OPENAI_API_TYPE", default="azure") ) - """Legacy, for openai<1.0.0 support.""" + """Legacy, for ``openai<1.0.0`` support.""" validate_base_url: bool = True - """If legacy arg openai_api_base is passed in, try to infer if it is a base_url or - azure_endpoint and update client params accordingly. + """If legacy arg ``openai_api_base`` is passed in, try to infer if it is a + ``base_url`` or ``azure_endpoint`` and update client params accordingly. """ model_name: Optional[str] = Field(default=None, alias="model") # type: ignore[assignment] - """Name of the deployed OpenAI model, e.g. "gpt-4o", "gpt-35-turbo", etc. + """Name of the deployed OpenAI model, e.g. ``'gpt-4o'``, ``'gpt-35-turbo'``, etc. Distinct from the Azure deployment name, which is set by the Azure user. 
- Used for tracing and token counting. Does NOT affect completion. + Used for tracing and token counting. + + .. warning:: + Does NOT affect completion. """ disabled_params: Optional[dict[str, Any]] = Field(default=None) @@ -556,7 +563,7 @@ class AzureChatOpenAI(BaseChatOpenAI): parameter and the value is either None, meaning that parameter should never be used, or it's a list of disabled values for the parameter. - For example, older models may not support the 'parallel_tool_calls' parameter at + For example, older models may not support the ``'parallel_tool_calls'`` parameter at all, in which case ``disabled_params={"parallel_tool_calls": None}`` can be passed in. @@ -567,7 +574,7 @@ class AzureChatOpenAI(BaseChatOpenAI): invocation. By default, unless ``model_name="gpt-4o"`` is specified, then - 'parallel_tools_calls' will be disabled. + ``'parallel_tool_calls'`` will be disabled. """ @classmethod @@ -776,8 +783,7 @@ class AzureChatOpenAI(BaseChatOpenAI): """Model wrapper that returns outputs formatted to match the given schema. Args: - schema: - The output schema. Can be passed in as: + schema: The output schema. Can be passed in as: - a JSON Schema, - a TypedDict class, @@ -793,25 +799,20 @@ class AzureChatOpenAI(BaseChatOpenAI): method: The method for steering model generation, one of: - - "json_schema": - Uses OpenAI's Structured Output API: - https://platform.openai.com/docs/guides/structured-outputs - Supported for "gpt-4o-mini", "gpt-4o-2024-08-06", "o1", and later + - ``'json_schema'``: + Uses OpenAI's `Structured Output API <https://platform.openai.com/docs/guides/structured-outputs>`__. + Supported for ``'gpt-4o-mini'``, ``'gpt-4o-2024-08-06'``, ``'o1'``, and later models. - - "function_calling": + - ``'function_calling'``: Uses OpenAI's tool-calling (formerly called function calling) - API: https://platform.openai.com/docs/guides/function-calling - - "json_mode": - Uses OpenAI's JSON mode. Note that if using JSON mode then you - must include instructions for formatting the output into the - desired schema into the model call: - https://platform.openai.com/docs/guides/structured-outputs/json-mode + `API <https://platform.openai.com/docs/guides/function-calling>`__ + - ``'json_mode'``: + Uses OpenAI's `JSON mode <https://platform.openai.com/docs/guides/structured-outputs/json-mode>`__. + Note that if using JSON mode then you must include instructions for + formatting the output into the desired schema into the model call. Learn more about the differences between the methods and which models - support which methods here: - - - https://platform.openai.com/docs/guides/structured-outputs/structured-outputs-vs-json-mode - - https://platform.openai.com/docs/guides/structured-outputs/function-calling-vs-response-format + support which methods `here <https://platform.openai.com/docs/guides/structured-outputs/structured-outputs-vs-json-mode>`__. include_raw: If False then only the parsed structured output is returned. If an error occurs during model output parsing it will be raised. If True then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys "raw", "parsed", and "parsing_error". + with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. strict: - True: Model output is guaranteed to exactly match the schema. - The input schema will also be validated according to - https://platform.openai.com/docs/guides/structured-outputs/supported-schemas + The input schema will also be validated according to the `supported schemas <https://platform.openai.com/docs/guides/structured-outputs/supported-schemas>`__. - False: Input schema will not be validated and model output will not be validated. 
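The ``with_structured_output`` docstrings above all converge on one output contract. A minimal sketch of the ``include_raw=True`` shape they describe (the deployment name, API version, and schema are placeholders, not values from the diff):

.. code-block:: python

    from pydantic import BaseModel
    from langchain_openai import AzureChatOpenAI

    class Joke(BaseModel):
        setup: str
        punchline: str

    llm = AzureChatOpenAI(
        azure_deployment="gpt-4o",  # placeholder deployment name
        api_version="2024-08-01-preview",  # placeholder API version
    )
    structured = llm.with_structured_output(
        Joke, method="json_schema", include_raw=True
    )
    out = structured.invoke("Tell me a joke about cats")
    # out["raw"] is a BaseMessage, out["parsed"] is a Joke instance
    # (or None on a parsing error), and out["parsing_error"] is
    # Optional[BaseException].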
@@ -835,17 +835,18 @@ class AzureChatOpenAI(BaseChatOpenAI): If schema is specified via TypedDict or JSON schema, ``strict`` is not enabled by default. Pass ``strict=True`` to enable it. - Note: ``strict`` can only be non-null if ``method`` is - ``"json_schema"`` or ``"function_calling"``. + .. note:: + ``strict`` can only be non-null if ``method`` is + ``'json_schema'`` or ``'function_calling'``. tools: A list of tool-like objects to bind to the chat model. Requires that: - - ``method`` is ``"json_schema"`` (default). + - ``method`` is ``'json_schema'`` (default). - ``strict=True`` - ``include_raw=True`` If a model elects to call a - tool, the resulting ``AIMessage`` in ``"raw"`` will include tool calls. + tool, the resulting ``AIMessage`` in ``'raw'`` will include tool calls. .. dropdown:: Example .. code-block:: python from langchain_openai import AzureChatOpenAI @@ -887,13 +888,14 @@ class AzureChatOpenAI(BaseChatOpenAI): Returns: A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`. - | If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict. + If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs + an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict. - | If ``include_raw`` is True, then Runnable outputs a dict with keys: + If ``include_raw`` is True, then Runnable outputs a dict with keys: - - "raw": BaseMessage - - "parsed": None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - "parsing_error": Optional[BaseException] + - ``'raw'``: BaseMessage + - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. + - ``'parsing_error'``: Optional[BaseException] .. versionchanged:: 0.1.20 @@ -921,7 +923,7 @@ class AzureChatOpenAI(BaseChatOpenAI): specify any Field metadata (like min/max constraints) and fields cannot have default values. - See all constraints here: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas + See all constraints `here <https://platform.openai.com/docs/guides/structured-outputs/supported-schemas>`__. .. code-block:: python diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py index 9cfd47b5334..bc70f8f0c39 100644 --- a/libs/partners/openai/langchain_openai/chat_models/base.py +++ b/libs/partners/openai/langchain_openai/chat_models/base.py @@ -461,7 +461,7 @@ class BaseChatOpenAI(BaseChatModel): """Base URL path for API requests, leave blank if not using a proxy or service emulator.""" openai_organization: Optional[str] = Field(default=None, alias="organization") - """Automatically inferred from env var `OPENAI_ORG_ID` if not provided.""" + """Automatically inferred from env var ``OPENAI_ORG_ID`` if not provided.""" # to support explicit proxy for OpenAI openai_proxy: Optional[str] = Field( default_factory=from_env("OPENAI_PROXY", default=None) ) request_timeout: Union[float, tuple[float, float], Any, None] = Field( default=None, alias="timeout" ) - """Timeout for requests to OpenAI completion API. Can be float, httpx.Timeout or + """Timeout for requests to OpenAI completion API. Can be float, ``httpx.Timeout`` or None.""" stream_usage: bool = False """Whether to include usage metadata in streaming output. 
If True, an additional @@ -547,7 +547,7 @@ class BaseChatOpenAI(BaseChatModel): invocations. """ http_async_client: Union[Any, None] = Field(default=None, exclude=True) - """Optional httpx.AsyncClient. Only used for async invocations. Must specify + """Optional ``httpx.AsyncClient``. Only used for async invocations. Must specify ``http_client`` as well if you'd like a custom client for sync invocations.""" stop: Optional[Union[list[str], str]] = Field(default=None, alias="stop_sequences") """Default stop sequences.""" @@ -565,12 +565,14 @@ class BaseChatOpenAI(BaseChatModel): - Any other provider-specific parameters .. note:: + Do NOT use ``model_kwargs`` for custom parameters that are not part of the standard OpenAI API, as this will cause errors when making API calls. Use - ``extra_body`` instead. + ``extra_body`` instead. """ + include_response_headers: bool = False - """Whether to include response headers in the output message response_metadata.""" + """Whether to include response headers in the output message ``response_metadata``.""" # noqa: E501 disabled_params: Optional[dict[str, Any]] = Field(default=None) """Parameters of the OpenAI client or chat.completions endpoint that should be disabled for the given model. @@ -579,7 +581,7 @@ class BaseChatOpenAI(BaseChatModel): parameter and the value is either None, meaning that parameter should never be used, or it's a list of disabled values for the parameter. - For example, older models may not support the 'parallel_tool_calls' parameter at + For example, older models may not support the ``'parallel_tool_calls'`` parameter at all, in which case ``disabled_params={"parallel_tool_calls": None}`` can be passed in. @@ -677,7 +679,7 @@ class BaseChatOpenAI(BaseChatModel): - ``'responses/v1'``: Formats Responses API output items into AIMessage content blocks. - Currently only impacts the Responses API. ``output_version="responses/v1"`` is + Currently only impacts the Responses API. ``output_version='responses/v1'`` is recommended. .. versionadded:: 0.3.25 @@ -1587,7 +1589,7 @@ class BaseChatOpenAI(BaseChatModel): their schema dictionary representation. function_call: Which function to require the model to call. Must be the name of the single provided function or - "auto" to automatically determine which function to call + ``'auto'`` to automatically determine which function to call (if any). **kwargs: Any additional parameters to pass to the :class:`~langchain.runnable.Runnable` constructor. @@ -1645,9 +1647,8 @@ class BaseChatOpenAI(BaseChatModel): - dict of the form ``{"type": "function", "function": {"name": <>}}``: calls <> tool. - ``False`` or ``None``: no effect, default OpenAI behavior. strict: If True, model output is guaranteed to exactly match the JSON Schema - provided in the tool definition. If True, the input schema will be - validated according to - https://platform.openai.com/docs/guides/structured-outputs/supported-schemas. + provided in the tool definition. The input schema will also be validated according to the + `supported schemas <https://platform.openai.com/docs/guides/structured-outputs/supported-schemas>`__. If False, input schema will not be validated and model output will not be validated. If None, ``strict`` argument will not be passed to the model. @@ -1718,8 +1719,7 @@ class BaseChatOpenAI(BaseChatModel): """Model wrapper that returns outputs formatted to match the given schema. Args: - schema: - The output schema. 
Can be passed in as: - an OpenAI function/tool schema, - a JSON Schema, @@ -1735,24 +1735,20 @@ class BaseChatOpenAI(BaseChatModel): method: The method for steering model generation, one of: - - "function_calling": + - ``'function_calling'``: Uses OpenAI's tool-calling (formerly called function calling) - API: https://platform.openai.com/docs/guides/function-calling - - "json_schema": - Uses OpenAI's Structured Output API: https://platform.openai.com/docs/guides/structured-outputs - Supported for "gpt-4o-mini", "gpt-4o-2024-08-06", "o1", and later + `API <https://platform.openai.com/docs/guides/function-calling>`__ + - ``'json_schema'``: + Uses OpenAI's Structured Output `API <https://platform.openai.com/docs/guides/structured-outputs>`__ + Supported for ``'gpt-4o-mini'``, ``'gpt-4o-2024-08-06'``, ``'o1'``, and later models. - - "json_mode": - Uses OpenAI's JSON mode. Note that if using JSON mode then you - must include instructions for formatting the output into the - desired schema into the model call: - https://platform.openai.com/docs/guides/structured-outputs/json-mode + - ``'json_mode'``: + Uses OpenAI's `JSON mode <https://platform.openai.com/docs/guides/structured-outputs/json-mode>`__. + Note that if using JSON mode then you must include instructions for + formatting the output into the desired schema into the model call. Learn more about the differences between the methods and which models - support which methods here: - - - https://platform.openai.com/docs/guides/structured-outputs/structured-outputs-vs-json-mode - - https://platform.openai.com/docs/guides/structured-outputs/function-calling-vs-response-format + support which methods `here <https://platform.openai.com/docs/guides/structured-outputs/structured-outputs-vs-json-mode>`__. include_raw: If False then only the parsed structured output is returned. If an error occurs during model output parsing it will be raised. If True then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys "raw", "parsed", and "parsing_error". + with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. strict: - True: Model output is guaranteed to exactly match the schema. - The input schema will also be validated according to - https://platform.openai.com/docs/guides/structured-outputs/supported-schemas + The input schema will also be validated according to the `supported schemas <https://platform.openai.com/docs/guides/structured-outputs/supported-schemas>`__. - False: Input schema will not be validated and model output will not be validated. @@ -1823,13 +1818,14 @@ class BaseChatOpenAI(BaseChatModel): Returns: A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`. - | If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict. + If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs + an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict. - | If ``include_raw`` is True, then Runnable outputs a dict with keys: + If ``include_raw`` is True, then Runnable outputs a dict with keys: - - "raw": BaseMessage - - "parsed": None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. - - "parsing_error": Optional[BaseException] + - ``'raw'``: BaseMessage + - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. + - ``'parsing_error'``: Optional[BaseException] .. versionchanged:: 0.1.20 @@ -1838,7 +1834,7 @@ class BaseChatOpenAI(BaseChatModel): .. 
versionchanged:: 0.1.21 Support for ``strict`` argument added. - Support for ``method`` = "json_schema" added. + Support for ``method="json_schema"`` added. .. versionchanged:: 0.3.12 Support for ``tools`` added. @@ -2080,24 +2076,25 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] # other params... ) - **NOTE**: Any param which is not explicitly supported will be passed directly to the - ``openai.OpenAI.chat.completions.create(...)`` API every time to the model is - invoked. For example: + .. note:: + Any param which is not explicitly supported will be passed directly to the + ``openai.OpenAI.chat.completions.create(...)`` API every time the model is + invoked. For example: - .. code-block:: python + .. code-block:: python - from langchain_openai import ChatOpenAI - import openai + from langchain_openai import ChatOpenAI + import openai - ChatOpenAI(..., frequency_penalty=0.2).invoke(...) + ChatOpenAI(..., frequency_penalty=0.2).invoke(...) - # results in underlying API call of: + # results in underlying API call of: - openai.OpenAI(..).chat.completions.create(..., frequency_penalty=0.2) + openai.OpenAI(..).chat.completions.create(..., frequency_penalty=0.2) - # which is also equivalent to: + # which is also equivalent to: - ChatOpenAI(...).invoke(..., frequency_penalty=0.2) + ChatOpenAI(...).invoke(..., frequency_penalty=0.2) .. dropdown:: Invoke @@ -2264,26 +2261,27 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] }, ] - Note that ``openai >= 1.32`` supports a ``parallel_tool_calls`` parameter - that defaults to ``True``. This parameter can be set to ``False`` to - disable parallel tool calls: + .. note:: + ``openai >= 1.32`` supports a ``parallel_tool_calls`` parameter + that defaults to ``True``. This parameter can be set to ``False`` to + disable parallel tool calls: - .. code-block:: python + .. code-block:: python - ai_msg = llm_with_tools.invoke( - "What is the weather in LA and NY?", parallel_tool_calls=False - ) - ai_msg.tool_calls + ai_msg = llm_with_tools.invoke( + "What is the weather in LA and NY?", parallel_tool_calls=False + ) + ai_msg.tool_calls - .. code-block:: python + .. code-block:: python - [ - { - "name": "GetWeather", - "args": {"location": "Los Angeles, CA"}, - "id": "call_4OoY0ZR99iEvC7fevsH8Uhtz", - } - ] + [ + { + "name": "GetWeather", + "args": {"location": "Los Angeles, CA"}, + "id": "call_4OoY0ZR99iEvC7fevsH8Uhtz", + } + ] Like other runtime parameters, ``parallel_tool_calls`` can be bound to a model using ``llm.bind(parallel_tool_calls=False)`` or during instantiation by @@ -2297,7 +2295,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] You can access `built-in tools `_ supported by the OpenAI Responses API. See LangChain - `docs `_ for more + `docs `__ for more detail. .. note:: @@ -2352,7 +2350,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] `conversation state `_. Passing in response IDs from previous messages will continue a conversational thread. See LangChain - `docs `_ for more + `conversation docs `__ for more detail. .. code-block:: python @@ -2641,14 +2639,15 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] llm = ChatOpenAI(model="o4-mini", service_tier="flex") Note that this is a beta feature that is only available for a subset of models. - See OpenAI `docs `_ + See OpenAI `flex processing docs `__ for more detail. .. dropdown:: OpenAI-compatible APIs - ``ChatOpenAI`` can be used with OpenAI-compatible APIs like LM Studio, vLLM, - Ollama, and others. 
To use custom parameters specific to these providers, - use the ``extra_body`` parameter. + ``ChatOpenAI`` can be used with OpenAI-compatible APIs like `LM Studio `__, + `vLLM `__, + `Ollama `__, and others. + To use custom parameters specific to these providers, use the ``extra_body`` parameter. **LM Studio example** with TTL (auto-eviction): @@ -2681,7 +2680,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] Use the correct parameter for different types of API arguments: - **Use `model_kwargs` for:** + **Use ``model_kwargs`` for:** - Standard OpenAI API parameters not explicitly defined as class parameters - Parameters that should be flattened into the top-level request payload @@ -2700,7 +2699,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] }, ) - **Use `extra_body` for:** + **Use ``extra_body`` for:** - Custom parameters specific to OpenAI-compatible providers (vLLM, LM Studio, etc.) - Parameters that need to be nested under ``extra_body`` in the request @@ -2760,7 +2759,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] @classmethod def is_lc_serializable(cls) -> bool: - """Return whether this model can be serialized by Langchain.""" + """Return whether this model can be serialized by LangChain.""" return True @property @@ -2822,8 +2821,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] """Model wrapper that returns outputs formatted to match the given schema. Args: - schema: - The output schema. Can be passed in as: + schema: The output schema. Can be passed in as: - a JSON Schema, - a TypedDict class, @@ -2839,25 +2837,20 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] method: The method for steering model generation, one of: - - "json_schema": - Uses OpenAI's Structured Output API: - https://platform.openai.com/docs/guides/structured-outputs - Supported for "gpt-4o-mini", "gpt-4o-2024-08-06", "o1", and later + - ``'json_schema'``: + Uses OpenAI's `Structured Output API `__. + Supported for ``'gpt-4o-mini'``, ``'gpt-4o-2024-08-06'``, ``'o1'``, and later models. - - "function_calling": + - ``'function_calling'``: Uses OpenAI's tool-calling (formerly called function calling) - API: https://platform.openai.com/docs/guides/function-calling - - "json_mode": - Uses OpenAI's JSON mode. Note that if using JSON mode then you - must include instructions for formatting the output into the - desired schema into the model call: - https://platform.openai.com/docs/guides/structured-outputs/json-mode + `API `__ + - ``'json_mode'``: + Uses OpenAI's `JSON mode `__. + Note that if using JSON mode then you must include instructions for + formatting the output into the desired schema into the model call Learn more about the differences between the methods and which models - support which methods here: - - - https://platform.openai.com/docs/guides/structured-outputs/structured-outputs-vs-json-mode - - https://platform.openai.com/docs/guides/structured-outputs/function-calling-vs-response-format + support which methods `here `__. include_raw: If False then only the parsed structured output is returned. If @@ -2865,13 +2858,12 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override] then both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict - with keys "raw", "parsed", and "parsing_error". + with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``. 
            strict:

                - True:
                    Model output is guaranteed to exactly match the schema.
-                    The input schema will also be validated according to
-                    https://platform.openai.com/docs/guides/structured-outputs/supported-schemas
+                    The input schema will also be validated according to the `supported schemas <https://platform.openai.com/docs/guides/structured-outputs/supported-schemas>`__.
                - False:
                    Input schema will not be validated and model output will not be validated.
@@ -2933,13 +2925,14 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
        Returns:
            A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.

-            | If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict.
+            If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs
+            an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict.

-            | If ``include_raw`` is True, then Runnable outputs a dict with keys:
+            If ``include_raw`` is True, then Runnable outputs a dict with keys:

-            - "raw": BaseMessage
-            - "parsed": None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
-            - "parsing_error": Optional[BaseException]
+            - ``'raw'``: BaseMessage
+            - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
+            - ``'parsing_error'``: Optional[BaseException]

        .. versionchanged:: 0.1.20
@@ -2967,7 +2960,7 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
            specify any Field metadata (like min/max constraints) and fields cannot
            have default values.

-            See all constraints here: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas
+            See all constraints `here <https://platform.openai.com/docs/guides/structured-outputs/supported-schemas>`__.

            .. code-block:: python
diff --git a/libs/partners/openai/langchain_openai/embeddings/azure.py b/libs/partners/openai/langchain_openai/embeddings/azure.py
index 9be5c7937ab..5f4db948793 100644
--- a/libs/partners/openai/langchain_openai/embeddings/azure.py
+++ b/libs/partners/openai/langchain_openai/embeddings/azure.py
@@ -106,7 +106,7 @@ class AzureOpenAIEmbeddings(OpenAIEmbeddings):  # type: ignore[override]
    )
    """Your Azure endpoint, including the resource.

-    Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.
+    Automatically inferred from env var ``AZURE_OPENAI_ENDPOINT`` if not provided.

    Example: `https://example-resource.azure.openai.com/`
    """
@@ -125,30 +125,30 @@ class AzureOpenAIEmbeddings(OpenAIEmbeddings):  # type: ignore[override]
            ["AZURE_OPENAI_API_KEY", "OPENAI_API_KEY"], default=None
        ),
    )
-    """Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided."""
+    """Automatically inferred from env var ``AZURE_OPENAI_API_KEY`` if not provided."""
    openai_api_version: Optional[str] = Field(
        default_factory=from_env("OPENAI_API_VERSION", default="2023-05-15"),
        alias="api_version",
    )
-    """Automatically inferred from env var `OPENAI_API_VERSION` if not provided.
+    """Automatically inferred from env var ``OPENAI_API_VERSION`` if not provided.

-    Set to "2023-05-15" by default if env variable `OPENAI_API_VERSION` is not set.
+    Set to ``'2023-05-15'`` by default if env variable ``OPENAI_API_VERSION`` is not
+    set.
    """
    azure_ad_token: Optional[SecretStr] = Field(
        default_factory=secret_from_env("AZURE_OPENAI_AD_TOKEN", default=None)
    )
    """Your Azure Active Directory token.

-    Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
+    Automatically inferred from env var ``AZURE_OPENAI_AD_TOKEN`` if not provided.

-    For more:
-    https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
+    `For more, see this page <https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id>`__.
    """
    azure_ad_token_provider: Union[Callable[[], str], None] = None
    """A function that returns an Azure Active Directory token.

    Will be invoked on every sync request. For async requests,
-    will be invoked if `azure_ad_async_token_provider` is not provided.
+    will be invoked if ``azure_ad_async_token_provider`` is not provided.
    """
    azure_ad_async_token_provider: Union[Callable[[], Awaitable[str]], None] = None
    """A function that returns an Azure Active Directory token.
diff --git a/libs/partners/openai/langchain_openai/llms/azure.py b/libs/partners/openai/langchain_openai/llms/azure.py
index 7b51d79576f..6e99c815ea2 100644
--- a/libs/partners/openai/langchain_openai/llms/azure.py
+++ b/libs/partners/openai/langchain_openai/llms/azure.py
@@ -37,9 +37,9 @@ class AzureOpenAI(BaseOpenAI):
    )
    """Your Azure endpoint, including the resource.

-    Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.
+    Automatically inferred from env var ``AZURE_OPENAI_ENDPOINT`` if not provided.

-    Example: `https://example-resource.azure.openai.com/`
+    Example: ``'https://example-resource.azure.openai.com/'``
    """
    deployment_name: Union[str, None] = Field(default=None, alias="azure_deployment")
    """A model deployment.
@@ -51,7 +51,7 @@ class AzureOpenAI(BaseOpenAI):
        alias="api_version",
        default_factory=from_env("OPENAI_API_VERSION", default=None),
    )
-    """Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
+    """Automatically inferred from env var ``OPENAI_API_VERSION`` if not provided."""
    # Check OPENAI_KEY for backwards compatibility.
    # TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using
    # other forms of azure credentials.
@@ -66,16 +66,15 @@ class AzureOpenAI(BaseOpenAI):
    )
    """Your Azure Active Directory token.

-    Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
+    Automatically inferred from env var ``AZURE_OPENAI_AD_TOKEN`` if not provided.

-    For more:
-    https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
+    `For more, see this page <https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id>`__.
    """
    azure_ad_token_provider: Union[Callable[[], str], None] = None
    """A function that returns an Azure Active Directory token.

    Will be invoked on every sync request. For async requests,
-    will be invoked if `azure_ad_async_token_provider` is not provided.
+    will be invoked if ``azure_ad_async_token_provider`` is not provided.
    """
    azure_ad_async_token_provider: Union[Callable[[], Awaitable[str]], None] = None
    """A function that returns an Azure Active Directory token.
@@ -85,7 +84,7 @@ class AzureOpenAI(BaseOpenAI):
    openai_api_type: Optional[str] = Field(
        default_factory=from_env("OPENAI_API_TYPE", default="azure")
    )
-    """Legacy, for openai<1.0.0 support."""
+    """Legacy, for ``openai<1.0.0`` support."""
    validate_base_url: bool = True
    """For backwards compatibility. If legacy val openai_api_base is passed in, try to
    infer if it is a base_url or azure_endpoint and update accordingly.
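The Azure hunks above only touch docstrings, so no call site appears in the patch itself. As a hedged illustration of how the documented fields fit together, the sketch below wires an Entra ID (Azure AD) token provider into ``AzureOpenAIEmbeddings``; the helper and scope URL are assumptions drawn from the ``azure-identity`` package, not from this patch.

.. code-block:: python

    # Sketch only: assumes the `azure-identity` package is installed; the
    # helper and scope below come from that package, not from this patch.
    from azure.identity import DefaultAzureCredential, get_bearer_token_provider

    from langchain_openai import AzureOpenAIEmbeddings

    # Invoked on every sync request in place of AZURE_OPENAI_API_KEY, as the
    # azure_ad_token_provider docstring above describes.
    token_provider = get_bearer_token_provider(
        DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
    )

    embeddings = AzureOpenAIEmbeddings(
        model="text-embedding-3-small",
        azure_endpoint="https://example-resource.azure.openai.com/",  # or AZURE_OPENAI_ENDPOINT
        openai_api_version="2023-05-15",  # documented default when OPENAI_API_VERSION is unset
        azure_ad_token_provider=token_provider,
    )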
@@ -105,7 +104,7 @@ class AzureOpenAI(BaseOpenAI):

    @classmethod
    def is_lc_serializable(cls) -> bool:
-        """Return whether this model can be serialized by Langchain."""
+        """Return whether this model can be serialized by LangChain."""
        return True

    @model_validator(mode="after")
diff --git a/libs/partners/openai/langchain_openai/llms/base.py b/libs/partners/openai/langchain_openai/llms/base.py
index b4ee88ee327..d33649c237d 100644
--- a/libs/partners/openai/langchain_openai/llms/base.py
+++ b/libs/partners/openai/langchain_openai/llms/base.py
@@ -98,7 +98,7 @@ class BaseOpenAI(BaseLLM):
    request_timeout: Union[float, tuple[float, float], Any, None] = Field(
        default=None, alias="timeout"
    )
-    """Timeout for requests to OpenAI completion API. Can be float, httpx.Timeout or
+    """Timeout for requests to OpenAI completion API. Can be float, ``httpx.Timeout`` or
    None."""
    logit_bias: Optional[dict[str, float]] = None
    """Adjust the probability of specific tokens being generated."""
@@ -685,7 +685,7 @@ class OpenAI(BaseOpenAI):

    @classmethod
    def is_lc_serializable(cls) -> bool:
-        """Return whether this model can be serialized by Langchain."""
+        """Return whether this model can be serialized by LangChain."""
        return True

    @property
diff --git a/libs/partners/perplexity/langchain_perplexity/chat_models.py b/libs/partners/perplexity/langchain_perplexity/chat_models.py
index a106750581c..00174dc9cc1 100644
--- a/libs/partners/perplexity/langchain_perplexity/chat_models.py
+++ b/libs/partners/perplexity/langchain_perplexity/chat_models.py
@@ -408,11 +408,10 @@ class ChatPerplexity(BaseChatModel):
    ) -> Runnable[LanguageModelInput, _DictOrPydantic]:
        """Model wrapper that returns outputs formatted to match the given schema for Preplexity.

        Currently, Perplexity only supports "json_schema" method for structured output
-        as per their official documentation: https://docs.perplexity.ai/guides/structured-outputs
+        as per their `official documentation <https://docs.perplexity.ai/guides/structured-outputs>`__.

        Args:
-            schema:
-                The output schema. Can be passed in as:
+            schema: The output schema. Can be passed in as:

                - a JSON Schema,
                - a TypedDict class,
@@ -420,7 +419,7 @@ class ChatPerplexity(BaseChatModel):

            method: The method for steering model generation, currently only support:

-                - "json_schema": Use the JSON Schema to parse the model output
+                - ``'json_schema'``: Use the JSON Schema to parse the model output

            include_raw:
@@ -429,7 +428,7 @@ class ChatPerplexity(BaseChatModel):
                then both the raw model response (a BaseMessage) and the parsed model
                response will be returned. If an error occurs during output parsing it
                will be caught and returned as well. The final output is always a dict
-                with keys "raw", "parsed", and "parsing_error".
+                with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``.

            strict:
                Unsupported: whether to enable strict schema adherence when generating
@@ -441,13 +440,14 @@ class ChatPerplexity(BaseChatModel):

        Returns:
            A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.

-            | If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict.
+            If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs
+            an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict.

-            | If ``include_raw`` is True, then Runnable outputs a dict with keys:
+            If ``include_raw`` is True, then Runnable outputs a dict with keys:

-            - "raw": BaseMessage
-            - "parsed": None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
-            - "parsing_error": Optional[BaseException]
+            - ``'raw'``: BaseMessage
+            - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
+            - ``'parsing_error'``: Optional[BaseException]

        """  # noqa: E501
        if method in ("function_calling", "json_mode"):
diff --git a/libs/partners/prompty/langchain_prompty/langchain.py b/libs/partners/prompty/langchain_prompty/langchain.py
index 3246d3aab93..32c1eaba4c2 100644
--- a/libs/partners/prompty/langchain_prompty/langchain.py
+++ b/libs/partners/prompty/langchain_prompty/langchain.py
@@ -11,7 +11,7 @@ def create_chat_prompt(
    path: str,
    input_name_agent_scratchpad: str = "agent_scratchpad",
) -> Runnable[dict[str, Any], ChatPromptTemplate]:
-    """Create a chat prompt from a Langchain schema."""
+    """Create a chat prompt from a LangChain schema."""

    def runnable_chat_lambda(inputs: dict[str, Any]) -> ChatPromptTemplate:
        p = load(path)
diff --git a/libs/partners/prompty/langchain_prompty/utils.py b/libs/partners/prompty/langchain_prompty/utils.py
index bbee4140996..970a2f83db3 100644
--- a/libs/partners/prompty/langchain_prompty/utils.py
+++ b/libs/partners/prompty/langchain_prompty/utils.py
@@ -19,7 +19,7 @@ def load(prompt_path: str, configuration: str = "default") -> Prompty:

    Args:
        prompt_path: The path to the prompty file.
-        configuration: The configuration to use. Defaults to "default".
+        configuration: The configuration to use. Defaults to ``'default'``.

    Returns:
        The Prompty object.
@@ -126,7 +126,7 @@ def prepare(

    Args:
        prompt: The Prompty object.
-        inputs: The inputs to the prompty. Defaults to {}.
+        inputs: The inputs to the prompty. Defaults to ``{}``.

    Returns:
        The prepared inputs.
@@ -176,8 +176,8 @@ def run(

    Args:
        prompt: The Prompty object.
        content: The content to run the prompty on.
-        configuration: The configuration to use. Defaults to {}.
-        parameters: The parameters to use. Defaults to {}.
+        configuration: The configuration to use. Defaults to ``{}``.
+        parameters: The parameters to use. Defaults to ``{}``.
        raw: Whether to return the raw output. Defaults to False.

    Returns:
@@ -230,11 +230,11 @@ def execute(

    Args:
        prompt: The prompt to execute.
            Can be a path to a prompty file or a Prompty object.
-        configuration: The configuration to use. Defaults to {}.
-        parameters: The parameters to use. Defaults to {}.
-        inputs: The inputs to the prompty. Defaults to {}.
+        configuration: The configuration to use. Defaults to ``{}``.
+        parameters: The parameters to use. Defaults to ``{}``.
+        inputs: The inputs to the prompty. Defaults to ``{}``.
        raw: Whether to return the raw output. Defaults to False.
-        connection: The connection to use. Defaults to "default".
+        connection: The connection to use. Defaults to ``'default'``.

    Returns:
        The result of executing the prompty.
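The ``langchain_prompty`` hunks above standardize the documented defaults (``'default'``, ``{}``) but show no usage. A minimal, hedged sketch based only on the signatures documented in this patch follows; the ``.prompty`` path and the ``question`` input are hypothetical placeholders.

.. code-block:: python

    # Based on the load()/prepare()/execute() signatures documented above;
    # the file path and the 'question' input are hypothetical.
    from langchain_prompty.utils import execute, load, prepare

    prompty = load("chat.prompty")  # configuration defaults to 'default'

    # prepare() returns the prepared inputs for the prompty (inputs defaults to {}).
    prepared = prepare(prompty, inputs={"question": "What is LangChain?"})

    # execute() accepts a path or a Prompty object; configuration, parameters,
    # and inputs all default to {} and connection defaults to 'default'.
    result = execute("chat.prompty", inputs={"question": "What is LangChain?"})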
diff --git a/libs/partners/qdrant/langchain_qdrant/vectorstores.py b/libs/partners/qdrant/langchain_qdrant/vectorstores.py
index a7a4d437641..9744368cb29 100644
--- a/libs/partners/qdrant/langchain_qdrant/vectorstores.py
+++ b/libs/partners/qdrant/langchain_qdrant/vectorstores.py
@@ -594,7 +594,7 @@ class Qdrant(VectorStore):
            limit=k,
            offset=offset,
            with_payload=True,
-            with_vectors=False,  # Langchain does not expect vectors to be returned
+            with_vectors=False,  # LangChain does not expect vectors to be returned
            score_threshold=score_threshold,
            consistency=consistency,
            **kwargs,
@@ -689,7 +689,7 @@ class Qdrant(VectorStore):
            limit=k,
            offset=offset,
            with_payload=True,
-            with_vectors=False,  # Langchain does not expect vectors to be returned
+            with_vectors=False,  # LangChain does not expect vectors to be returned
            score_threshold=score_threshold,
            consistency=consistency,
            **kwargs,
diff --git a/libs/partners/xai/langchain_xai/chat_models.py b/libs/partners/xai/langchain_xai/chat_models.py
index 4c47dba9151..8763dd680be 100644
--- a/libs/partners/xai/langchain_xai/chat_models.py
+++ b/libs/partners/xai/langchain_xai/chat_models.py
@@ -430,7 +430,7 @@ class ChatXAI(BaseChatOpenAI):  # type: ignore[override]

    @classmethod
    def is_lc_serializable(cls) -> bool:
-        """Return whether this model can be serialized by Langchain."""
+        """Return whether this model can be serialized by LangChain."""
        return True

    @property
@@ -562,8 +562,7 @@ class ChatXAI(BaseChatOpenAI):  # type: ignore[override]
        """Model wrapper that returns outputs formatted to match the given schema.

        Args:
-            schema:
-                The output schema. Can be passed in as:
+            schema: The output schema. Can be passed in as:

                - an OpenAI function/tool schema,
                - a JSON Schema,
@@ -597,7 +596,7 @@ class ChatXAI(BaseChatOpenAI):  # type: ignore[override]
            strict:

                - ``True``: Model output is guaranteed to exactly match the schema.
-                    The input schema will also be validated according to `this schema <https://platform.openai.com/docs/guides/structured-outputs/supported-schemas>`__.
+                    The input schema will also be validated according to the `supported schemas <https://platform.openai.com/docs/guides/structured-outputs/supported-schemas>`__.
                - ``False``: Input schema will not be validated and model output will not
                    be validated.
diff --git a/libs/standard-tests/langchain_tests/integration_tests/chat_models.py b/libs/standard-tests/langchain_tests/integration_tests/chat_models.py
index fa05d08927a..5513e31d275 100644
--- a/libs/standard-tests/langchain_tests/integration_tests/chat_models.py
+++ b/libs/standard-tests/langchain_tests/integration_tests/chat_models.py
@@ -546,8 +546,8 @@ class ChatModelIntegrationTests(ChatModelTests):
        To add configuration to VCR, add a ``conftest.py`` file to the ``tests/``
        directory and implement the ``vcr_config`` fixture there.

-        ``langchain-tests`` excludes the headers ``"authorization"``,
-        ``"x-api-key"``, and ``"api-key"`` from VCR cassettes. To pick up this
+        ``langchain-tests`` excludes the headers ``'authorization'``,
+        ``'x-api-key'``, and ``'api-key'`` from VCR cassettes. To pick up this
        configuration, you will need to add ``conftest.py`` as shown below.
        You can also exclude additional headers, override the default exclusions, or
        apply other customizations to the VCR configuration. See example below:
@@ -582,7 +582,7 @@ class ChatModelIntegrationTests(ChatModelTests):
    .. dropdown:: Compressing cassettes

        ``langchain-tests`` includes a custom VCR serializer that compresses
-        cassettes using gzip. To use it, register the ``"yaml.gz"`` serializer
+        cassettes using gzip. To use it, register the ``yaml.gz`` serializer
        to your VCR fixture and enable this serializer in the config.
        See example below:
@@ -995,7 +995,7 @@ class ChatModelIntegrationTests(ChatModelTests):
                )]
            )

-        Check also that the response includes a ``"model_name"`` key in its
+        Check also that the response includes a ``'model_name'`` key in its
        ``usage_metadata``.
        """
        if not self.returns_usage_metadata:
@@ -1172,7 +1172,7 @@ class ChatModelIntegrationTests(ChatModelTests):
                )]
            )

-        Check also that the aggregated response includes a ``"model_name"`` key
+        Check also that the aggregated response includes a ``'model_name'`` key
        in its ``usage_metadata``.
        """
        if not self.returns_usage_metadata:
@@ -1309,7 +1309,7 @@ class ChatModelIntegrationTests(ChatModelTests):
                    super().test_tool_calling(model)

        Otherwise, in the case that only one tool is bound, ensure that
-        ``tool_choice`` supports the string ``"any"`` to force calling that tool.
+        ``tool_choice`` supports the string ``'any'`` to force calling that tool.
        """
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")
@@ -1382,7 +1382,7 @@ class ChatModelIntegrationTests(ChatModelTests):
                    await super().test_tool_calling_async(model)

        Otherwise, in the case that only one tool is bound, ensure that
-        ``tool_choice`` supports the string ``"any"`` to force calling that tool.
+        ``tool_choice`` supports the string ``'any'`` to force calling that tool.
        """
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")
@@ -1657,7 +1657,7 @@ class ChatModelIntegrationTests(ChatModelTests):
        supports forced tool calling. If it does, ``bind_tools`` should accept a
        ``tool_choice`` parameter that can be used to force a tool call.

-        It should accept (1) the string ``"any"`` to force calling the bound tool,
+        It should accept (1) the string ``'any'`` to force calling the bound tool,
        and (2) the string name of the tool to force calling that tool.
        """
@@ -1718,7 +1718,7 @@ class ChatModelIntegrationTests(ChatModelTests):
                    super().test_tool_calling_with_no_arguments(model)

        Otherwise, in the case that only one tool is bound, ensure that
-        ``tool_choice`` supports the string ``"any"`` to force calling that tool.
+        ``tool_choice`` supports the string ``'any'`` to force calling that tool.
        """  # noqa: E501
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")
diff --git a/libs/standard-tests/langchain_tests/unit_tests/chat_models.py b/libs/standard-tests/langchain_tests/unit_tests/chat_models.py
index 1b746c4c541..d4fa85f4a85 100644
--- a/libs/standard-tests/langchain_tests/unit_tests/chat_models.py
+++ b/libs/standard-tests/langchain_tests/unit_tests/chat_models.py
@@ -317,7 +317,7 @@ class ChatModelUnitTests(ChatModelTests):
        .. warning:: Deprecated since version 0.3.15:
           This property will be removed in version 0.3.20. If a model does not
           support forcing tool calling, override the ``has_tool_choice`` property to
-           return ``False``. Otherwise, models should accept values of ``"any"`` or
+           return ``False``. Otherwise, models should accept values of ``'any'`` or
           the name of a tool in ``tool_choice``.

        Example:
@@ -653,8 +653,8 @@ class ChatModelUnitTests(ChatModelTests):
        To add configuration to VCR, add a ``conftest.py`` file to the ``tests/``
        directory and implement the ``vcr_config`` fixture there.

-        ``langchain-tests`` excludes the headers ``"authorization"``,
-        ``"x-api-key"``, and ``"api-key"`` from VCR cassettes. To pick up this
+        ``langchain-tests`` excludes the headers ``'authorization'``,
+        ``'x-api-key'``, and ``'api-key'`` from VCR cassettes. To pick up this
        configuration, you will need to add ``conftest.py`` as shown below.
        You can also exclude additional headers, override the default exclusions, or
        apply other customizations to the VCR configuration. See example below:
@@ -689,7 +689,7 @@ class ChatModelUnitTests(ChatModelTests):
    .. dropdown:: Compressing cassettes

        ``langchain-tests`` includes a custom VCR serializer that compresses
-        cassettes using gzip. To use it, register the ``"yaml.gz"`` serializer
+        cassettes using gzip. To use it, register the ``yaml.gz`` serializer
        to your VCR fixture and enable this serializer in the config.
        See example below:
@@ -950,8 +950,7 @@ class ChatModelUnitTests(ChatModelTests):

        .. dropdown:: Troubleshooting

-            If this test fails, check that the model accommodates standard parameters:
-            https://python.langchain.com/docs/concepts/chat_models/#standard-parameters
+            If this test fails, check that the model accommodates `standard parameters <https://python.langchain.com/docs/concepts/chat_models/#standard-parameters>`__.

            Check also that the model class is named according to convention
            (e.g., ``ChatProviderName``).
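The standard-tests hunks above reference the ``ChatModelUnitTests`` suite without showing a subclass. For context, a hedged sketch of the minimal wiring follows; ``ChatParrotLink`` and its module path are hypothetical stand-ins, and the two overridden properties are the ones the suite expects.

.. code-block:: python

    # Minimal sketch of a standard unit-test suite; ChatParrotLink and
    # my_package are hypothetical stand-ins for a real integration.
    from langchain_tests.unit_tests import ChatModelUnitTests

    from my_package.chat_models import ChatParrotLink  # hypothetical


    class TestChatParrotLinkUnit(ChatModelUnitTests):
        @property
        def chat_model_class(self) -> type[ChatParrotLink]:
            return ChatParrotLink

        @property
        def chat_model_params(self) -> dict:
            # Exercised by the suite's init and standard-parameter tests.
            return {"model": "bird-brain-001", "temperature": 0}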