fix(docs): capitalization, codeblock formatting, and hyperlinks, note blocks (#32235)
widespread cleanup attempt
@@ -42,7 +42,7 @@ class AzureChatOpenAI(BaseChatOpenAI):
 """Azure OpenAI chat model integration.

 Setup:
-Head to the https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart?tabs=command-line%2Cpython-new&pivots=programming-language-python
+Head to the Azure `OpenAI quickstart guide <https://learn.microsoft.com/en-us/azure/ai-foundry/openai/chatgpt-quickstart?tabs=keyless%2Ctypescript-keyless%2Cpython-new%2Ccommand-line&pivots=programming-language-python>`__
 to create your Azure OpenAI deployment.

 Then install ``langchain-openai`` and set environment variables
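The install and environment-variable block itself falls outside this hunk; as a minimal sketch of equivalent setup (the key, endpoint, deployment name, and API version below are placeholders, not values from the diff):

.. code-block:: python

    # pip install -U langchain-openai
    import os

    from langchain_openai import AzureChatOpenAI

    # Environment variables the class reads automatically (placeholder values).
    os.environ["AZURE_OPENAI_API_KEY"] = "..."
    os.environ["AZURE_OPENAI_ENDPOINT"] = "https://example-resource.azure.openai.com/"

    # Hypothetical deployment name and assumed API version string.
    llm = AzureChatOpenAI(
        azure_deployment="my-gpt-4o-deployment",
        api_version="2024-08-01-preview",
    )
    llm.invoke("Hello!")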
@@ -68,8 +68,7 @@ class AzureChatOpenAI(BaseChatOpenAI):
 Key init args — client params:
 api_version: str
 Azure OpenAI REST API version to use (distinct from the version of the
-underlying model). See more on the different versions here:
-https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
+underlying model). `See more on the different versions. <https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning>`__
 timeout: Union[float, Tuple[float, float], Any, None]
 Timeout for requests.
 max_retries: Optional[int]
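As a rough illustration of the client params named in this hunk (all values are illustrative, not prescribed by the docs):

.. code-block:: python

    # Sketch only: timeout may be a single float or a (connect, read) tuple,
    # and max_retries caps client-side retries.
    llm = AzureChatOpenAI(
        azure_deployment="my-deployment",      # hypothetical deployment name
        api_version="2024-08-01-preview",      # assumed API version string
        timeout=(5.0, 30.0),
        max_retries=2,
    )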
@@ -104,9 +103,13 @@ class AzureChatOpenAI(BaseChatOpenAI):
 # other params...
 )

-**NOTE**: Any param which is not explicitly supported will be passed directly to the
-``openai.AzureOpenAI.chat.completions.create(...)`` API every time to the model is
-invoked. For example:
+.. note::
+Any param which is not explicitly supported will be passed directly to the
+``openai.AzureOpenAI.chat.completions.create(...)`` API every time to the model is
+invoked.
+
+For example:

 .. code-block:: python

 from langchain_openai import AzureChatOpenAI
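# The rest of the original example is outside this hunk. A hedged sketch of how
# an extra, not-explicitly-modeled parameter is forwarded; `logprobs` and the
# deployment/API-version values are illustrative only.
llm = AzureChatOpenAI(
    azure_deployment="my-deployment",
    api_version="2024-08-01-preview",
    logprobs=True,  # forwarded to chat.completions.create(...) on each call
)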
@@ -470,21 +473,23 @@ class AzureChatOpenAI(BaseChatOpenAI):
 )
 """Your Azure endpoint, including the resource.

-Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.
+Automatically inferred from env var ``AZURE_OPENAI_ENDPOINT`` if not provided.

-Example: `https://example-resource.azure.openai.com/`
+Example: ``https://example-resource.azure.openai.com/``
 """
 deployment_name: Union[str, None] = Field(default=None, alias="azure_deployment")
 """A model deployment.

-If given sets the base client URL to include `/deployments/{azure_deployment}`.
-Note: this means you won't be able to use non-deployment endpoints.
+If given sets the base client URL to include ``/deployments/{azure_deployment}``
+
+.. note::
+This means you won't be able to use non-deployment endpoints.
 """
 openai_api_version: Optional[str] = Field(
 alias="api_version",
 default_factory=from_env("OPENAI_API_VERSION", default=None),
 )
-"""Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
+"""Automatically inferred from env var ``OPENAI_API_VERSION`` if not provided."""
 # Check OPENAI_API_KEY for backwards compatibility.
 # TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using
 # other forms of azure credentials.
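A small sketch of how ``azure_endpoint`` and ``azure_deployment`` combine (the endpoint is the placeholder already used in the docstring; the resulting path is how the Azure OpenAI REST API is typically addressed, not something stated in this diff):

.. code-block:: python

    from langchain_openai import AzureChatOpenAI

    llm = AzureChatOpenAI(
        azure_endpoint="https://example-resource.azure.openai.com/",
        azure_deployment="my-deployment",     # hypothetical
        api_version="2024-08-01-preview",
    )
    # Requests then target a deployment-scoped URL roughly of the form:
    #   https://example-resource.azure.openai.com/openai/deployments/my-deployment/chat/completions?api-version=...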
@@ -494,22 +499,21 @@ class AzureChatOpenAI(BaseChatOpenAI):
 ["AZURE_OPENAI_API_KEY", "OPENAI_API_KEY"], default=None
 ),
 )
-"""Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided."""
+"""Automatically inferred from env var ``AZURE_OPENAI_API_KEY`` if not provided."""
 azure_ad_token: Optional[SecretStr] = Field(
 default_factory=secret_from_env("AZURE_OPENAI_AD_TOKEN", default=None)
 )
 """Your Azure Active Directory token.

-Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
+Automatically inferred from env var ``AZURE_OPENAI_AD_TOKEN`` if not provided.

-For more:
-https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
+For more, see `this page <https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id>`__.
 """
 azure_ad_token_provider: Union[Callable[[], str], None] = None
 """A function that returns an Azure Active Directory token.

 Will be invoked on every sync request. For async requests,
-will be invoked if `azure_ad_async_token_provider` is not provided.
+will be invoked if ``azure_ad_async_token_provider`` is not provided.
 """

 azure_ad_async_token_provider: Union[Callable[[], Awaitable[str]], None] = None
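For the token-provider fields, a common pattern (assuming the separate ``azure-identity`` package, which is not part of this diff) looks roughly like:

.. code-block:: python

    from azure.identity import DefaultAzureCredential, get_bearer_token_provider
    from langchain_openai import AzureChatOpenAI

    # Fetches an Entra ID (Azure AD) token per request instead of using an API key.
    token_provider = get_bearer_token_provider(
        DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
    )

    llm = AzureChatOpenAI(
        azure_deployment="my-deployment",     # hypothetical
        api_version="2024-08-01-preview",
        azure_ad_token_provider=token_provider,
    )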
@@ -519,7 +523,7 @@ class AzureChatOpenAI(BaseChatOpenAI):
 """

 model_version: str = ""
-"""The version of the model (e.g. "0125" for gpt-3.5-0125).
+"""The version of the model (e.g. ``'0125'`` for ``'gpt-3.5-0125'``).

 Azure OpenAI doesn't return model version with the response by default so it must
 be manually specified if you want to use this information downstream, e.g. when
@@ -534,18 +538,21 @@ class AzureChatOpenAI(BaseChatOpenAI):
 openai_api_type: Optional[str] = Field(
 default_factory=from_env("OPENAI_API_TYPE", default="azure")
 )
-"""Legacy, for openai<1.0.0 support."""
+"""Legacy, for ``openai<1.0.0`` support."""

 validate_base_url: bool = True
-"""If legacy arg openai_api_base is passed in, try to infer if it is a base_url or
-azure_endpoint and update client params accordingly.
+"""If legacy arg ``openai_api_base`` is passed in, try to infer if it is a
+``base_url`` or ``azure_endpoint`` and update client params accordingly.
 """

 model_name: Optional[str] = Field(default=None, alias="model") # type: ignore[assignment]
-"""Name of the deployed OpenAI model, e.g. "gpt-4o", "gpt-35-turbo", etc.
+"""Name of the deployed OpenAI model, e.g. ``'gpt-4o'``, ``'gpt-35-turbo'``, etc.

 Distinct from the Azure deployment name, which is set by the Azure user.
-Used for tracing and token counting. Does NOT affect completion.
+Used for tracing and token counting.
+
+.. warning::
+Does NOT affect completion.
 """

 disabled_params: Optional[dict[str, Any]] = Field(default=None)
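Pulling ``model_name`` and ``model_version`` together, a sketch with illustrative values (Azure does not echo the version back, so it is supplied by hand for downstream use such as tracing or cost calculation):

.. code-block:: python

    from langchain_openai import AzureChatOpenAI

    llm = AzureChatOpenAI(
        azure_deployment="my-gpt-35-deployment",  # hypothetical
        api_version="2024-08-01-preview",
        model_name="gpt-35-turbo",   # used for tracing/token counting only
        model_version="0125",        # manually supplied model revision
    )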
@@ -556,7 +563,7 @@ class AzureChatOpenAI(BaseChatOpenAI):
 parameter and the value is either None, meaning that parameter should never be
 used, or it's a list of disabled values for the parameter.

-For example, older models may not support the 'parallel_tool_calls' parameter at
+For example, older models may not support the ``'parallel_tool_calls'`` parameter at
 all, in which case ``disabled_params={"parallel_tool_calls: None}`` can ben passed
 in.

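A sketch of the mapping shapes described in this hunk (deployment and API version are placeholders):

.. code-block:: python

    from langchain_openai import AzureChatOpenAI

    # Map a parameter to None to disable it entirely, e.g. for models that
    # do not accept parallel tool calls.
    llm = AzureChatOpenAI(
        azure_deployment="my-deployment",
        api_version="2024-08-01-preview",
        disabled_params={"parallel_tool_calls": None},
    )
    # A list value would instead disable only those specific values,
    # e.g. {"some_param": ["unsupported-value"]} (illustrative shape only).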
@@ -567,7 +574,7 @@ class AzureChatOpenAI(BaseChatOpenAI):
 invocation.

 By default, unless ``model_name="gpt-4o"`` is specified, then
-'parallel_tools_calls' will be disabled.
+``'parallel_tools_calls'`` will be disabled.
 """

 @classmethod
@@ -776,8 +783,7 @@ class AzureChatOpenAI(BaseChatOpenAI):
 """Model wrapper that returns outputs formatted to match the given schema.

 Args:
-schema:
-The output schema. Can be passed in as:
+schema: The output schema. Can be passed in as:

 - a JSON Schema,
 - a TypedDict class,
@@ -793,25 +799,20 @@ class AzureChatOpenAI(BaseChatOpenAI):
 method: The method for steering model generation, one of:

-- "json_schema":
-Uses OpenAI's Structured Output API:
-https://platform.openai.com/docs/guides/structured-outputs
-Supported for "gpt-4o-mini", "gpt-4o-2024-08-06", "o1", and later
+- ``'json_schema'``:
+Uses OpenAI's `Structured Output API <https://platform.openai.com/docs/guides/structured-outputs>`__.
+Supported for ``'gpt-4o-mini'``, ``'gpt-4o-2024-08-06'``, ``'o1'``, and later
 models.
-- "function_calling":
+- ``'function_calling'``:
 Uses OpenAI's tool-calling (formerly called function calling)
-API: https://platform.openai.com/docs/guides/function-calling
-- "json_mode":
-Uses OpenAI's JSON mode. Note that if using JSON mode then you
-must include instructions for formatting the output into the
-desired schema into the model call:
-https://platform.openai.com/docs/guides/structured-outputs/json-mode
+`API <https://platform.openai.com/docs/guides/function-calling>`__
+- ``'json_mode'``:
+Uses OpenAI's `JSON mode <https://platform.openai.com/docs/guides/structured-outputs/json-mode>`__.
+Note that if using JSON mode then you must include instructions for
+formatting the output into the desired schema into the model call

 Learn more about the differences between the methods and which models
-support which methods here:
-
-- https://platform.openai.com/docs/guides/structured-outputs/structured-outputs-vs-json-mode
-- https://platform.openai.com/docs/guides/structured-outputs/function-calling-vs-response-format
+support which methods `here <https://platform.openai.com/docs/guides/structured-outputs/function-calling-vs-response-format>`__.

 include_raw:
 If False then only the parsed structured output is returned. If
@@ -819,13 +820,12 @@ class AzureChatOpenAI(BaseChatOpenAI):
 then both the raw model response (a BaseMessage) and the parsed model
 response will be returned. If an error occurs during output parsing it
 will be caught and returned as well. The final output is always a dict
-with keys "raw", "parsed", and "parsing_error".
+with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``.
 strict:

 - True:
 Model output is guaranteed to exactly match the schema.
-The input schema will also be validated according to
-https://platform.openai.com/docs/guides/structured-outputs/supported-schemas
+The input schema will also be validated according to the `supported schemas <https://platform.openai.com/docs/guides/structured-outputs/supported-schemas?api-mode=responses#supported-schemas>`__.
 - False:
 Input schema will not be validated and model output will not be
 validated.
@@ -835,17 +835,18 @@ class AzureChatOpenAI(BaseChatOpenAI):
 If schema is specified via TypedDict or JSON schema, ``strict`` is not
 enabled by default. Pass ``strict=True`` to enable it.

-Note: ``strict`` can only be non-null if ``method`` is
-``"json_schema"`` or ``"function_calling"``.
+.. note:
+``strict`` can only be non-null if ``method`` is
+``'json_schema'`` or ``'function_calling'``.
 tools:
 A list of tool-like objects to bind to the chat model. Requires that:

-- ``method`` is ``"json_schema"`` (default).
+- ``method`` is ``'json_schema'`` (default).
 - ``strict=True``
 - ``include_raw=True``

 If a model elects to call a
-tool, the resulting ``AIMessage`` in ``"raw"`` will include tool calls.
+tool, the resulting ``AIMessage`` in ``'raw'`` will include tool calls.

 .. dropdown:: Example

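The dropdown's own example body is not shown in this hunk; as a stand-in, a hedged sketch of the ``schema``, ``method``, and ``strict`` arguments described above (the ``Joke`` model, prompt, and client values are invented for illustration):

.. code-block:: python

    from langchain_openai import AzureChatOpenAI
    from pydantic import BaseModel, Field

    class Joke(BaseModel):
        setup: str = Field(description="The setup of the joke")
        punchline: str = Field(description="The punchline of the joke")

    llm = AzureChatOpenAI(
        azure_deployment="my-deployment",     # hypothetical
        api_version="2024-08-01-preview",
    )
    structured_llm = llm.with_structured_output(
        Joke, method="json_schema", strict=True
    )
    structured_llm.invoke("Tell me a joke about cats")
    # -> Joke(setup="...", punchline="...")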
@@ -887,13 +888,14 @@ class AzureChatOpenAI(BaseChatOpenAI):
 Returns:
 A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.

-| If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict.
+If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs
+an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict.

-| If ``include_raw`` is True, then Runnable outputs a dict with keys:
+If ``include_raw`` is True, then Runnable outputs a dict with keys:

-- "raw": BaseMessage
-- "parsed": None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
-- "parsing_error": Optional[BaseException]
+- ``'raw'``: BaseMessage
+- ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
+- ``'parsing_error'``: Optional[BaseException]

 .. versionchanged:: 0.1.20

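To make the ``include_raw=True`` return shape concrete, a short sketch reusing the hypothetical ``llm`` and ``Joke`` schema from the previous sketch:

.. code-block:: python

    structured_llm = llm.with_structured_output(Joke, include_raw=True)
    result = structured_llm.invoke("Tell me a joke about cats")

    result["raw"]            # the underlying AIMessage
    result["parsed"]         # a Joke instance, or None if parsing failed
    result["parsing_error"]  # None, or the exception raised during parsing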
@@ -921,7 +923,7 @@ class AzureChatOpenAI(BaseChatOpenAI):
 specify any Field metadata (like min/max constraints) and fields cannot
 have default values.

-See all constraints here: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas
+See all constraints `here <https://platform.openai.com/docs/guides/structured-outputs/supported-schemas>`__.

 .. code-block:: python