multiple: pydantic 2 compatibility, v0.3 (#26443)

Signed-off-by: ChengZi <chen.zhang@zilliz.com>
Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>
Co-authored-by: Bagatur <22008038+baskaryan@users.noreply.github.com>
Co-authored-by: Dan O'Donovan <dan.odonovan@gmail.com>
Co-authored-by: Tom Daniel Grande <tomdgrande@gmail.com>
Co-authored-by: Grande <Tom.Daniel.Grande@statsbygg.no>
Co-authored-by: Bagatur <baskaryan@gmail.com>
Co-authored-by: ccurme <chester.curme@gmail.com>
Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
Co-authored-by: Tomaz Bratanic <bratanic.tomaz@gmail.com>
Co-authored-by: ZhangShenao <15201440436@163.com>
Co-authored-by: Friso H. Kingma <fhkingma@gmail.com>
Co-authored-by: ChengZi <chen.zhang@zilliz.com>
Co-authored-by: Nuno Campos <nuno@langchain.dev>
Co-authored-by: Morgante Pell <morgantep@google.com>
Erick Friis
2024-09-13 14:38:45 -07:00
committed by GitHub
parent d9813bdbbc
commit c2a3021bb0
1402 changed files with 38318 additions and 30410 deletions
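The diffs below repeat one core migration: pydantic v1 style @root_validator(pre=False, skip_on_failure=True) class methods that read and write a values dict become pydantic v2 @model_validator(mode="after") instance methods that mutate self and return it. A minimal sketch of the pattern, assuming pydantic>=2 and typing_extensions (the model and its fields are illustrative, not part of the commit):

from pydantic import BaseModel, model_validator
from typing_extensions import Self


class ExampleModel(BaseModel):
    n: int = 1
    streaming: bool = False

    # v1 style (removed by this commit):
    #     @root_validator(pre=False, skip_on_failure=True)
    #     def validate_environment(cls, values: Dict) -> Dict:
    #         if values["n"] < 1:
    #             raise ValueError("n must be at least 1.")
    #         return values
    #
    # v2 style (added): runs after field validation and receives the
    # constructed instance instead of a values dict.
    @model_validator(mode="after")
    def validate_environment(self) -> Self:
        if self.n < 1:
            raise ValueError("n must be at least 1.")
        if self.n > 1 and self.streaming:
            raise ValueError("n must be 1 when streaming.")
        return self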

File: langchain_openai/chat_models/azure.py

@@ -31,12 +31,13 @@ from langchain_core.output_parsers.openai_tools import (
PydanticToolsParser,
)
from langchain_core.outputs import ChatResult
from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils import from_env, secret_from_env
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field, SecretStr, model_validator
from typing_extensions import Self
from langchain_openai.chat_models.base import BaseChatOpenAI
@@ -250,7 +251,7 @@ class AzureChatOpenAI(BaseChatOpenAI):
Tool calling:
.. code-block:: python
from langchain_core.pydantic_v1 import BaseModel, Field
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
@@ -305,7 +306,7 @@ class AzureChatOpenAI(BaseChatOpenAI):
from typing import Optional
from langchain_core.pydantic_v1 import BaseModel, Field
from pydantic import BaseModel, Field
class Joke(BaseModel):
@@ -494,7 +495,7 @@ class AzureChatOpenAI(BaseChatOpenAI):
default_factory=from_env("OPENAI_API_VERSION", default=None),
)
"""Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
# Check OPENAI_KEY for backwards compatibility.
# Check OPENAI_API_KEY for backwards compatibility.
# TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using
# other forms of azure credentials.
openai_api_key: Optional[SecretStr] = Field(
@@ -565,31 +566,31 @@ class AzureChatOpenAI(BaseChatOpenAI):
def is_lc_serializable(cls) -> bool:
return True
@root_validator(pre=False, skip_on_failure=True)
def validate_environment(cls, values: Dict) -> Dict:
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
if values["n"] < 1:
if self.n < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
if self.n > 1 and self.streaming:
raise ValueError("n must be 1 when streaming.")
# Check OPENAI_ORGANIZATION for backwards compatibility.
values["openai_organization"] = (
values["openai_organization"]
self.openai_organization = (
self.openai_organization
or os.getenv("OPENAI_ORG_ID")
or os.getenv("OPENAI_ORGANIZATION")
)
# For backwards compatibility. Before openai v1, no distinction was made
# between azure_endpoint and base_url (openai_api_base).
openai_api_base = values["openai_api_base"]
if openai_api_base and values["validate_base_url"]:
openai_api_base = self.openai_api_base
if openai_api_base and self.validate_base_url:
if "/openai" not in openai_api_base:
raise ValueError(
"As of openai>=1.0.0, Azure endpoints should be specified via "
"the `azure_endpoint` param not `openai_api_base` "
"(or alias `base_url`)."
)
if values["deployment_name"]:
if self.deployment_name:
raise ValueError(
"As of openai>=1.0.0, if `azure_deployment` (or alias "
"`deployment_name`) is specified then "
@@ -602,39 +603,36 @@ class AzureChatOpenAI(BaseChatOpenAI):
"Or you can equivalently specify:\n\n"
'base_url="https://xxx.openai.azure.com/openai/deployments/my-deployment"'
)
client_params = {
"api_version": values["openai_api_version"],
"azure_endpoint": values["azure_endpoint"],
"azure_deployment": values["deployment_name"],
client_params: dict = {
"api_version": self.openai_api_version,
"azure_endpoint": self.azure_endpoint,
"azure_deployment": self.deployment_name,
"api_key": (
values["openai_api_key"].get_secret_value()
if values["openai_api_key"]
else None
self.openai_api_key.get_secret_value() if self.openai_api_key else None
),
"azure_ad_token": (
values["azure_ad_token"].get_secret_value()
if values["azure_ad_token"]
else None
self.azure_ad_token.get_secret_value() if self.azure_ad_token else None
),
"azure_ad_token_provider": values["azure_ad_token_provider"],
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"azure_ad_token_provider": self.azure_ad_token_provider,
"organization": self.openai_organization,
"base_url": self.openai_api_base,
"timeout": self.request_timeout,
"max_retries": self.max_retries,
"default_headers": self.default_headers,
"default_query": self.default_query,
}
if not values.get("client"):
sync_specific = {"http_client": values["http_client"]}
values["root_client"] = openai.AzureOpenAI(**client_params, **sync_specific)
values["client"] = values["root_client"].chat.completions
if not values.get("async_client"):
async_specific = {"http_client": values["http_async_client"]}
values["root_async_client"] = openai.AsyncAzureOpenAI(
**client_params, **async_specific
if not self.client:
sync_specific = {"http_client": self.http_client}
self.root_client = openai.AzureOpenAI(**client_params, **sync_specific) # type: ignore[arg-type]
self.client = self.root_client.chat.completions
if not self.async_client:
async_specific = {"http_client": self.http_async_client}
self.root_async_client = openai.AsyncAzureOpenAI(
**client_params,
**async_specific, # type: ignore[arg-type]
)
values["async_client"] = values["root_async_client"].chat.completions
return values
self.async_client = self.root_async_client.chat.completions
return self
def bind_tools(
self,
@@ -735,7 +733,7 @@ class AzureChatOpenAI(BaseChatOpenAI):
from typing import Optional
from langchain_openai import AzureChatOpenAI
from langchain_core.pydantic_v1 import BaseModel, Field
from pydantic import BaseModel, Field
class AnswerWithJustification(BaseModel):
@@ -766,7 +764,7 @@ class AzureChatOpenAI(BaseChatOpenAI):
.. code-block:: python
from langchain_openai import AzureChatOpenAI
from langchain_core.pydantic_v1 import BaseModel
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
@@ -853,7 +851,7 @@ class AzureChatOpenAI(BaseChatOpenAI):
.. code-block::
from langchain_openai import AzureChatOpenAI
from langchain_core.pydantic_v1 import BaseModel
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
answer: str
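
The docstring changes above are user-facing: schemas passed to bind_tools or with_structured_output are now plain pydantic models rather than langchain_core.pydantic_v1 ones. A usage sketch (deployment name and API version are placeholders; Azure credentials are assumed to come from the environment):

from pydantic import BaseModel, Field

from langchain_openai import AzureChatOpenAI


class GetWeather(BaseModel):
    """Get the current weather in a given location."""

    location: str = Field(..., description="City and state, e.g. San Francisco, CA")


llm = AzureChatOpenAI(azure_deployment="my-deployment", api_version="2024-06-01")
llm_with_tools = llm.bind_tools([GetWeather])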

File: langchain_openai/chat_models/base.py

@@ -72,15 +72,10 @@ from langchain_core.output_parsers.openai_tools import (
parse_tool_call,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough, chain
from langchain_core.runnables.config import run_in_executor
from langchain_core.tools import BaseTool
from langchain_core.utils import (
convert_to_secret_str,
get_from_dict_or_env,
get_pydantic_field_names,
)
from langchain_core.utils import get_pydantic_field_names
from langchain_core.utils.function_calling import (
convert_to_openai_function,
convert_to_openai_tool,
@@ -90,7 +85,9 @@ from langchain_core.utils.pydantic import (
TypeBaseModel,
is_basemodel_subclass,
)
from langchain_core.utils.utils import build_extra_kwargs
from langchain_core.utils.utils import build_extra_kwargs, from_env, secret_from_env
from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
from typing_extensions import Self
logger = logging.getLogger(__name__)
@@ -387,15 +384,18 @@ class BaseChatOpenAI(BaseChatModel):
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[SecretStr] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
openai_api_key: Optional[SecretStr] = Field(
alias="api_key", default_factory=secret_from_env("OPENAI_API_KEY", default=None)
)
openai_api_base: Optional[str] = Field(default=None, alias="base_url")
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator."""
openai_organization: Optional[str] = Field(default=None, alias="organization")
"""Automatically inferred from env var `OPENAI_ORG_ID` if not provided."""
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
openai_proxy: Optional[str] = Field(
default_factory=from_env("OPENAI_PROXY", default=None)
)
request_timeout: Union[float, Tuple[float, float], Any, None] = Field(
default=None, alias="timeout"
)
@@ -454,13 +454,11 @@ class BaseChatOpenAI(BaseChatModel):
include_response_headers: bool = False
"""Whether to include response headers in the output message response_metadata."""
class Config:
"""Configuration for this pydantic object."""
model_config = ConfigDict(populate_by_name=True)
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
@@ -469,56 +467,43 @@ class BaseChatOpenAI(BaseChatModel):
)
return values
@root_validator(pre=False, skip_on_failure=True, allow_reuse=True)
def validate_environment(cls, values: Dict) -> Dict:
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
if values["n"] < 1:
if self.n < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
if self.n > 1 and self.streaming:
raise ValueError("n must be 1 when streaming.")
values["openai_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "openai_api_key", "OPENAI_API_KEY")
)
# Check OPENAI_ORGANIZATION for backwards compatibility.
values["openai_organization"] = (
values["openai_organization"]
self.openai_organization = (
self.openai_organization
or os.getenv("OPENAI_ORG_ID")
or os.getenv("OPENAI_ORGANIZATION")
)
values["openai_api_base"] = values["openai_api_base"] or os.getenv(
"OPENAI_API_BASE"
)
values["openai_proxy"] = get_from_dict_or_env(
values, "openai_proxy", "OPENAI_PROXY", default=""
)
client_params = {
self.openai_api_base = self.openai_api_base or os.getenv("OPENAI_API_BASE")
client_params: dict = {
"api_key": (
values["openai_api_key"].get_secret_value()
if values["openai_api_key"]
else None
self.openai_api_key.get_secret_value() if self.openai_api_key else None
),
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"organization": self.openai_organization,
"base_url": self.openai_api_base,
"timeout": self.request_timeout,
"max_retries": self.max_retries,
"default_headers": self.default_headers,
"default_query": self.default_query,
}
if values["openai_proxy"] and (
values["http_client"] or values["http_async_client"]
):
openai_proxy = values["openai_proxy"]
http_client = values["http_client"]
http_async_client = values["http_async_client"]
if self.openai_proxy and (self.http_client or self.http_async_client):
openai_proxy = self.openai_proxy
http_client = self.http_client
http_async_client = self.http_async_client
raise ValueError(
"Cannot specify 'openai_proxy' if one of "
"'http_client'/'http_async_client' is already specified. Received:\n"
f"{openai_proxy=}\n{http_client=}\n{http_async_client=}"
)
if not values.get("client"):
if values["openai_proxy"] and not values["http_client"]:
if not self.client:
if self.openai_proxy and not self.http_client:
try:
import httpx
except ImportError as e:
@@ -526,12 +511,12 @@ class BaseChatOpenAI(BaseChatModel):
"Could not import httpx python package. "
"Please install it with `pip install httpx`."
) from e
values["http_client"] = httpx.Client(proxy=values["openai_proxy"])
sync_specific = {"http_client": values["http_client"]}
values["root_client"] = openai.OpenAI(**client_params, **sync_specific)
values["client"] = values["root_client"].chat.completions
if not values.get("async_client"):
if values["openai_proxy"] and not values["http_async_client"]:
self.http_client = httpx.Client(proxy=self.openai_proxy)
sync_specific = {"http_client": self.http_client}
self.root_client = openai.OpenAI(**client_params, **sync_specific) # type: ignore[arg-type]
self.client = self.root_client.chat.completions
if not self.async_client:
if self.openai_proxy and not self.http_async_client:
try:
import httpx
except ImportError as e:
@@ -539,15 +524,14 @@ class BaseChatOpenAI(BaseChatModel):
"Could not import httpx python package. "
"Please install it with `pip install httpx`."
) from e
values["http_async_client"] = httpx.AsyncClient(
proxy=values["openai_proxy"]
)
async_specific = {"http_client": values["http_async_client"]}
values["root_async_client"] = openai.AsyncOpenAI(
**client_params, **async_specific
self.http_async_client = httpx.AsyncClient(proxy=self.openai_proxy)
async_specific = {"http_client": self.http_async_client}
self.root_async_client = openai.AsyncOpenAI(
**client_params,
**async_specific, # type: ignore[arg-type]
)
values["async_client"] = values["root_async_client"].chat.completions
return values
self.async_client = self.root_async_client.chat.completions
return self
@property
def _default_params(self) -> Dict[str, Any]:
@@ -1234,7 +1218,7 @@ class BaseChatOpenAI(BaseChatModel):
from typing import Optional
from langchain_openai import ChatOpenAI
from langchain_core.pydantic_v1 import BaseModel, Field
from pydantic import BaseModel, Field
class AnswerWithJustification(BaseModel):
@@ -1265,7 +1249,7 @@ class BaseChatOpenAI(BaseChatModel):
.. code-block:: python
from langchain_openai import ChatOpenAI
from langchain_core.pydantic_v1 import BaseModel
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
@@ -1355,7 +1339,7 @@ class BaseChatOpenAI(BaseChatModel):
.. code-block::
from langchain_openai import ChatOpenAI
from langchain_core.pydantic_v1 import BaseModel
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
answer: str
@@ -1658,7 +1642,7 @@ class ChatOpenAI(BaseChatOpenAI):
.. code-block:: python
from langchain_core.pydantic_v1 import BaseModel, Field
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
@@ -1744,7 +1728,7 @@ class ChatOpenAI(BaseChatOpenAI):
from typing import Optional
from langchain_core.pydantic_v1 import BaseModel, Field
from pydantic import BaseModel, Field
class Joke(BaseModel):
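
The build_extra changes above show the second recurring pattern: @root_validator(pre=True) becomes @model_validator(mode="before") stacked on an explicit @classmethod, and the inner class Config is replaced by a model_config = ConfigDict(...) assignment. A sketch under the same assumptions; the model is illustrative, the body inlines what the diff delegates to build_extra_kwargs, and protected_namespaces=() mirrors the embeddings diff to silence pydantic's model_ prefix warning:

from typing import Any, Dict

from pydantic import BaseModel, ConfigDict, Field, model_validator


class ExampleModel(BaseModel):
    # Was: class Config with allow_population_by_field_name = True
    model_config = ConfigDict(populate_by_name=True, protected_namespaces=())

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)

    @model_validator(mode="before")
    @classmethod
    def build_extra(cls, values: Dict[str, Any]) -> Any:
        # Fold unrecognized constructor kwargs into model_kwargs before
        # field validation runs.
        extra = values.get("model_kwargs", {})
        for name in list(values):
            if name not in cls.model_fields and name != "model_kwargs":
                extra[name] = values.pop(name)
        values["model_kwargs"] = extra
        return values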

File: langchain_openai/embeddings/azure.py

@@ -2,11 +2,12 @@
from __future__ import annotations
from typing import Callable, Dict, Optional, Union
from typing import Callable, Optional, Union
import openai
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import from_env, secret_from_env
from pydantic import Field, SecretStr, model_validator
from typing_extensions import Self, cast
from langchain_openai.embeddings.base import OpenAIEmbeddings
@@ -154,21 +155,21 @@ class AzureOpenAIEmbeddings(OpenAIEmbeddings):
chunk_size: int = 2048
"""Maximum number of texts to embed in each batch"""
@root_validator(pre=False, skip_on_failure=True)
def validate_environment(cls, values: Dict) -> Dict:
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
# For backwards compatibility. Before openai v1, no distinction was made
# between azure_endpoint and base_url (openai_api_base).
openai_api_base = values["openai_api_base"]
if openai_api_base and values["validate_base_url"]:
openai_api_base = self.openai_api_base
if openai_api_base and self.validate_base_url:
if "/openai" not in openai_api_base:
values["openai_api_base"] += "/openai"
self.openai_api_base = cast(str, self.openai_api_base) + "/openai"
raise ValueError(
"As of openai>=1.0.0, Azure endpoints should be specified via "
"the `azure_endpoint` param not `openai_api_base` "
"(or alias `base_url`). "
)
if values["deployment"]:
if self.deployment:
raise ValueError(
"As of openai>=1.0.0, if `deployment` (or alias "
"`azure_deployment`) is specified then "
@@ -176,39 +177,37 @@ class AzureOpenAIEmbeddings(OpenAIEmbeddings):
"Instead use `deployment` (or alias `azure_deployment`) "
"and `azure_endpoint`."
)
client_params = {
"api_version": values["openai_api_version"],
"azure_endpoint": values["azure_endpoint"],
"azure_deployment": values["deployment"],
client_params: dict = {
"api_version": self.openai_api_version,
"azure_endpoint": self.azure_endpoint,
"azure_deployment": self.deployment,
"api_key": (
values["openai_api_key"].get_secret_value()
if values["openai_api_key"]
else None
self.openai_api_key.get_secret_value() if self.openai_api_key else None
),
"azure_ad_token": (
values["azure_ad_token"].get_secret_value()
if values["azure_ad_token"]
else None
self.azure_ad_token.get_secret_value() if self.azure_ad_token else None
),
"azure_ad_token_provider": values["azure_ad_token_provider"],
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"azure_ad_token_provider": self.azure_ad_token_provider,
"organization": self.openai_organization,
"base_url": self.openai_api_base,
"timeout": self.request_timeout,
"max_retries": self.max_retries,
"default_headers": self.default_headers,
"default_query": self.default_query,
}
if not values.get("client"):
sync_specific = {"http_client": values["http_client"]}
values["client"] = openai.AzureOpenAI(
**client_params, **sync_specific
if not self.client:
sync_specific: dict = {"http_client": self.http_client}
self.client = openai.AzureOpenAI(
**client_params, # type: ignore[arg-type]
**sync_specific,
).embeddings
if not values.get("async_client"):
async_specific = {"http_client": values["http_async_client"]}
values["async_client"] = openai.AsyncAzureOpenAI(
**client_params, **async_specific
if not self.async_client:
async_specific: dict = {"http_client": self.http_async_client}
self.async_client = openai.AsyncAzureOpenAI(
**client_params, # type: ignore[arg-type]
**async_specific,
).embeddings
return values
return self
@property
def _llm_type(self) -> str:

File: langchain_openai/embeddings/base.py

@@ -20,8 +20,9 @@ from typing import (
import openai
import tiktoken
from langchain_core.embeddings import Embeddings
from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator
from langchain_core.utils import from_env, get_pydantic_field_names, secret_from_env
from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
from typing_extensions import Self
logger = logging.getLogger(__name__)
@@ -263,14 +264,13 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
"""Whether to check the token length of inputs and automatically split inputs
longer than embedding_ctx_length."""
class Config:
"""Configuration for this pydantic object."""
model_config = ConfigDict(
extra="forbid", populate_by_name=True, protected_namespaces=()
)
extra = "forbid"
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
@@ -295,41 +295,37 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
values["model_kwargs"] = extra
return values
@root_validator(pre=False, skip_on_failure=True, allow_reuse=True)
def validate_environment(cls, values: Dict) -> Dict:
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
if values["openai_api_type"] in ("azure", "azure_ad", "azuread"):
if self.openai_api_type in ("azure", "azure_ad", "azuread"):
raise ValueError(
"If you are using Azure, "
"please use the `AzureOpenAIEmbeddings` class."
)
client_params = {
client_params: dict = {
"api_key": (
values["openai_api_key"].get_secret_value()
if values["openai_api_key"]
else None
self.openai_api_key.get_secret_value() if self.openai_api_key else None
),
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"organization": self.openai_organization,
"base_url": self.openai_api_base,
"timeout": self.request_timeout,
"max_retries": self.max_retries,
"default_headers": self.default_headers,
"default_query": self.default_query,
}
if values["openai_proxy"] and (
values["http_client"] or values["http_async_client"]
):
openai_proxy = values["openai_proxy"]
http_client = values["http_client"]
http_async_client = values["http_async_client"]
if self.openai_proxy and (self.http_client or self.http_async_client):
openai_proxy = self.openai_proxy
http_client = self.http_client
http_async_client = self.http_async_client
raise ValueError(
"Cannot specify 'openai_proxy' if one of "
"'http_client'/'http_async_client' is already specified. Received:\n"
f"{openai_proxy=}\n{http_client=}\n{http_async_client=}"
)
if not values.get("client"):
if values["openai_proxy"] and not values["http_client"]:
if not self.client:
if self.openai_proxy and not self.http_client:
try:
import httpx
except ImportError as e:
@@ -337,13 +333,11 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
"Could not import httpx python package. "
"Please install it with `pip install httpx`."
) from e
values["http_client"] = httpx.Client(proxy=values["openai_proxy"])
sync_specific = {"http_client": values["http_client"]}
values["client"] = openai.OpenAI(
**client_params, **sync_specific
).embeddings
if not values.get("async_client"):
if values["openai_proxy"] and not values["http_async_client"]:
self.http_client = httpx.Client(proxy=self.openai_proxy)
sync_specific = {"http_client": self.http_client}
self.client = openai.OpenAI(**client_params, **sync_specific).embeddings # type: ignore[arg-type]
if not self.async_client:
if self.openai_proxy and not self.http_async_client:
try:
import httpx
except ImportError as e:
@@ -351,14 +345,13 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
"Could not import httpx python package. "
"Please install it with `pip install httpx`."
) from e
values["http_async_client"] = httpx.AsyncClient(
proxy=values["openai_proxy"]
)
async_specific = {"http_client": values["http_async_client"]}
values["async_client"] = openai.AsyncOpenAI(
**client_params, **async_specific
self.http_async_client = httpx.AsyncClient(proxy=self.openai_proxy)
async_specific = {"http_client": self.http_async_client}
self.async_client = openai.AsyncOpenAI(
**client_params,
**async_specific, # type: ignore[arg-type]
).embeddings
return values
return self
@property
def _invocation_params(self) -> Dict[str, Any]:
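
The third pattern, visible in the openai_api_key and openai_proxy fields of these diffs: environment lookups move out of the validators and into field default_factory callables built by langchain_core.utils.from_env / secret_from_env. A sketch (the settings model is illustrative):

from typing import Optional

from langchain_core.utils import from_env, secret_from_env
from pydantic import BaseModel, Field, SecretStr


class ExampleSettings(BaseModel):
    # Falls back to the OPENAI_API_KEY env var when api_key isn't passed.
    openai_api_key: Optional[SecretStr] = Field(
        alias="api_key",
        default_factory=secret_from_env("OPENAI_API_KEY", default=None),
    )
    # Plain (non-secret) env fallback.
    openai_proxy: Optional[str] = Field(
        default_factory=from_env("OPENAI_PROXY", default=None)
    )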

File: langchain_openai/llms/azure.py

@@ -5,8 +5,9 @@ from typing import Any, Callable, Dict, List, Mapping, Optional, Union
import openai
from langchain_core.language_models import LangSmithParams
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import from_env, secret_from_env
from pydantic import Field, SecretStr, model_validator
from typing_extensions import Self, cast
from langchain_openai.llms.base import BaseOpenAI
@@ -100,29 +101,29 @@ class AzureOpenAI(BaseOpenAI):
"""Return whether this model can be serialized by Langchain."""
return True
@root_validator(pre=False, skip_on_failure=True, allow_reuse=True)
def validate_environment(cls, values: Dict) -> Dict:
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
if values["n"] < 1:
if self.n < 1:
raise ValueError("n must be at least 1.")
if values["streaming"] and values["n"] > 1:
if self.streaming and self.n > 1:
raise ValueError("Cannot stream results when n > 1.")
if values["streaming"] and values["best_of"] > 1:
if self.streaming and self.best_of > 1:
raise ValueError("Cannot stream results when best_of > 1.")
# For backwards compatibility. Before openai v1, no distinction was made
# between azure_endpoint and base_url (openai_api_base).
openai_api_base = values["openai_api_base"]
if openai_api_base and values["validate_base_url"]:
openai_api_base = self.openai_api_base
if openai_api_base and self.validate_base_url:
if "/openai" not in openai_api_base:
values["openai_api_base"] = (
values["openai_api_base"].rstrip("/") + "/openai"
self.openai_api_base = (
cast(str, self.openai_api_base).rstrip("/") + "/openai"
)
raise ValueError(
"As of openai>=1.0.0, Azure endpoints should be specified via "
"the `azure_endpoint` param not `openai_api_base` "
"(or alias `base_url`)."
)
if values["deployment_name"]:
if self.deployment_name:
raise ValueError(
"As of openai>=1.0.0, if `deployment_name` (or alias "
"`azure_deployment`) is specified then "
@@ -130,37 +131,39 @@ class AzureOpenAI(BaseOpenAI):
"Instead use `deployment_name` (or alias `azure_deployment`) "
"and `azure_endpoint`."
)
values["deployment_name"] = None
client_params = {
"api_version": values["openai_api_version"],
"azure_endpoint": values["azure_endpoint"],
"azure_deployment": values["deployment_name"],
"api_key": values["openai_api_key"].get_secret_value()
if values["openai_api_key"]
self.deployment_name = None
client_params: dict = {
"api_version": self.openai_api_version,
"azure_endpoint": self.azure_endpoint,
"azure_deployment": self.deployment_name,
"api_key": self.openai_api_key.get_secret_value()
if self.openai_api_key
else None,
"azure_ad_token": values["azure_ad_token"].get_secret_value()
if values["azure_ad_token"]
"azure_ad_token": self.azure_ad_token.get_secret_value()
if self.azure_ad_token
else None,
"azure_ad_token_provider": values["azure_ad_token_provider"],
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"azure_ad_token_provider": self.azure_ad_token_provider,
"organization": self.openai_organization,
"base_url": self.openai_api_base,
"timeout": self.request_timeout,
"max_retries": self.max_retries,
"default_headers": self.default_headers,
"default_query": self.default_query,
}
if not values.get("client"):
sync_specific = {"http_client": values["http_client"]}
values["client"] = openai.AzureOpenAI(
**client_params, **sync_specific
if not self.client:
sync_specific = {"http_client": self.http_client}
self.client = openai.AzureOpenAI(
**client_params,
**sync_specific, # type: ignore[arg-type]
).completions
if not values.get("async_client"):
async_specific = {"http_client": values["http_async_client"]}
values["async_client"] = openai.AsyncAzureOpenAI(
**client_params, **async_specific
if not self.async_client:
async_specific = {"http_client": self.http_async_client}
self.async_client = openai.AsyncAzureOpenAI(
**client_params,
**async_specific, # type: ignore[arg-type]
).completions
return values
return self
@property
def _identifying_params(self) -> Mapping[str, Any]:
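
One wrinkle of the mode="after" style shown above: the validator now mutates self, so Optional fields are narrowed with cast before string operations. A reduced sketch of the base-URL fix-up, minus the error the real code raises afterwards (field names follow the diff; the model is illustrative):

from typing import Optional, cast

from pydantic import BaseModel, model_validator
from typing_extensions import Self


class ExampleAzureConfig(BaseModel):
    openai_api_base: Optional[str] = None
    validate_base_url: bool = True

    @model_validator(mode="after")
    def check_base_url(self) -> Self:
        if self.openai_api_base and self.validate_base_url:
            if "/openai" not in self.openai_api_base:
                # cast() satisfies the type checker; the guard above
                # guarantees a str at runtime.
                self.openai_api_base = (
                    cast(str, self.openai_api_base).rstrip("/") + "/openai"
                )
        return self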

File: langchain_openai/llms/base.py

@@ -26,9 +26,10 @@ from langchain_core.callbacks import (
)
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import get_pydantic_field_names
from langchain_core.utils.utils import build_extra_kwargs, from_env, secret_from_env
from pydantic import ConfigDict, Field, SecretStr, model_validator
from typing_extensions import Self
logger = logging.getLogger(__name__)
@@ -152,13 +153,11 @@ class BaseOpenAI(BaseLLM):
"""Optional additional JSON properties to include in the request parameters when
making requests to OpenAI compatible APIs, such as vLLM."""
class Config:
"""Configuration for this pydantic object."""
model_config = ConfigDict(populate_by_name=True)
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
@@ -167,41 +166,38 @@ class BaseOpenAI(BaseLLM):
)
return values
@root_validator(pre=False, skip_on_failure=True, allow_reuse=True)
def validate_environment(cls, values: Dict) -> Dict:
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
if values["n"] < 1:
if self.n < 1:
raise ValueError("n must be at least 1.")
if values["streaming"] and values["n"] > 1:
if self.streaming and self.n > 1:
raise ValueError("Cannot stream results when n > 1.")
if values["streaming"] and values["best_of"] > 1:
if self.streaming and self.best_of > 1:
raise ValueError("Cannot stream results when best_of > 1.")
client_params = {
client_params: dict = {
"api_key": (
values["openai_api_key"].get_secret_value()
if values["openai_api_key"]
else None
self.openai_api_key.get_secret_value() if self.openai_api_key else None
),
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"organization": self.openai_organization,
"base_url": self.openai_api_base,
"timeout": self.request_timeout,
"max_retries": self.max_retries,
"default_headers": self.default_headers,
"default_query": self.default_query,
}
if not values.get("client"):
sync_specific = {"http_client": values["http_client"]}
values["client"] = openai.OpenAI(
**client_params, **sync_specific
).completions
if not values.get("async_client"):
async_specific = {"http_client": values["http_async_client"]}
values["async_client"] = openai.AsyncOpenAI(
**client_params, **async_specific
if not self.client:
sync_specific = {"http_client": self.http_client}
self.client = openai.OpenAI(**client_params, **sync_specific).completions # type: ignore[arg-type]
if not self.async_client:
async_specific = {"http_client": self.http_async_client}
self.async_client = openai.AsyncOpenAI(
**client_params,
**async_specific, # type: ignore[arg-type]
).completions
return values
return self
@property
def _default_params(self) -> Dict[str, Any]:
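
After the migration, construction and validation behave the same from the caller's side. A quick smoke-test sketch (assumes OPENAI_API_KEY is set in the environment; the model name is a placeholder):

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
print(llm.invoke("Say hello").content)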