community[patch]: Fix missing protected_namespaces (#27076)

Fixes #26861

---------

Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>
This commit is contained in:
Tibor Reiss 2024-10-03 22:12:11 +02:00 committed by GitHub
parent 2a54448a0a
commit 47a9199fa6
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 14 additions and 4 deletions

View File

@@ -9,7 +9,7 @@ from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, LLMResult
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import BaseModel, SecretStr, model_validator, validator
from pydantic import BaseModel, ConfigDict, SecretStr, model_validator, validator
DEFAULT_TIMEOUT = 50
@@ -382,6 +382,8 @@ class AzureMLBaseEndpoint(BaseModel):
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
model_config = ConfigDict(protected_namespaces=())
@model_validator(mode="before")
@classmethod
def validate_environ(cls, values: Dict) -> Any:

View File

@@ -295,6 +295,8 @@ class LLMInputOutputAdapter:
class BedrockBase(BaseModel, ABC):
"""Base class for Bedrock models."""
model_config = ConfigDict(protected_namespaces=())
client: Any = Field(exclude=True) #: :meta private:
region_name: Optional[str] = None

View File

@@ -16,7 +16,7 @@ from langchain_core.callbacks import (
)
from langchain_core.language_models.llms import LLM
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import BaseModel, Field, SecretStr, model_validator
from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
from langchain_community.llms.utils import enforce_stop_tokens
@@ -58,6 +58,8 @@ class _MinimaxEndpointClient(BaseModel):
class MinimaxCommon(BaseModel):
"""Common parameters for Minimax large language models."""
model_config = ConfigDict(protected_namespaces=())
_client: _MinimaxEndpointClient
model: str = "abab5.5-chat"
"""Model name to use."""

View File

@@ -11,7 +11,7 @@ from langchain_core.callbacks.manager import (
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.utils import pre_init
from pydantic import BaseModel, Field
from pydantic import BaseModel, ConfigDict, Field
from langchain_community.utilities.vertexai import (
create_retry_decorator,
@@ -100,6 +100,8 @@ async def acompletion_with_retry(
class _VertexAIBase(BaseModel):
model_config = ConfigDict(protected_namespaces=())
project: Optional[str] = None
"The default GCP project to use when making Vertex API calls."
location: str = "us-central1"

View File

@@ -6,12 +6,14 @@ from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import BaseModel, Field, SecretStr
from pydantic import BaseModel, ConfigDict, Field, SecretStr
class VolcEngineMaasBase(BaseModel):
"""Base class for VolcEngineMaas models."""
model_config = ConfigDict(protected_namespaces=())
client: Any = None
volc_engine_maas_ak: Optional[SecretStr] = None