core,integrations[minor]: Don't error on fields in model_kwargs (#27110)

Given the current erroring behavior, every time we moved a kwarg out of
model_kwargs and made it its own field, that was a breaking change.
This updates the behavior to support the old instantiations /
serializations.

This assumes build_extra_kwargs is not itself being used externally and
therefore does not need to be kept backwards compatible.
This commit is contained in:
Bagatur
2024-10-04 11:30:27 -07:00
committed by GitHub
parent 0495b7f441
commit 4935a14314
17 changed files with 91 additions and 68 deletions

View File

@@ -17,7 +17,7 @@ from langchain_core.utils import (
get_pydantic_field_names,
pre_init,
)
from langchain_core.utils.utils import build_extra_kwargs
from langchain_core.utils.utils import _build_model_kwargs
from pydantic import Field, SecretStr, model_validator
SUPPORTED_ROLES: List[str] = [
@@ -131,10 +131,7 @@ class ChatSnowflakeCortex(BaseChatModel):
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
values["model_kwargs"] = build_extra_kwargs(
extra, values, all_required_field_names
)
values = _build_model_kwargs(values, all_required_field_names)
return values
@pre_init

View File

@@ -26,7 +26,7 @@ from langchain_core.utils import (
get_pydantic_field_names,
pre_init,
)
from langchain_core.utils.utils import build_extra_kwargs, convert_to_secret_str
from langchain_core.utils.utils import _build_model_kwargs, convert_to_secret_str
from pydantic import ConfigDict, Field, SecretStr, model_validator
@@ -69,11 +69,8 @@ class _AnthropicCommon(BaseLanguageModel):
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict) -> Any:
extra = values.get("model_kwargs", {})
all_required_field_names = get_pydantic_field_names(cls)
values["model_kwargs"] = build_extra_kwargs(
extra, values, all_required_field_names
)
values = _build_model_kwargs(values, all_required_field_names)
return values
@pre_init

View File

@@ -8,7 +8,7 @@ from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.utils import get_pydantic_field_names, pre_init
from langchain_core.utils.utils import build_extra_kwargs
from langchain_core.utils.utils import _build_model_kwargs
from pydantic import Field, model_validator
logger = logging.getLogger(__name__)
@@ -199,10 +199,7 @@ class LlamaCpp(LLM):
def build_model_kwargs(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
values["model_kwargs"] = build_extra_kwargs(
extra, values, all_required_field_names
)
values = _build_model_kwargs(values, all_required_field_names)
return values
@property

View File

@@ -34,7 +34,7 @@ from langchain_core.utils import (
pre_init,
)
from langchain_core.utils.pydantic import get_fields
from langchain_core.utils.utils import build_extra_kwargs
from langchain_core.utils.utils import _build_model_kwargs
from pydantic import ConfigDict, Field, model_validator
from langchain_community.utils.openai import is_openai_v1
@@ -268,10 +268,7 @@ class BaseOpenAI(BaseLLM):
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
values["model_kwargs"] = build_extra_kwargs(
extra, values, all_required_field_names
)
values = _build_model_kwargs(values, all_required_field_names)
return values
@pre_init

View File

@@ -33,9 +33,12 @@ def test_anthropic_model_kwargs() -> None:
@pytest.mark.requires("anthropic")
def test_anthropic_invalid_model_kwargs() -> None:
with pytest.raises(ValueError):
ChatAnthropic(model_kwargs={"max_tokens_to_sample": 5})
def test_anthropic_fields_in_model_kwargs() -> None:
"""Test that for backwards compatibility fields can be passed in as model_kwargs."""
llm = ChatAnthropic(model_kwargs={"max_tokens_to_sample": 5})
assert llm.max_tokens_to_sample == 5
llm = ChatAnthropic(model_kwargs={"max_tokens": 5})
assert llm.max_tokens_to_sample == 5
@pytest.mark.requires("anthropic")

View File

@@ -26,13 +26,12 @@ def test_openai_model_kwargs() -> None:
@pytest.mark.requires("openai")
def test_openai_invalid_model_kwargs() -> None:
with pytest.raises(ValueError):
OpenAI(model_kwargs={"model_name": "foo"})
# Test that "model" cannot be specified in kwargs
with pytest.raises(ValueError):
OpenAI(model_kwargs={"model": "gpt-3.5-turbo-instruct"})
def test_openai_fields_model_kwargs() -> None:
"""Test that for backwards compatibility fields can be passed in as model_kwargs."""
llm = OpenAI(model_kwargs={"model_name": "foo"}, api_key="foo")
assert llm.model_name == "foo"
llm = OpenAI(model_kwargs={"model": "foo"}, api_key="foo")
assert llm.model_name == "foo"
@pytest.mark.requires("openai")