core,integrations[minor]: Don't error on fields in model_kwargs (#27110)
Given the current erroring behavior, every time we moved a kwarg out of model_kwargs and made it its own field, that was a breaking change. This updates the behavior so that old instantiations / serializations keep working: a declared field passed in via model_kwargs is now moved onto the corresponding field with a warning instead of raising. This assumes build_extra_kwargs was not itself being used externally in a way that requires keeping it backwards compatible.
parent 0495b7f441
commit 4935a14314
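For illustration, a minimal sketch of the new constructor behavior, mirroring the updated unit tests further down in this diff (it assumes langchain-community and the openai package are installed; the dummy api_key is only there so the client can be constructed):

from langchain_community.llms import OpenAI

# Previously, passing a declared field through model_kwargs raised a ValueError.
# With this change the value is moved onto the field itself and a warning is
# emitted, so old instantiations and serializations keep working.
llm = OpenAI(model_kwargs={"model_name": "foo"}, api_key="foo")
assert llm.model_name == "foo"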
@@ -17,7 +17,7 @@ from langchain_core.utils import (
     get_pydantic_field_names,
     pre_init,
 )
-from langchain_core.utils.utils import build_extra_kwargs
+from langchain_core.utils.utils import _build_model_kwargs
 from pydantic import Field, SecretStr, model_validator

 SUPPORTED_ROLES: List[str] = [
@@ -131,10 +131,7 @@ class ChatSnowflakeCortex(BaseChatModel):
     def build_extra(cls, values: Dict[str, Any]) -> Any:
         """Build extra kwargs from additional params that were passed in."""
         all_required_field_names = get_pydantic_field_names(cls)
-        extra = values.get("model_kwargs", {})
-        values["model_kwargs"] = build_extra_kwargs(
-            extra, values, all_required_field_names
-        )
+        values = _build_model_kwargs(values, all_required_field_names)
         return values

     @pre_init
@@ -26,7 +26,7 @@ from langchain_core.utils import (
     get_pydantic_field_names,
     pre_init,
 )
-from langchain_core.utils.utils import build_extra_kwargs, convert_to_secret_str
+from langchain_core.utils.utils import _build_model_kwargs, convert_to_secret_str
 from pydantic import ConfigDict, Field, SecretStr, model_validator


@@ -69,11 +69,8 @@ class _AnthropicCommon(BaseLanguageModel):
     @model_validator(mode="before")
     @classmethod
     def build_extra(cls, values: Dict) -> Any:
-        extra = values.get("model_kwargs", {})
         all_required_field_names = get_pydantic_field_names(cls)
-        values["model_kwargs"] = build_extra_kwargs(
-            extra, values, all_required_field_names
-        )
+        values = _build_model_kwargs(values, all_required_field_names)
         return values

     @pre_init
@@ -8,7 +8,7 @@ from langchain_core.callbacks import CallbackManagerForLLMRun
 from langchain_core.language_models.llms import LLM
 from langchain_core.outputs import GenerationChunk
 from langchain_core.utils import get_pydantic_field_names, pre_init
-from langchain_core.utils.utils import build_extra_kwargs
+from langchain_core.utils.utils import _build_model_kwargs
 from pydantic import Field, model_validator

 logger = logging.getLogger(__name__)
@@ -199,10 +199,7 @@ class LlamaCpp(LLM):
     def build_model_kwargs(cls, values: Dict[str, Any]) -> Any:
         """Build extra kwargs from additional params that were passed in."""
         all_required_field_names = get_pydantic_field_names(cls)
-        extra = values.get("model_kwargs", {})
-        values["model_kwargs"] = build_extra_kwargs(
-            extra, values, all_required_field_names
-        )
+        values = _build_model_kwargs(values, all_required_field_names)
         return values

     @property
@@ -34,7 +34,7 @@ from langchain_core.utils import (
     pre_init,
 )
 from langchain_core.utils.pydantic import get_fields
-from langchain_core.utils.utils import build_extra_kwargs
+from langchain_core.utils.utils import _build_model_kwargs
 from pydantic import ConfigDict, Field, model_validator

 from langchain_community.utils.openai import is_openai_v1
@@ -268,10 +268,7 @@ class BaseOpenAI(BaseLLM):
     def build_extra(cls, values: Dict[str, Any]) -> Any:
         """Build extra kwargs from additional params that were passed in."""
         all_required_field_names = get_pydantic_field_names(cls)
-        extra = values.get("model_kwargs", {})
-        values["model_kwargs"] = build_extra_kwargs(
-            extra, values, all_required_field_names
-        )
+        values = _build_model_kwargs(values, all_required_field_names)
         return values

     @pre_init
@@ -33,9 +33,12 @@ def test_anthropic_model_kwargs() -> None:


 @pytest.mark.requires("anthropic")
-def test_anthropic_invalid_model_kwargs() -> None:
-    with pytest.raises(ValueError):
-        ChatAnthropic(model_kwargs={"max_tokens_to_sample": 5})
+def test_anthropic_fields_in_model_kwargs() -> None:
+    """Test that for backwards compatibility fields can be passed in as model_kwargs."""
+    llm = ChatAnthropic(model_kwargs={"max_tokens_to_sample": 5})
+    assert llm.max_tokens_to_sample == 5
+    llm = ChatAnthropic(model_kwargs={"max_tokens": 5})
+    assert llm.max_tokens_to_sample == 5


 @pytest.mark.requires("anthropic")
@@ -26,13 +26,12 @@ def test_openai_model_kwargs() -> None:


 @pytest.mark.requires("openai")
-def test_openai_invalid_model_kwargs() -> None:
-    with pytest.raises(ValueError):
-        OpenAI(model_kwargs={"model_name": "foo"})
-
-    # Test that "model" cannot be specified in kwargs
-    with pytest.raises(ValueError):
-        OpenAI(model_kwargs={"model": "gpt-3.5-turbo-instruct"})
+def test_openai_fields_model_kwargs() -> None:
+    """Test that for backwards compatibility fields can be passed in as model_kwargs."""
+    llm = OpenAI(model_kwargs={"model_name": "foo"}, api_key="foo")
+    assert llm.model_name == "foo"
+    llm = OpenAI(model_kwargs={"model": "foo"}, api_key="foo")
+    assert llm.model_name == "foo"


 @pytest.mark.requires("openai")
@@ -32,6 +32,7 @@ from langchain_core.utils.utils import (
 )

 __all__ = [
+    "build_extra_kwargs",
     "StrictFormatter",
     "check_package_version",
     "convert_to_secret_str",
@@ -46,7 +47,6 @@ __all__ = [
     "raise_for_status_with_text",
     "xor_args",
     "try_load_from_hub",
-    "build_extra_kwargs",
     "image",
     "get_from_env",
     "get_from_dict_or_env",
@@ -210,6 +210,51 @@ def get_pydantic_field_names(pydantic_cls: Any) -> set[str]:
     return all_required_field_names


+def _build_model_kwargs(
+    values: dict[str, Any],
+    all_required_field_names: set[str],
+) -> dict[str, Any]:
+    """Build "model_kwargs" param from Pydantic constructor values.
+
+    Args:
+        values: All init args passed in by user.
+        all_required_field_names: All required field names for the pydantic class.
+
+    Returns:
+        Dict[str, Any]: Extra kwargs.
+
+    Raises:
+        ValueError: If a field is specified in both values and extra_kwargs.
+        ValueError: If a field is specified in model_kwargs.
+    """
+    extra_kwargs = values.get("model_kwargs", {})
+    for field_name in list(values):
+        if field_name in extra_kwargs:
+            raise ValueError(f"Found {field_name} supplied twice.")
+        if field_name not in all_required_field_names:
+            warnings.warn(
+                f"""WARNING! {field_name} is not default parameter.
+                {field_name} was transferred to model_kwargs.
+                Please confirm that {field_name} is what you intended.""",
+                stacklevel=7,
+            )
+            extra_kwargs[field_name] = values.pop(field_name)
+
+    invalid_model_kwargs = all_required_field_names.intersection(extra_kwargs.keys())
+    if invalid_model_kwargs:
+        warnings.warn(
+            f"Parameters {invalid_model_kwargs} should be specified explicitly. "
+            f"Instead they were passed in as part of `model_kwargs` parameter.",
+            stacklevel=7,
+        )
+        for k in invalid_model_kwargs:
+            values[k] = extra_kwargs.pop(k)
+
+    values["model_kwargs"] = extra_kwargs
+    return values
+
+
+# DON'T USE! Kept for backwards-compatibility but should never have been public.
 def build_extra_kwargs(
     extra_kwargs: dict[str, Any],
     values: dict[str, Any],
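As a quick illustration of what the new helper returns (a sketch, not part of the commit; it assumes a langchain-core version that includes the private `_build_model_kwargs` added above):

from langchain_core.utils.utils import _build_model_kwargs

# "max_tokens" is a declared field here, so instead of raising (the old
# build_extra_kwargs behavior) it is hoisted out of model_kwargs with a warning,
# while the unknown "foo" key is moved into model_kwargs, also with a warning.
values = {"temperature": 0.2, "foo": "bar", "model_kwargs": {"max_tokens": 5}}
fields = {"temperature", "max_tokens", "model_kwargs"}
values = _build_model_kwargs(values, fields)
assert values == {"temperature": 0.2, "max_tokens": 5, "model_kwargs": {"foo": "bar"}}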
@@ -17,8 +17,8 @@ EXPECTED_ALL = [
     "raise_for_status_with_text",
     "xor_args",
     "try_load_from_hub",
-    "build_extra_kwargs",
     "image",
+    "build_extra_kwargs",
     "get_from_dict_or_env",
     "get_from_env",
     "stringify_dict",
@@ -56,13 +56,13 @@ from langchain_core.runnables import (
 )
 from langchain_core.tools import BaseTool
 from langchain_core.utils import (
-    build_extra_kwargs,
     from_env,
     get_pydantic_field_names,
     secret_from_env,
 )
 from langchain_core.utils.function_calling import convert_to_openai_tool
 from langchain_core.utils.pydantic import is_basemodel_subclass
+from langchain_core.utils.utils import _build_model_kwargs
 from pydantic import (
     BaseModel,
     ConfigDict,
@@ -646,11 +646,8 @@ class ChatAnthropic(BaseChatModel):
     @model_validator(mode="before")
     @classmethod
     def build_extra(cls, values: Dict) -> Any:
-        extra = values.get("model_kwargs", {})
         all_required_field_names = get_pydantic_field_names(cls)
-        values["model_kwargs"] = build_extra_kwargs(
-            extra, values, all_required_field_names
-        )
+        values = _build_model_kwargs(values, all_required_field_names)
         return values

     @model_validator(mode="after")
@@ -25,7 +25,7 @@ from langchain_core.utils import (
     get_pydantic_field_names,
 )
 from langchain_core.utils.utils import (
-    build_extra_kwargs,
+    _build_model_kwargs,
     from_env,
     secret_from_env,
 )
@@ -88,11 +88,8 @@ class _AnthropicCommon(BaseLanguageModel):
     @model_validator(mode="before")
     @classmethod
     def build_extra(cls, values: Dict) -> Any:
-        extra = values.get("model_kwargs", {})
         all_required_field_names = get_pydantic_field_names(cls)
-        values["model_kwargs"] = build_extra_kwargs(
-            extra, values, all_required_field_names
-        )
+        values = _build_model_kwargs(values, all_required_field_names)
         return values

     @model_validator(mode="after")
@@ -61,9 +61,12 @@ def test_anthropic_model_kwargs() -> None:


 @pytest.mark.requires("anthropic")
-def test_anthropic_invalid_model_kwargs() -> None:
-    with pytest.raises(ValueError):
-        ChatAnthropic(model="foo", model_kwargs={"max_tokens_to_sample": 5})  # type: ignore[call-arg]
+def test_anthropic_fields_in_model_kwargs() -> None:
+    """Test that for backwards compatibility fields can be passed in as model_kwargs."""
+    llm = ChatAnthropic(model="foo", model_kwargs={"max_tokens_to_sample": 5})  # type: ignore[call-arg]
+    assert llm.max_tokens == 5
+    llm = ChatAnthropic(model="foo", model_kwargs={"max_tokens": 5})  # type: ignore[call-arg]
+    assert llm.max_tokens == 5


 @pytest.mark.requires("anthropic")
@@ -79,7 +79,7 @@ from langchain_core.utils.function_calling import (
     convert_to_openai_tool,
 )
 from langchain_core.utils.pydantic import is_basemodel_subclass
-from langchain_core.utils.utils import build_extra_kwargs, from_env, secret_from_env
+from langchain_core.utils.utils import _build_model_kwargs, from_env, secret_from_env
 from pydantic import (
     BaseModel,
     ConfigDict,
@@ -366,10 +366,7 @@ class ChatFireworks(BaseChatModel):
     def build_extra(cls, values: Dict[str, Any]) -> Any:
         """Build extra kwargs from additional params that were passed in."""
         all_required_field_names = get_pydantic_field_names(cls)
-        extra = values.get("model_kwargs", {})
-        values["model_kwargs"] = build_extra_kwargs(
-            extra, values, all_required_field_names
-        )
+        values = _build_model_kwargs(values, all_required_field_names)
         return values

     @model_validator(mode="after")
@@ -11,7 +11,7 @@ from langchain_core.callbacks import (
 )
 from langchain_core.language_models.llms import LLM
 from langchain_core.utils import get_pydantic_field_names
-from langchain_core.utils.utils import build_extra_kwargs, secret_from_env
+from langchain_core.utils.utils import _build_model_kwargs, secret_from_env
 from pydantic import ConfigDict, Field, SecretStr, model_validator

 from langchain_fireworks.version import __version__
@@ -93,10 +93,7 @@ class Fireworks(LLM):
     def build_extra(cls, values: Dict[str, Any]) -> Any:
         """Build extra kwargs from additional params that were passed in."""
         all_required_field_names = get_pydantic_field_names(cls)
-        extra = values.get("model_kwargs", {})
-        values["model_kwargs"] = build_extra_kwargs(
-            extra, values, all_required_field_names
-        )
+        values = _build_model_kwargs(values, all_required_field_names)
         return values

     @property
@@ -90,7 +90,7 @@ from langchain_core.utils.pydantic import (
     TypeBaseModel,
     is_basemodel_subclass,
 )
-from langchain_core.utils.utils import build_extra_kwargs, from_env, secret_from_env
+from langchain_core.utils.utils import _build_model_kwargs, from_env, secret_from_env
 from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
 from typing_extensions import Self

@@ -477,10 +477,7 @@ class BaseChatOpenAI(BaseChatModel):
     def build_extra(cls, values: Dict[str, Any]) -> Any:
         """Build extra kwargs from additional params that were passed in."""
         all_required_field_names = get_pydantic_field_names(cls)
-        extra = values.get("model_kwargs", {})
-        values["model_kwargs"] = build_extra_kwargs(
-            extra, values, all_required_field_names
-        )
+        values = _build_model_kwargs(values, all_required_field_names)
         return values

     @model_validator(mode="after")
@@ -27,7 +27,7 @@ from langchain_core.callbacks import (
 from langchain_core.language_models.llms import BaseLLM
 from langchain_core.outputs import Generation, GenerationChunk, LLMResult
 from langchain_core.utils import get_pydantic_field_names
-from langchain_core.utils.utils import build_extra_kwargs, from_env, secret_from_env
+from langchain_core.utils.utils import _build_model_kwargs, from_env, secret_from_env
 from pydantic import ConfigDict, Field, SecretStr, model_validator
 from typing_extensions import Self

@@ -160,10 +160,7 @@ class BaseOpenAI(BaseLLM):
     def build_extra(cls, values: Dict[str, Any]) -> Any:
         """Build extra kwargs from additional params that were passed in."""
         all_required_field_names = get_pydantic_field_names(cls)
-        extra = values.get("model_kwargs", {})
-        values["model_kwargs"] = build_extra_kwargs(
-            extra, values, all_required_field_names
-        )
+        values = _build_model_kwargs(values, all_required_field_names)
         return values

     @model_validator(mode="after")
@@ -30,9 +30,12 @@ def test_openai_model_kwargs() -> None:
     assert llm.model_kwargs == {"foo": "bar"}


-def test_openai_invalid_model_kwargs() -> None:
-    with pytest.raises(ValueError):
-        OpenAI(model_kwargs={"model_name": "foo"})
+def test_openai_fields_in_model_kwargs() -> None:
+    """Test that for backwards compatibility fields can be passed in as model_kwargs."""
+    llm = OpenAI(model_kwargs={"model_name": "foo"})
+    assert llm.model_name == "foo"
+    llm = OpenAI(model_kwargs={"model": "foo"})
+    assert llm.model_name == "foo"


 def test_openai_incorrect_field() -> None: