experimental[major]: upgrade pydantic (#26228)

This commit is contained in:
ccurme
2024-09-09 14:27:24 -04:00
committed by GitHub
parent 109ba548bd
commit 6c8d626d70
45 changed files with 183 additions and 170 deletions

View File

@@ -18,8 +18,7 @@ from langchain_core.messages import (
BaseMessage,
SystemMessage,
)
-from langchain_experimental.pydantic_v1 import root_validator
+from pydantic import model_validator
prompt = """In addition to responding, you can use tools. \
You have access to the following tools.
@@ -134,8 +133,9 @@ class AnthropicFunctions(BaseChatModel):
llm: BaseChatModel
-@root_validator(pre=True)
-def validate_environment(cls, values: Dict) -> Dict:
+@model_validator(mode="before")
+@classmethod
+def validate_environment(cls, values: Dict) -> Any:
values["llm"] = values.get("llm") or ChatAnthropic(**values)
return values

View File

@@ -7,8 +7,7 @@ from typing import TYPE_CHECKING, Any, List, Optional, cast
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
-from langchain_experimental.pydantic_v1 import Field, root_validator
+from pydantic import Field, model_validator
if TYPE_CHECKING:
import jsonformer
@@ -38,10 +37,9 @@ class JsonFormer(HuggingFacePipeline):
)
debug: bool = Field(default=False, description="Debug mode.")
-# TODO: move away from `root_validator` since it is deprecated in pydantic v2
-# and causes mypy type-checking failures (hence the `type: ignore`)
-@root_validator # type: ignore[call-overload]
-def check_jsonformer_installation(cls, values: dict) -> dict:
+@model_validator(mode="before")
+@classmethod
+def check_jsonformer_installation(cls, values: dict) -> Any:
import_jsonformer()
return values

View File

@@ -7,8 +7,7 @@ from typing import TYPE_CHECKING, Any, List, Optional
from langchain.schema import LLMResult
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
-from langchain_experimental.pydantic_v1 import Field
+from pydantic import Field
if TYPE_CHECKING:
import lmformatenforcer

View File

@@ -31,14 +31,14 @@ from langchain_core.output_parsers.json import JsonOutputParser
from langchain_core.output_parsers.pydantic import PydanticOutputParser
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.prompts import SystemMessagePromptTemplate
-from langchain_core.pydantic_v1 import (
-BaseModel,
-)
from langchain_core.runnables import Runnable, RunnableLambda
from langchain_core.runnables.base import RunnableMap
from langchain_core.runnables.passthrough import RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.pydantic import is_basemodel_instance, is_basemodel_subclass
+from pydantic import (
+BaseModel,
+)
DEFAULT_SYSTEM_TEMPLATE = """You have access to the following tools:
@@ -83,14 +83,14 @@ def convert_to_ollama_tool(tool: Any) -> Dict:
"""Convert a tool to an Ollama tool."""
description = None
if _is_pydantic_class(tool):
-schema = tool.construct().schema()
+schema = tool.model_construct().model_json_schema()
name = schema["title"]
elif isinstance(tool, BaseTool):
-schema = tool.tool_call_schema.schema()
+schema = tool.tool_call_schema.model_json_schema()
name = tool.get_name()
description = tool.description
elif is_basemodel_instance(tool):
-schema = tool.get_input_schema().schema()
+schema = tool.get_input_schema().model_json_schema()
name = tool.get_name()
description = tool.description
elif isinstance(tool, dict) and "name" in tool and "parameters" in tool:
@@ -192,7 +192,7 @@ class OllamaFunctions(ChatOllama):
.. code-block:: python
from langchain_experimental.llms import OllamaFunctions
-from langchain_core.pydantic_v1 import BaseModel
+from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
@@ -213,7 +213,7 @@ class OllamaFunctions(ChatOllama):
.. code-block:: python
from langchain_experimental.llms import OllamaFunctions
-from langchain_core.pydantic_v1 import BaseModel
+from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
@@ -234,7 +234,7 @@ class OllamaFunctions(ChatOllama):
.. code-block:: python
from langchain_experimental.llms import OllamaFunctions, convert_to_ollama_tool
-from langchain_core.pydantic_v1 import BaseModel
+from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''

View File

@@ -7,8 +7,7 @@ from typing import TYPE_CHECKING, Any, List, Optional, cast
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_community.llms.utils import enforce_stop_tokens
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
-from langchain_experimental.pydantic_v1 import Field, root_validator
+from pydantic import Field, model_validator
if TYPE_CHECKING:
import rellm
@@ -40,10 +39,9 @@ class RELLM(HuggingFacePipeline):
default=200, description="Maximum number of new tokens to generate."
)
-# TODO: move away from `root_validator` since it is deprecated in pydantic v2
-# and causes mypy type-checking failures (hence the `type: ignore`)
-@root_validator # type: ignore[call-overload]
-def check_rellm_installation(cls, values: dict) -> dict:
+@model_validator(mode="before")
+@classmethod
+def check_rellm_installation(cls, values: dict) -> Any:
import_rellm()
return values