Mirror of https://github.com/hwchase17/langchain.git, synced 2025-09-01 11:02:37 +00:00.
core,groq,openai,mistralai,robocorp,fireworks,anthropic[patch]: Update BaseModel subclass and instance checks to handle both v1 and proper namespaces (#24417)
After this PR chat models will correctly handle pydantic 2 with bind_tools and with_structured_output. ```python import pydantic print(pydantic.__version__) ``` 2.8.2 ```python from langchain_openai import ChatOpenAI from pydantic import BaseModel, Field class Add(BaseModel): x: int y: int model = ChatOpenAI().bind_tools([Add]) print(model.invoke('2 + 5').tool_calls) model = ChatOpenAI().with_structured_output(Add) print(type(model.invoke('2 + 5'))) ``` ``` [{'name': 'Add', 'args': {'x': 2, 'y': 5}, 'id': 'call_PNUFa4pdfNOYXxIMHc6ps2Do', 'type': 'tool_call'}] <class '__main__.Add'> ``` ```python from langchain_openai import ChatOpenAI from pydantic.v1 import BaseModel, Field class Add(BaseModel): x: int y: int model = ChatOpenAI().bind_tools([Add]) print(model.invoke('2 + 5').tool_calls) model = ChatOpenAI().with_structured_output(Add) print(type(model.invoke('2 + 5'))) ``` ```python [{'name': 'Add', 'args': {'x': 2, 'y': 5}, 'id': 'call_hhiHYP441cp14TtrHKx3Upg0', 'type': 'tool_call'}] <class '__main__.Add'> ``` Addresses issues: https://github.com/langchain-ai/langchain/issues/22782 --------- Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>
This commit is contained in:
@@ -34,11 +34,14 @@ from langchain_core.output_parsers.json import JsonOutputParser
|
||||
from langchain_core.output_parsers.pydantic import PydanticOutputParser
|
||||
from langchain_core.outputs import ChatGeneration, ChatResult
|
||||
from langchain_core.prompts import SystemMessagePromptTemplate
|
||||
from langchain_core.pydantic_v1 import BaseModel
|
||||
from langchain_core.pydantic_v1 import (
|
||||
BaseModel,
|
||||
)
|
||||
from langchain_core.runnables import Runnable, RunnableLambda
|
||||
from langchain_core.runnables.base import RunnableMap
|
||||
from langchain_core.runnables.passthrough import RunnablePassthrough
|
||||
from langchain_core.tools import BaseTool
|
||||
from langchain_core.utils.pydantic import is_basemodel_instance, is_basemodel_subclass
|
||||
|
||||
DEFAULT_SYSTEM_TEMPLATE = """You have access to the following tools:
|
||||
|
||||
@@ -75,14 +78,10 @@ _DictOrPydantic = Union[Dict, _BM]
|
||||
|
||||
def _is_pydantic_class(obj: Any) -> bool:
    """Return True if ``obj`` is a pydantic model *class* (not an instance).

    ``is_basemodel_subclass`` recognizes models from both the pydantic v2
    namespace and the ``pydantic.v1`` compatibility namespace, which is the
    point of this check (plain ``issubclass(obj, BaseModel)`` only matched one
    of the two). The ``BaseModel in obj.__bases__`` fallback keeps direct
    subclasses of the locally pinned ``BaseModel`` recognized as well.

    Args:
        obj: Any value; non-types short-circuit to ``False`` via the
            ``isinstance(obj, type)`` guard.

    Returns:
        ``True`` when ``obj`` is a pydantic model class, else ``False``.
    """
    # NOTE(review): reconstructed from diff residue — the hunk replaced the
    # old ``issubclass(obj, BaseModel)`` test with ``is_basemodel_subclass``.
    return isinstance(obj, type) and (
        is_basemodel_subclass(obj) or BaseModel in obj.__bases__
    )
|
||||
|
||||
|
||||
def _is_pydantic_object(obj: Any) -> bool:
    """Return True if ``obj`` is an *instance* of the pinned ``BaseModel``.

    NOTE(review): this only matches instances of the ``BaseModel`` imported in
    this module; elsewhere in the diff the call site was switched to
    ``is_basemodel_instance`` to also cover the other pydantic namespace —
    confirm whether this helper is still referenced before relying on it.
    """
    return isinstance(obj, BaseModel)
|
||||
|
||||
|
||||
def convert_to_ollama_tool(tool: Any) -> Dict:
|
||||
"""Convert a tool to an Ollama tool."""
|
||||
description = None
|
||||
@@ -93,7 +92,7 @@ def convert_to_ollama_tool(tool: Any) -> Dict:
|
||||
schema = tool.tool_call_schema.schema()
|
||||
name = tool.get_name()
|
||||
description = tool.description
|
||||
elif _is_pydantic_object(tool):
|
||||
elif is_basemodel_instance(tool):
|
||||
schema = tool.get_input_schema().schema()
|
||||
name = tool.get_name()
|
||||
description = tool.description
|
||||
|
@@ -1,11 +1,12 @@
|
||||
import asyncio
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
from typing import Any, Dict, List, Optional, Union, cast
|
||||
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.pydantic_v1 import BaseModel, root_validator
|
||||
from langchain_core.language_models import BaseLanguageModel
|
||||
from langchain_core.prompts.few_shot import FewShotPromptTemplate
|
||||
from langchain_core.utils.pydantic import is_basemodel_instance
|
||||
|
||||
|
||||
class SyntheticDataGenerator(BaseModel):
|
||||
@@ -63,8 +64,10 @@ class SyntheticDataGenerator(BaseModel):
|
||||
"""Prevents duplicates by adding previously generated examples to the few shot
|
||||
list."""
|
||||
if self.template and self.template.examples:
|
||||
if isinstance(example, BaseModel):
|
||||
formatted_example = self._format_dict_to_string(example.dict())
|
||||
if is_basemodel_instance(example):
|
||||
formatted_example = self._format_dict_to_string(
|
||||
cast(BaseModel, example).dict()
|
||||
)
|
||||
elif isinstance(example, dict):
|
||||
formatted_example = self._format_dict_to_string(example)
|
||||
else:
|
||||
|
Reference in New Issue
Block a user