diff --git a/docs/extras/modules/chains/how_to/custom_chain.ipynb b/docs/extras/modules/chains/how_to/custom_chain.ipynb index 0c305a3cd42..9034e005966 100644 --- a/docs/extras/modules/chains/how_to/custom_chain.ipynb +++ b/docs/extras/modules/chains/how_to/custom_chain.ipynb @@ -28,7 +28,7 @@ "\n", "from pydantic import Extra\n", - "from langchain.base_language import BaseLanguageModel\n", + "from langchain.schema import BaseLanguageModel\n", "from langchain.callbacks.manager import (\n", " AsyncCallbackManagerForChainRun,\n", " CallbackManagerForChainRun,\n", diff --git a/langchain/agents/agent.py b/langchain/agents/agent.py index 63837514762..807101e074e 100644 --- a/langchain/agents/agent.py +++ b/langchain/agents/agent.py @@ -14,7 +14,6 @@ from pydantic import BaseModel, root_validator from langchain.agents.agent_types import AgentType from langchain.agents.tools import InvalidTool -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, @@ -35,6 +34,7 @@ from langchain.schema import ( BasePromptTemplate, OutputParserException, ) +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import BaseMessage from langchain.tools.base import BaseTool from langchain.utilities.asyncio import asyncio_timeout diff --git a/langchain/agents/agent_toolkits/csv/base.py b/langchain/agents/agent_toolkits/csv/base.py index dbda4be8baf..f13494bede4 100644 --- a/langchain/agents/agent_toolkits/csv/base.py +++ b/langchain/agents/agent_toolkits/csv/base.py @@ -3,7 +3,7 @@ from typing import Any, List, Optional, Union from langchain.agents.agent import AgentExecutor from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent -from langchain.base_language import BaseLanguageModel +from langchain.schema.language_model import BaseLanguageModel def create_csv_agent( diff --git a/langchain/agents/agent_toolkits/json/base.py b/langchain/agents/agent_toolkits/json/base.py index 3a6f58e582e..67b977a6a42 100644 --- a/langchain/agents/agent_toolkits/json/base.py +++ b/langchain/agents/agent_toolkits/json/base.py @@ -6,9 +6,9 @@ from langchain.agents.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain +from langchain.schema.language_model import BaseLanguageModel def create_json_agent( diff --git a/langchain/agents/agent_toolkits/nla/tool.py b/langchain/agents/agent_toolkits/nla/tool.py index 2e79b678561..00997764dcb 100644 --- a/langchain/agents/agent_toolkits/nla/tool.py +++ b/langchain/agents/agent_toolkits/nla/tool.py @@ -4,9 +4,9 @@ from typing import Any, Optional from langchain.agents.tools import Tool -from langchain.base_language import BaseLanguageModel from langchain.chains.api.openapi.chain import OpenAPIEndpointChain from langchain.requests import Requests +from langchain.schema.language_model import BaseLanguageModel from langchain.tools.openapi.utils.api_models import APIOperation from langchain.tools.openapi.utils.openapi_utils import OpenAPISpec diff --git a/langchain/agents/agent_toolkits/nla/toolkit.py b/langchain/agents/agent_toolkits/nla/toolkit.py index 
42ac0113393..7415c399980 100644 --- a/langchain/agents/agent_toolkits/nla/toolkit.py +++ b/langchain/agents/agent_toolkits/nla/toolkit.py @@ -7,8 +7,8 @@ from pydantic import Field from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.agents.agent_toolkits.nla.tool import NLATool -from langchain.base_language import BaseLanguageModel from langchain.requests import Requests +from langchain.schema.language_model import BaseLanguageModel from langchain.tools.base import BaseTool from langchain.tools.openapi.utils.openapi_utils import OpenAPISpec from langchain.tools.plugin import AIPlugin diff --git a/langchain/agents/agent_toolkits/openapi/base.py b/langchain/agents/agent_toolkits/openapi/base.py index 60a7aaacc5b..bac17044273 100644 --- a/langchain/agents/agent_toolkits/openapi/base.py +++ b/langchain/agents/agent_toolkits/openapi/base.py @@ -9,9 +9,9 @@ from langchain.agents.agent_toolkits.openapi.prompt import ( from langchain.agents.agent_toolkits.openapi.toolkit import OpenAPIToolkit from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain +from langchain.schema.language_model import BaseLanguageModel def create_openapi_agent( diff --git a/langchain/agents/agent_toolkits/openapi/planner.py b/langchain/agents/agent_toolkits/openapi/planner.py index 87cd01ab8d8..0e605221510 100644 --- a/langchain/agents/agent_toolkits/openapi/planner.py +++ b/langchain/agents/agent_toolkits/openapi/planner.py @@ -28,7 +28,6 @@ from langchain.agents.agent_toolkits.openapi.planner_prompt import ( from langchain.agents.agent_toolkits.openapi.spec import ReducedOpenAPISpec from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.tools import Tool -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.llms.openai import OpenAI @@ -36,6 +35,7 @@ from langchain.memory import ReadOnlySharedMemory from langchain.prompts import PromptTemplate from langchain.requests import RequestsWrapper from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.tools.base import BaseTool from langchain.tools.requests.tool import BaseRequestsTool diff --git a/langchain/agents/agent_toolkits/openapi/toolkit.py b/langchain/agents/agent_toolkits/openapi/toolkit.py index 9c243cf72bf..e70c92d5385 100644 --- a/langchain/agents/agent_toolkits/openapi/toolkit.py +++ b/langchain/agents/agent_toolkits/openapi/toolkit.py @@ -9,8 +9,8 @@ from langchain.agents.agent_toolkits.json.base import create_json_agent from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit from langchain.agents.agent_toolkits.openapi.prompt import DESCRIPTION from langchain.agents.tools import Tool -from langchain.base_language import BaseLanguageModel from langchain.requests import TextRequestsWrapper +from langchain.schema.language_model import BaseLanguageModel from langchain.tools import BaseTool from langchain.tools.json.tool import JsonSpec from langchain.tools.requests.tool import ( diff --git a/langchain/agents/agent_toolkits/pandas/base.py b/langchain/agents/agent_toolkits/pandas/base.py index 264b11ab519..fec09514d73 100644 --- a/langchain/agents/agent_toolkits/pandas/base.py +++ 
b/langchain/agents/agent_toolkits/pandas/base.py @@ -16,10 +16,10 @@ from langchain.agents.agent_toolkits.pandas.prompt import ( from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent from langchain.agents.types import AgentType -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import SystemMessage from langchain.tools.python.tool import PythonAstREPLTool diff --git a/langchain/agents/agent_toolkits/powerbi/base.py b/langchain/agents/agent_toolkits/powerbi/base.py index 62fed35c14b..2cb4a79c47e 100644 --- a/langchain/agents/agent_toolkits/powerbi/base.py +++ b/langchain/agents/agent_toolkits/powerbi/base.py @@ -9,9 +9,9 @@ from langchain.agents.agent_toolkits.powerbi.prompt import ( from langchain.agents.agent_toolkits.powerbi.toolkit import PowerBIToolkit from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain +from langchain.schema.language_model import BaseLanguageModel from langchain.utilities.powerbi import PowerBIDataset diff --git a/langchain/agents/agent_toolkits/powerbi/toolkit.py b/langchain/agents/agent_toolkits/powerbi/toolkit.py index b5ceee2b5de..417dc361c21 100644 --- a/langchain/agents/agent_toolkits/powerbi/toolkit.py +++ b/langchain/agents/agent_toolkits/powerbi/toolkit.py @@ -4,7 +4,6 @@ from typing import List, Optional, Union from pydantic import Field from langchain.agents.agent_toolkits.base import BaseToolkit -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.chat_models.base import BaseChatModel @@ -14,6 +13,7 @@ from langchain.prompts.chat import ( HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) +from langchain.schema.language_model import BaseLanguageModel from langchain.tools import BaseTool from langchain.tools.powerbi.prompt import ( QUESTION_TO_QUERY_BASE, diff --git a/langchain/agents/agent_toolkits/python/base.py b/langchain/agents/agent_toolkits/python/base.py index 0cb8d7bd6b5..c1263c84ec3 100644 --- a/langchain/agents/agent_toolkits/python/base.py +++ b/langchain/agents/agent_toolkits/python/base.py @@ -7,9 +7,9 @@ from langchain.agents.agent_toolkits.python.prompt import PREFIX from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent from langchain.agents.types import AgentType -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import SystemMessage from langchain.tools.python.tool import PythonREPLTool diff --git a/langchain/agents/agent_toolkits/spark_sql/base.py b/langchain/agents/agent_toolkits/spark_sql/base.py index 3d4eb399907..b52352b5af3 100644 --- a/langchain/agents/agent_toolkits/spark_sql/base.py +++ b/langchain/agents/agent_toolkits/spark_sql/base.py @@ -6,9 +6,9 @@ from langchain.agents.agent_toolkits.spark_sql.prompt import 
SQL_PREFIX, SQL_SUF from langchain.agents.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain +from langchain.schema.language_model import BaseLanguageModel def create_spark_sql_agent( diff --git a/langchain/agents/agent_toolkits/spark_sql/toolkit.py b/langchain/agents/agent_toolkits/spark_sql/toolkit.py index 620f12f9325..7c89c7dc5f4 100644 --- a/langchain/agents/agent_toolkits/spark_sql/toolkit.py +++ b/langchain/agents/agent_toolkits/spark_sql/toolkit.py @@ -4,7 +4,7 @@ from typing import List from pydantic import Field from langchain.agents.agent_toolkits.base import BaseToolkit -from langchain.base_language import BaseLanguageModel +from langchain.schema.language_model import BaseLanguageModel from langchain.tools import BaseTool from langchain.tools.spark_sql.tool import ( InfoSparkSQLTool, diff --git a/langchain/agents/agent_toolkits/sql/base.py b/langchain/agents/agent_toolkits/sql/base.py index 790fecc985a..351f971175d 100644 --- a/langchain/agents/agent_toolkits/sql/base.py +++ b/langchain/agents/agent_toolkits/sql/base.py @@ -12,7 +12,6 @@ from langchain.agents.agent_types import AgentType from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.prompts.chat import ( @@ -20,6 +19,7 @@ from langchain.prompts.chat import ( HumanMessagePromptTemplate, MessagesPlaceholder, ) +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import AIMessage, SystemMessage diff --git a/langchain/agents/agent_toolkits/sql/toolkit.py b/langchain/agents/agent_toolkits/sql/toolkit.py index 4a525252e1c..f9589a601e1 100644 --- a/langchain/agents/agent_toolkits/sql/toolkit.py +++ b/langchain/agents/agent_toolkits/sql/toolkit.py @@ -4,7 +4,7 @@ from typing import List from pydantic import Field from langchain.agents.agent_toolkits.base import BaseToolkit -from langchain.base_language import BaseLanguageModel +from langchain.schema.language_model import BaseLanguageModel from langchain.sql_database import SQLDatabase from langchain.tools import BaseTool from langchain.tools.sql_database.tool import ( diff --git a/langchain/agents/agent_toolkits/vectorstore/base.py b/langchain/agents/agent_toolkits/vectorstore/base.py index c3fd97e8d40..92f7e06e580 100644 --- a/langchain/agents/agent_toolkits/vectorstore/base.py +++ b/langchain/agents/agent_toolkits/vectorstore/base.py @@ -8,9 +8,9 @@ from langchain.agents.agent_toolkits.vectorstore.toolkit import ( VectorStoreToolkit, ) from langchain.agents.mrkl.base import ZeroShotAgent -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain +from langchain.schema.language_model import BaseLanguageModel def create_vectorstore_agent( diff --git a/langchain/agents/agent_toolkits/vectorstore/toolkit.py b/langchain/agents/agent_toolkits/vectorstore/toolkit.py index 4169557028c..73289c8401d 100644 --- a/langchain/agents/agent_toolkits/vectorstore/toolkit.py +++ 
b/langchain/agents/agent_toolkits/vectorstore/toolkit.py @@ -4,8 +4,8 @@ from typing import List from pydantic import BaseModel, Field from langchain.agents.agent_toolkits.base import BaseToolkit -from langchain.base_language import BaseLanguageModel from langchain.llms.openai import OpenAI +from langchain.schema.language_model import BaseLanguageModel from langchain.tools import BaseTool from langchain.tools.vectorstore.tool import ( VectorStoreQATool, diff --git a/langchain/agents/chat/base.py b/langchain/agents/chat/base.py index 62415f9385c..3eb0cd26cfb 100644 --- a/langchain/agents/chat/base.py +++ b/langchain/agents/chat/base.py @@ -11,7 +11,6 @@ from langchain.agents.chat.prompt import ( SYSTEM_MESSAGE_SUFFIX, ) from langchain.agents.utils import validate_tools_single_input -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.prompts.chat import ( @@ -20,6 +19,7 @@ from langchain.prompts.chat import ( SystemMessagePromptTemplate, ) from langchain.schema import AgentAction, BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.tools.base import BaseTool diff --git a/langchain/agents/conversational/base.py b/langchain/agents/conversational/base.py index ce91d9c6b62..7fdffacd89e 100644 --- a/langchain/agents/conversational/base.py +++ b/langchain/agents/conversational/base.py @@ -10,10 +10,10 @@ from langchain.agents.agent_types import AgentType from langchain.agents.conversational.output_parser import ConvoOutputParser from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX from langchain.agents.utils import validate_tools_single_input -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains import LLMChain from langchain.prompts import PromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.tools.base import BaseTool diff --git a/langchain/agents/conversational_chat/base.py b/langchain/agents/conversational_chat/base.py index 8eb310b30fb..1ffb7ff1244 100644 --- a/langchain/agents/conversational_chat/base.py +++ b/langchain/agents/conversational_chat/base.py @@ -13,7 +13,6 @@ from langchain.agents.conversational_chat.prompt import ( TEMPLATE_TOOL_RESPONSE, ) from langchain.agents.utils import validate_tools_single_input -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains import LLMChain from langchain.prompts.chat import ( @@ -23,6 +22,7 @@ from langchain.prompts.chat import ( SystemMessagePromptTemplate, ) from langchain.schema import AgentAction, BaseOutputParser, BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import AIMessage, BaseMessage, HumanMessage from langchain.tools.base import BaseTool diff --git a/langchain/agents/initialize.py b/langchain/agents/initialize.py index 8b4ff608f70..dfbb7f9b47b 100644 --- a/langchain/agents/initialize.py +++ b/langchain/agents/initialize.py @@ -4,8 +4,8 @@ from typing import Any, Optional, Sequence from langchain.agents.agent import AgentExecutor from langchain.agents.agent_types import AgentType from langchain.agents.loading import AGENT_TO_CLASS, load_agent -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager +from 
langchain.schema.language_model import BaseLanguageModel from langchain.tools.base import BaseTool diff --git a/langchain/agents/load_tools.py b/langchain/agents/load_tools.py index 2b7b08668d0..320d7d81a20 100644 --- a/langchain/agents/load_tools.py +++ b/langchain/agents/load_tools.py @@ -5,7 +5,7 @@ from typing import Any, Dict, List, Optional, Callable, Tuple from mypy_extensions import Arg, KwArg from langchain.agents.tools import Tool -from langchain.base_language import BaseLanguageModel +from langchain.schema.language_model import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import Callbacks from langchain.chains.api import news_docs, open_meteo_docs, podcast_docs, tmdb_docs diff --git a/langchain/agents/loading.py b/langchain/agents/loading.py index 5e3a881725e..6447fea103d 100644 --- a/langchain/agents/loading.py +++ b/langchain/agents/loading.py @@ -9,8 +9,8 @@ import yaml from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent from langchain.agents.tools import Tool from langchain.agents.types import AGENT_TO_CLASS -from langchain.base_language import BaseLanguageModel from langchain.chains.loading import load_chain, load_chain_from_config +from langchain.schema.language_model import BaseLanguageModel from langchain.utilities.loading import try_load_from_hub logger = logging.getLogger(__file__) diff --git a/langchain/agents/mrkl/base.py b/langchain/agents/mrkl/base.py index e1d55eb12f6..a3939cbeec8 100644 --- a/langchain/agents/mrkl/base.py +++ b/langchain/agents/mrkl/base.py @@ -11,10 +11,10 @@ from langchain.agents.mrkl.output_parser import MRKLOutputParser from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX from langchain.agents.tools import Tool from langchain.agents.utils import validate_tools_single_input -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains import LLMChain from langchain.prompts import PromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.tools.base import BaseTool diff --git a/langchain/agents/openai_functions_agent/base.py b/langchain/agents/openai_functions_agent/base.py index 3e03297a5b6..fcd1ca50f6f 100644 --- a/langchain/agents/openai_functions_agent/base.py +++ b/langchain/agents/openai_functions_agent/base.py @@ -7,7 +7,6 @@ from typing import Any, List, Optional, Sequence, Tuple, Union from pydantic import root_validator from langchain.agents import BaseSingleActionAgent -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import Callbacks from langchain.chat_models.openai import ChatOpenAI @@ -23,6 +22,7 @@ from langchain.schema import ( BasePromptTemplate, OutputParserException, ) +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import ( AIMessage, BaseMessage, diff --git a/langchain/agents/openai_functions_multi_agent/base.py b/langchain/agents/openai_functions_multi_agent/base.py index 82c3712d256..8cf16d81222 100644 --- a/langchain/agents/openai_functions_multi_agent/base.py +++ b/langchain/agents/openai_functions_multi_agent/base.py @@ -7,7 +7,6 @@ from typing import Any, List, Optional, Sequence, Tuple, Union from pydantic import root_validator from langchain.agents import BaseMultiActionAgent -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base 
import BaseCallbackManager from langchain.callbacks.manager import Callbacks from langchain.chat_models.openai import ChatOpenAI @@ -23,6 +22,7 @@ from langchain.schema import ( BasePromptTemplate, OutputParserException, ) +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import ( AIMessage, BaseMessage, diff --git a/langchain/agents/react/base.py b/langchain/agents/react/base.py index e7cbfb6466b..51bd1f2373d 100644 --- a/langchain/agents/react/base.py +++ b/langchain/agents/react/base.py @@ -10,10 +10,10 @@ from langchain.agents.react.textworld_prompt import TEXTWORLD_PROMPT from langchain.agents.react.wiki_prompt import WIKI_PROMPT from langchain.agents.tools import Tool from langchain.agents.utils import validate_tools_single_input -from langchain.base_language import BaseLanguageModel from langchain.docstore.base import Docstore from langchain.docstore.document import Document from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.tools.base import BaseTool diff --git a/langchain/agents/self_ask_with_search/base.py b/langchain/agents/self_ask_with_search/base.py index 2ddff23a85e..47f5c04dbe8 100644 --- a/langchain/agents/self_ask_with_search/base.py +++ b/langchain/agents/self_ask_with_search/base.py @@ -9,8 +9,8 @@ from langchain.agents.self_ask_with_search.output_parser import SelfAskOutputPar from langchain.agents.self_ask_with_search.prompt import PROMPT from langchain.agents.tools import Tool from langchain.agents.utils import validate_tools_single_input -from langchain.base_language import BaseLanguageModel from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.tools.base import BaseTool from langchain.utilities.google_serper import GoogleSerperAPIWrapper from langchain.utilities.serpapi import SerpAPIWrapper diff --git a/langchain/agents/structured_chat/base.py b/langchain/agents/structured_chat/base.py index 57514e63753..85c214e2919 100644 --- a/langchain/agents/structured_chat/base.py +++ b/langchain/agents/structured_chat/base.py @@ -8,7 +8,6 @@ from langchain.agents.structured_chat.output_parser import ( StructuredChatOutputParserWithRetries, ) from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain from langchain.prompts.chat import ( @@ -17,6 +16,7 @@ from langchain.prompts.chat import ( SystemMessagePromptTemplate, ) from langchain.schema import AgentAction, BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.tools import BaseTool HUMAN_MESSAGE_TEMPLATE = "{input}\n\n{agent_scratchpad}" diff --git a/langchain/agents/structured_chat/output_parser.py b/langchain/agents/structured_chat/output_parser.py index d53ae58c838..a59b814401d 100644 --- a/langchain/agents/structured_chat/output_parser.py +++ b/langchain/agents/structured_chat/output_parser.py @@ -9,9 +9,9 @@ from pydantic import Field from langchain.agents.agent import AgentOutputParser from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS -from langchain.base_language import BaseLanguageModel from langchain.output_parsers import OutputFixingParser from langchain.schema import AgentAction, AgentFinish, OutputParserException +from langchain.schema.language_model import BaseLanguageModel 
logger = logging.getLogger(__name__) diff --git a/langchain/base_language.py b/langchain/base_language.py index 5e97366d6af..30323070e54 100644 --- a/langchain/base_language.py +++ b/langchain/base_language.py @@ -1,105 +1,6 @@ +"""Deprecated module for BaseLanguageModel class, kept for backwards compatibility.""" from __future__ import annotations -from abc import ABC, abstractmethod -from typing import Any, List, Optional, Sequence, Set +from langchain.schema.language_model import BaseLanguageModel -from langchain.callbacks.manager import Callbacks -from langchain.load.serializable import Serializable -from langchain.schema import LLMResult, PromptValue -from langchain.schema.messages import BaseMessage, get_buffer_string - - -def _get_token_ids_default_method(text: str) -> List[int]: - """Encode the text into token IDs.""" - # TODO: this method may not be exact. - # TODO: this method may differ based on model (eg codex). - try: - from transformers import GPT2TokenizerFast - except ImportError: - raise ValueError( - "Could not import transformers python package. " - "This is needed in order to calculate get_token_ids. " - "Please install it with `pip install transformers`." - ) - # create a GPT-2 tokenizer instance - tokenizer = GPT2TokenizerFast.from_pretrained("gpt2") - - # tokenize the text using the GPT-2 tokenizer - return tokenizer.encode(text) - - -class BaseLanguageModel(Serializable, ABC): - """Base class for all language models.""" - - @abstractmethod - def generate_prompt( - self, - prompts: List[PromptValue], - stop: Optional[List[str]] = None, - callbacks: Callbacks = None, - **kwargs: Any, - ) -> LLMResult: - """Take in a list of prompt values and return an LLMResult.""" - - @abstractmethod - async def agenerate_prompt( - self, - prompts: List[PromptValue], - stop: Optional[List[str]] = None, - callbacks: Callbacks = None, - **kwargs: Any, - ) -> LLMResult: - """Take in a list of prompt values and return an LLMResult.""" - - @abstractmethod - def predict( - self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any - ) -> str: - """Predict text from text.""" - - @abstractmethod - def predict_messages( - self, - messages: List[BaseMessage], - *, - stop: Optional[Sequence[str]] = None, - **kwargs: Any, - ) -> BaseMessage: - """Predict message from messages.""" - - @abstractmethod - async def apredict( - self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any - ) -> str: - """Predict text from text.""" - - @abstractmethod - async def apredict_messages( - self, - messages: List[BaseMessage], - *, - stop: Optional[Sequence[str]] = None, - **kwargs: Any, - ) -> BaseMessage: - """Predict message from messages.""" - - def get_token_ids(self, text: str) -> List[int]: - """Get the token present in the text.""" - return _get_token_ids_default_method(text) - - def get_num_tokens(self, text: str) -> int: - """Get the number of tokens present in the text.""" - return len(self.get_token_ids(text)) - - def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: - """Get the number of tokens in the message.""" - return sum([self.get_num_tokens(get_buffer_string([m])) for m in messages]) - - @classmethod - def all_required_field_names(cls) -> Set: - all_required_field_names = set() - for field in cls.__fields__.values(): - all_required_field_names.add(field.name) - if field.has_alias: - all_required_field_names.add(field.alias) - return all_required_field_names +__all__ = ["BaseLanguageModel"] diff --git a/langchain/chains/api/base.py 
b/langchain/chains/api/base.py index 9eb4bc2fcdd..c873b1bc281 100644 --- a/langchain/chains/api/base.py +++ b/langchain/chains/api/base.py @@ -5,7 +5,6 @@ from typing import Any, Dict, List, Optional from pydantic import Field, root_validator -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, @@ -15,6 +14,7 @@ from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.requests import TextRequestsWrapper from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel class APIChain(Chain): diff --git a/langchain/chains/api/openapi/chain.py b/langchain/chains/api/openapi/chain.py index 039770f7795..c859ced0549 100644 --- a/langchain/chains/api/openapi/chain.py +++ b/langchain/chains/api/openapi/chain.py @@ -7,13 +7,13 @@ from typing import Any, Dict, List, NamedTuple, Optional, cast from pydantic import BaseModel, Field from requests import Response -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks from langchain.chains.api.openapi.requests_chain import APIRequesterChain from langchain.chains.api.openapi.response_chain import APIResponderChain from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.requests import Requests +from langchain.schema.language_model import BaseLanguageModel from langchain.tools.openapi.utils.api_models import APIOperation diff --git a/langchain/chains/api/openapi/requests_chain.py b/langchain/chains/api/openapi/requests_chain.py index 223a630f630..002cdbe5799 100644 --- a/langchain/chains/api/openapi/requests_chain.py +++ b/langchain/chains/api/openapi/requests_chain.py @@ -4,11 +4,11 @@ import json import re from typing import Any -from langchain.base_language import BaseLanguageModel from langchain.chains.api.openapi.prompts import REQUEST_TEMPLATE from langchain.chains.llm import LLMChain from langchain.prompts.prompt import PromptTemplate from langchain.schema import BaseOutputParser +from langchain.schema.language_model import BaseLanguageModel class APIRequesterOutputParser(BaseOutputParser): diff --git a/langchain/chains/api/openapi/response_chain.py b/langchain/chains/api/openapi/response_chain.py index 21b4af3be47..18b2617e578 100644 --- a/langchain/chains/api/openapi/response_chain.py +++ b/langchain/chains/api/openapi/response_chain.py @@ -4,11 +4,11 @@ import json import re from typing import Any -from langchain.base_language import BaseLanguageModel from langchain.chains.api.openapi.prompts import RESPONSE_TEMPLATE from langchain.chains.llm import LLMChain from langchain.prompts.prompt import PromptTemplate from langchain.schema import BaseOutputParser +from langchain.schema.language_model import BaseLanguageModel class APIResponderOutputParser(BaseOutputParser): diff --git a/langchain/chains/constitutional_ai/base.py b/langchain/chains/constitutional_ai/base.py index 75eeda79cd5..62055aaee9b 100644 --- a/langchain/chains/constitutional_ai/base.py +++ b/langchain/chains/constitutional_ai/base.py @@ -1,7 +1,6 @@ """Chain for applying constitutional principles to the outputs of another chain.""" from typing import Any, Dict, List, Optional -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from 
langchain.chains.constitutional_ai.models import ConstitutionalPrinciple @@ -9,6 +8,7 @@ from langchain.chains.constitutional_ai.principles import PRINCIPLES from langchain.chains.constitutional_ai.prompts import CRITIQUE_PROMPT, REVISION_PROMPT from langchain.chains.llm import LLMChain from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel class ConstitutionalChain(Chain): diff --git a/langchain/chains/conversational_retrieval/base.py b/langchain/chains/conversational_retrieval/base.py index a39327eb05b..c601d422d53 100644 --- a/langchain/chains/conversational_retrieval/base.py +++ b/langchain/chains/conversational_retrieval/base.py @@ -9,7 +9,6 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union from pydantic import Extra, Field, root_validator -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, @@ -22,6 +21,7 @@ from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_ from langchain.chains.llm import LLMChain from langchain.chains.question_answering import load_qa_chain from langchain.schema import BasePromptTemplate, BaseRetriever, Document +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import BaseMessage from langchain.vectorstores.base import VectorStore diff --git a/langchain/chains/flare/base.py b/langchain/chains/flare/base.py index c52e91c3ed2..df468d7a43d 100644 --- a/langchain/chains/flare/base.py +++ b/langchain/chains/flare/base.py @@ -7,7 +7,6 @@ from typing import Any, Dict, List, Optional, Sequence, Tuple import numpy as np from pydantic import Field -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( CallbackManagerForChainRun, ) @@ -20,6 +19,7 @@ from langchain.chains.flare.prompts import ( from langchain.chains.llm import LLMChain from langchain.llms import OpenAI from langchain.schema import BasePromptTemplate, BaseRetriever, Generation +from langchain.schema.language_model import BaseLanguageModel class _ResponseChain(LLMChain): diff --git a/langchain/chains/graph_qa/base.py b/langchain/chains/graph_qa/base.py index b194f2e146d..3082a1d1330 100644 --- a/langchain/chains/graph_qa/base.py +++ b/langchain/chains/graph_qa/base.py @@ -5,13 +5,13 @@ from typing import Any, Dict, List, Optional from pydantic import Field -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import ENTITY_EXTRACTION_PROMPT, GRAPH_QA_PROMPT from langchain.chains.llm import LLMChain from langchain.graphs.networkx_graph import NetworkxEntityGraph, get_entities from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel class GraphQAChain(Chain): diff --git a/langchain/chains/graph_qa/cypher.py b/langchain/chains/graph_qa/cypher.py index f50195b1a0c..2e8d6a851ab 100644 --- a/langchain/chains/graph_qa/cypher.py +++ b/langchain/chains/graph_qa/cypher.py @@ -6,13 +6,13 @@ from typing import Any, Dict, List, Optional from pydantic import Field -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import CYPHER_GENERATION_PROMPT, CYPHER_QA_PROMPT from 
langchain.chains.llm import LLMChain from langchain.graphs.neo4j_graph import Neo4jGraph from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel INTERMEDIATE_STEPS_KEY = "intermediate_steps" diff --git a/langchain/chains/graph_qa/hugegraph.py b/langchain/chains/graph_qa/hugegraph.py index a27da8e2c9c..3984372aeed 100644 --- a/langchain/chains/graph_qa/hugegraph.py +++ b/langchain/chains/graph_qa/hugegraph.py @@ -5,7 +5,6 @@ from typing import Any, Dict, List, Optional from pydantic import Field -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import ( @@ -15,6 +14,7 @@ from langchain.chains.graph_qa.prompts import ( from langchain.chains.llm import LLMChain from langchain.graphs.hugegraph import HugeGraph from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel class HugeGraphQAChain(Chain): diff --git a/langchain/chains/graph_qa/kuzu.py b/langchain/chains/graph_qa/kuzu.py index d373d79f951..e532fa64ca0 100644 --- a/langchain/chains/graph_qa/kuzu.py +++ b/langchain/chains/graph_qa/kuzu.py @@ -5,13 +5,13 @@ from typing import Any, Dict, List, Optional from pydantic import Field -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import CYPHER_QA_PROMPT, KUZU_GENERATION_PROMPT from langchain.chains.llm import LLMChain from langchain.graphs.kuzu_graph import KuzuGraph from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel class KuzuQAChain(Chain): diff --git a/langchain/chains/graph_qa/nebulagraph.py b/langchain/chains/graph_qa/nebulagraph.py index 377559e7076..9cfcc960a43 100644 --- a/langchain/chains/graph_qa/nebulagraph.py +++ b/langchain/chains/graph_qa/nebulagraph.py @@ -5,13 +5,13 @@ from typing import Any, Dict, List, Optional from pydantic import Field -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import CYPHER_QA_PROMPT, NGQL_GENERATION_PROMPT from langchain.chains.llm import LLMChain from langchain.graphs.nebula_graph import NebulaGraph from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel class NebulaGraphQAChain(Chain): diff --git a/langchain/chains/graph_qa/sparql.py b/langchain/chains/graph_qa/sparql.py index 5c1389bef26..14e27a867d9 100644 --- a/langchain/chains/graph_qa/sparql.py +++ b/langchain/chains/graph_qa/sparql.py @@ -7,7 +7,6 @@ from typing import Any, Dict, List, Optional from pydantic import Field -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import ( @@ -19,6 +18,7 @@ from langchain.chains.graph_qa.prompts import ( from langchain.chains.llm import LLMChain from langchain.graphs.rdf_graph import RdfGraph from langchain.prompts.base import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel class GraphSparqlQAChain(Chain): diff --git a/langchain/chains/hyde/base.py b/langchain/chains/hyde/base.py index 
7764c85474f..1fb2f3ac4ce 100644 --- a/langchain/chains/hyde/base.py +++ b/langchain/chains/hyde/base.py @@ -9,12 +9,12 @@ from typing import Any, Dict, List, Optional import numpy as np from pydantic import Extra -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.hyde.prompts import PROMPT_MAP from langchain.chains.llm import LLMChain from langchain.embeddings.base import Embeddings +from langchain.schema.language_model import BaseLanguageModel class HypotheticalDocumentEmbedder(Chain, Embeddings): diff --git a/langchain/chains/llm.py b/langchain/chains/llm.py index bfb2244d686..40cb3d5ec7a 100644 --- a/langchain/chains/llm.py +++ b/langchain/chains/llm.py @@ -6,7 +6,6 @@ from typing import Any, Dict, List, Optional, Sequence, Tuple, Union from pydantic import Extra, Field -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForChainRun, @@ -25,6 +24,7 @@ from langchain.schema import ( NoOpOutputParser, PromptValue, ) +from langchain.schema.language_model import BaseLanguageModel class LLMChain(Chain): diff --git a/langchain/chains/llm_bash/base.py b/langchain/chains/llm_bash/base.py index f2ec384b655..87644e67b2c 100644 --- a/langchain/chains/llm_bash/base.py +++ b/langchain/chains/llm_bash/base.py @@ -7,12 +7,12 @@ from typing import Any, Dict, List, Optional from pydantic import Extra, Field, root_validator -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.llm_bash.prompt import PROMPT from langchain.schema import BasePromptTemplate, OutputParserException +from langchain.schema.language_model import BaseLanguageModel from langchain.utilities.bash import BashProcess logger = logging.getLogger(__name__) diff --git a/langchain/chains/llm_checker/base.py b/langchain/chains/llm_checker/base.py index 080b1e9635d..e32a93fc4ad 100644 --- a/langchain/chains/llm_checker/base.py +++ b/langchain/chains/llm_checker/base.py @@ -6,7 +6,6 @@ from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain @@ -18,6 +17,7 @@ from langchain.chains.llm_checker.prompt import ( ) from langchain.chains.sequential import SequentialChain from langchain.prompts import PromptTemplate +from langchain.schema.language_model import BaseLanguageModel def _load_question_to_checked_assertions_chain( diff --git a/langchain/chains/llm_math/base.py b/langchain/chains/llm_math/base.py index bce99676712..b5e92620d46 100644 --- a/langchain/chains/llm_math/base.py +++ b/langchain/chains/llm_math/base.py @@ -9,7 +9,6 @@ from typing import Any, Dict, List, Optional import numexpr from pydantic import Extra, root_validator -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, @@ -18,6 +17,7 @@ from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.llm_math.prompt import PROMPT from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import 
BaseLanguageModel class LLMMathChain(Chain): diff --git a/langchain/chains/llm_summarization_checker/base.py b/langchain/chains/llm_summarization_checker/base.py index 3ae9c7070cb..30e7f8ea853 100644 --- a/langchain/chains/llm_summarization_checker/base.py +++ b/langchain/chains/llm_summarization_checker/base.py @@ -8,12 +8,12 @@ from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.sequential import SequentialChain from langchain.prompts.prompt import PromptTemplate +from langchain.schema.language_model import BaseLanguageModel PROMPTS_DIR = Path(__file__).parent / "prompts" diff --git a/langchain/chains/mapreduce.py b/langchain/chains/mapreduce.py index 8b217bf0231..e7ba46ba605 100644 --- a/langchain/chains/mapreduce.py +++ b/langchain/chains/mapreduce.py @@ -9,7 +9,6 @@ from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks from langchain.chains import ReduceDocumentsChain from langchain.chains.base import Chain @@ -19,6 +18,7 @@ from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.docstore.document import Document from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.text_splitter import TextSplitter diff --git a/langchain/chains/natbot/base.py b/langchain/chains/natbot/base.py index b510f89b93e..2adcea4fd79 100644 --- a/langchain/chains/natbot/base.py +++ b/langchain/chains/natbot/base.py @@ -6,12 +6,12 @@ from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.natbot.prompt import PROMPT from langchain.llms.openai import OpenAI +from langchain.schema.language_model import BaseLanguageModel class NatBotChain(Chain): diff --git a/langchain/chains/openai_functions/citation_fuzzy_match.py b/langchain/chains/openai_functions/citation_fuzzy_match.py index ac812c4904c..6e31da8c12a 100644 --- a/langchain/chains/openai_functions/citation_fuzzy_match.py +++ b/langchain/chains/openai_functions/citation_fuzzy_match.py @@ -2,13 +2,13 @@ from typing import Iterator, List from pydantic import BaseModel, Field -from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.chains.openai_functions.utils import get_llm_kwargs from langchain.output_parsers.openai_functions import ( PydanticOutputFunctionsParser, ) from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import HumanMessage, SystemMessage diff --git a/langchain/chains/openai_functions/extraction.py b/langchain/chains/openai_functions/extraction.py index ec4ab46a89d..609bee18d9e 100644 --- a/langchain/chains/openai_functions/extraction.py +++ b/langchain/chains/openai_functions/extraction.py @@ -2,7 +2,6 @@ from typing import Any, List from pydantic import 
BaseModel -from langchain.base_language import BaseLanguageModel from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.openai_functions.utils import ( @@ -15,6 +14,7 @@ from langchain.output_parsers.openai_functions import ( PydanticAttrOutputFunctionsParser, ) from langchain.prompts import ChatPromptTemplate +from langchain.schema.language_model import BaseLanguageModel def _get_extraction_function(entity_schema: dict) -> dict: diff --git a/langchain/chains/openai_functions/openapi.py b/langchain/chains/openai_functions/openapi.py index a25b67427e2..ed6b6dc173e 100644 --- a/langchain/chains/openai_functions/openapi.py +++ b/langchain/chains/openai_functions/openapi.py @@ -8,7 +8,6 @@ from openapi_schema_pydantic import Parameter from requests import Response from langchain import LLMChain -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.sequential import SequentialChain @@ -17,6 +16,7 @@ from langchain.input import get_colored_text from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser from langchain.prompts import ChatPromptTemplate from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.tools import APIOperation from langchain.utilities.openapi import OpenAPISpec diff --git a/langchain/chains/openai_functions/qa_with_structure.py b/langchain/chains/openai_functions/qa_with_structure.py index a3c0584db88..d5dfecc921c 100644 --- a/langchain/chains/openai_functions/qa_with_structure.py +++ b/langchain/chains/openai_functions/qa_with_structure.py @@ -2,7 +2,6 @@ from typing import Any, List, Optional, Type, Union from pydantic import BaseModel, Field -from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.chains.openai_functions.utils import get_llm_kwargs from langchain.output_parsers.openai_functions import ( @@ -12,6 +11,7 @@ from langchain.output_parsers.openai_functions import ( from langchain.prompts import PromptTemplate from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate from langchain.schema import BaseLLMOutputParser +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import HumanMessage, SystemMessage diff --git a/langchain/chains/openai_functions/tagging.py b/langchain/chains/openai_functions/tagging.py index 4eeaad5526b..4bddaabba19 100644 --- a/langchain/chains/openai_functions/tagging.py +++ b/langchain/chains/openai_functions/tagging.py @@ -1,6 +1,5 @@ from typing import Any -from langchain.base_language import BaseLanguageModel from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.openai_functions.utils import _convert_schema, get_llm_kwargs @@ -9,6 +8,7 @@ from langchain.output_parsers.openai_functions import ( PydanticOutputFunctionsParser, ) from langchain.prompts import ChatPromptTemplate +from langchain.schema.language_model import BaseLanguageModel def _get_tagging_function(schema: dict) -> dict: diff --git a/langchain/chains/pal/base.py b/langchain/chains/pal/base.py index 3e3ada74be5..e97f18c02f1 100644 --- a/langchain/chains/pal/base.py +++ b/langchain/chains/pal/base.py @@ -9,13 +9,13 @@ from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator -from langchain.base_language import 
BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.pal.colored_object_prompt import COLORED_OBJECT_PROMPT from langchain.chains.pal.math_prompt import MATH_PROMPT from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.utilities import PythonREPL diff --git a/langchain/chains/prompt_selector.py b/langchain/chains/prompt_selector.py index 4112aba33af..7b23bf26e69 100644 --- a/langchain/chains/prompt_selector.py +++ b/langchain/chains/prompt_selector.py @@ -3,10 +3,10 @@ from typing import Callable, List, Tuple from pydantic import BaseModel, Field -from langchain.base_language import BaseLanguageModel from langchain.chat_models.base import BaseChatModel from langchain.llms.base import BaseLLM from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel class BasePromptSelector(BaseModel, ABC): diff --git a/langchain/chains/qa_generation/base.py b/langchain/chains/qa_generation/base.py index 9cc2383570b..02fed9730d2 100644 --- a/langchain/chains/qa_generation/base.py +++ b/langchain/chains/qa_generation/base.py @@ -5,12 +5,12 @@ from typing import Any, Dict, List, Optional from pydantic import Field -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter diff --git a/langchain/chains/qa_with_sources/base.py b/langchain/chains/qa_with_sources/base.py index 5e1799cc290..e05d8949c53 100644 --- a/langchain/chains/qa_with_sources/base.py +++ b/langchain/chains/qa_with_sources/base.py @@ -9,7 +9,6 @@ from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, @@ -28,6 +27,7 @@ from langchain.chains.qa_with_sources.map_reduce_prompt import ( ) from langchain.docstore.document import Document from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel class BaseQAWithSourcesChain(Chain, ABC): diff --git a/langchain/chains/qa_with_sources/loading.py b/langchain/chains/qa_with_sources/loading.py index f8f8ac5f7e5..895c2984eed 100644 --- a/langchain/chains/qa_with_sources/loading.py +++ b/langchain/chains/qa_with_sources/loading.py @@ -3,11 +3,10 @@ from __future__ import annotations from typing import Any, Mapping, Optional, Protocol -from langchain.base_language import BaseLanguageModel -from langchain.chains import ReduceDocumentsChain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain +from langchain.chains.combine_documents.reduce import ReduceDocumentsChain from langchain.chains.combine_documents.refine import RefineDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from 
langchain.chains.llm import LLMChain @@ -19,6 +18,7 @@ from langchain.chains.qa_with_sources import ( from langchain.chains.question_answering.map_rerank_prompt import ( PROMPT as MAP_RERANK_PROMPT, ) +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.prompt_template import BasePromptTemplate diff --git a/langchain/chains/query_constructor/base.py b/langchain/chains/query_constructor/base.py index 67afc16b5b0..9269ada39d2 100644 --- a/langchain/chains/query_constructor/base.py +++ b/langchain/chains/query_constructor/base.py @@ -5,7 +5,6 @@ import json from typing import Any, Callable, List, Optional, Sequence from langchain import FewShotPromptTemplate, LLMChain -from langchain.base_language import BaseLanguageModel from langchain.chains.query_constructor.ir import ( Comparator, Operator, @@ -24,6 +23,7 @@ from langchain.chains.query_constructor.prompt import ( from langchain.chains.query_constructor.schema import AttributeInfo from langchain.output_parsers.json import parse_and_check_json_markdown from langchain.schema import BaseOutputParser, BasePromptTemplate, OutputParserException +from langchain.schema.language_model import BaseLanguageModel class StructuredQueryOutputParser(BaseOutputParser[StructuredQuery]): diff --git a/langchain/chains/question_answering/__init__.py b/langchain/chains/question_answering/__init__.py index 8c2ed5d6530..6e8976625d6 100644 --- a/langchain/chains/question_answering/__init__.py +++ b/langchain/chains/question_answering/__init__.py @@ -1,7 +1,6 @@ """Load question answering chains.""" from typing import Any, Mapping, Optional, Protocol -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import Callbacks from langchain.chains import ReduceDocumentsChain @@ -19,6 +18,7 @@ from langchain.chains.question_answering import ( from langchain.chains.question_answering.map_rerank_prompt import ( PROMPT as MAP_RERANK_PROMPT, ) +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.prompt_template import BasePromptTemplate diff --git a/langchain/chains/retrieval_qa/base.py b/langchain/chains/retrieval_qa/base.py index 3ff10aa3d44..19ead7ba582 100644 --- a/langchain/chains/retrieval_qa/base.py +++ b/langchain/chains/retrieval_qa/base.py @@ -8,7 +8,6 @@ from typing import Any, Dict, List, Optional from pydantic import Extra, Field, root_validator -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, @@ -21,6 +20,7 @@ from langchain.chains.question_answering import load_qa_chain from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR from langchain.prompts import PromptTemplate from langchain.schema import BaseRetriever, Document +from langchain.schema.language_model import BaseLanguageModel from langchain.vectorstores.base import VectorStore diff --git a/langchain/chains/router/llm_router.py b/langchain/chains/router/llm_router.py index 27a0e69d551..b0978646133 100644 --- a/langchain/chains/router/llm_router.py +++ b/langchain/chains/router/llm_router.py @@ -5,7 +5,6 @@ from typing import Any, Dict, List, Optional, Type, cast from pydantic import root_validator -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, @@ -14,6 +13,7 @@ from langchain.chains import LLMChain from 
langchain.chains.router.base import RouterChain from langchain.output_parsers.json import parse_and_check_json_markdown from langchain.schema import BaseOutputParser, BasePromptTemplate, OutputParserException +from langchain.schema.language_model import BaseLanguageModel class LLMRouterChain(RouterChain): diff --git a/langchain/chains/router/multi_prompt.py b/langchain/chains/router/multi_prompt.py index 71373743cfc..240d9018c88 100644 --- a/langchain/chains/router/multi_prompt.py +++ b/langchain/chains/router/multi_prompt.py @@ -3,13 +3,13 @@ from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional -from langchain.base_language import BaseLanguageModel from langchain.chains import ConversationChain from langchain.chains.llm import LLMChain from langchain.chains.router.base import MultiRouteChain, RouterChain from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE from langchain.prompts import PromptTemplate +from langchain.schema.language_model import BaseLanguageModel class MultiPromptChain(MultiRouteChain): diff --git a/langchain/chains/router/multi_retrieval_qa.py b/langchain/chains/router/multi_retrieval_qa.py index 10a2744a68a..183a87bb714 100644 --- a/langchain/chains/router/multi_retrieval_qa.py +++ b/langchain/chains/router/multi_retrieval_qa.py @@ -3,7 +3,6 @@ from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional -from langchain.base_language import BaseLanguageModel from langchain.chains import ConversationChain from langchain.chains.base import Chain from langchain.chains.conversation.prompt import DEFAULT_TEMPLATE @@ -16,6 +15,7 @@ from langchain.chains.router.multi_retrieval_prompt import ( from langchain.chat_models import ChatOpenAI from langchain.prompts import PromptTemplate from langchain.schema import BaseRetriever +from langchain.schema.language_model import BaseLanguageModel class MultiRetrievalQAChain(MultiRouteChain): diff --git a/langchain/chains/sql_database/base.py b/langchain/chains/sql_database/base.py index a7f9649cb48..c69fdf0b162 100644 --- a/langchain/chains/sql_database/base.py +++ b/langchain/chains/sql_database/base.py @@ -6,13 +6,13 @@ from typing import Any, Dict, List, Optional from pydantic import Extra, Field, root_validator -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS from langchain.prompts.prompt import PromptTemplate from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.sql_database import SQLDatabase from langchain.tools.sql_database.prompt import QUERY_CHECKER diff --git a/langchain/chains/summarize/__init__.py b/langchain/chains/summarize/__init__.py index 5645b73fc52..96d6279302b 100644 --- a/langchain/chains/summarize/__init__.py +++ b/langchain/chains/summarize/__init__.py @@ -1,15 +1,15 @@ """Load summarizing chains.""" from typing import Any, Mapping, Optional, Protocol -from langchain.base_language import BaseLanguageModel -from langchain.chains import ReduceDocumentsChain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain +from 
langchain.chains.combine_documents.reduce import ReduceDocumentsChain from langchain.chains.combine_documents.refine import RefineDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.chains.summarize import map_reduce_prompt, refine_prompts, stuff_prompt from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel class LoadingCallable(Protocol): diff --git a/langchain/chat_models/base.py b/langchain/chat_models/base.py index a3480137b57..239ae5689aa 100644 --- a/langchain/chat_models/base.py +++ b/langchain/chat_models/base.py @@ -8,7 +8,6 @@ from typing import Any, Dict, List, Mapping, Optional, Sequence from pydantic import Field, root_validator import langchain -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManager, @@ -25,6 +24,7 @@ from langchain.schema import ( PromptValue, RunInfo, ) +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import AIMessage, BaseMessage, HumanMessage diff --git a/langchain/chat_models/openai.py b/langchain/chat_models/openai.py index eb616514386..457a2f7857b 100644 --- a/langchain/chat_models/openai.py +++ b/langchain/chat_models/openai.py @@ -205,7 +205,7 @@ class ChatOpenAI(BaseChatModel): @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" - all_required_field_names = cls.all_required_field_names() + all_required_field_names = cls._all_required_field_names() extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name in extra: diff --git a/langchain/client/runner_utils.py b/langchain/client/runner_utils.py index 7c07a93745f..96cc0becacf 100644 --- a/langchain/client/runner_utils.py +++ b/langchain/client/runner_utils.py @@ -21,7 +21,6 @@ from typing import ( from langchainplus_sdk import LangChainPlusClient, RunEvaluator from langchainplus_sdk.schemas import Example -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackHandler from langchain.callbacks.manager import Callbacks from langchain.callbacks.tracers.base import BaseTracer @@ -34,6 +33,7 @@ from langchain.schema import ( ChatResult, LLMResult, ) +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import ( BaseMessage, HumanMessage, diff --git a/langchain/evaluation/agents/trajectory_eval_chain.py b/langchain/evaluation/agents/trajectory_eval_chain.py index b0249da226b..fe34be36706 100644 --- a/langchain/evaluation/agents/trajectory_eval_chain.py +++ b/langchain/evaluation/agents/trajectory_eval_chain.py @@ -9,7 +9,6 @@ from typing import Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union from pydantic import Extra, Field -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, @@ -23,6 +22,7 @@ from langchain.evaluation.agents.trajectory_eval_prompt import ( ) from langchain.evaluation.schema import AgentTrajectoryEvaluator, LLMEvalChain from langchain.schema import AgentAction, BaseOutputParser, OutputParserException +from langchain.schema.language_model import BaseLanguageModel from langchain.tools.base import BaseTool diff --git 
a/langchain/evaluation/comparison/eval_chain.py b/langchain/evaluation/comparison/eval_chain.py index 4fc1c978d93..d1aa81436d0 100644 --- a/langchain/evaluation/comparison/eval_chain.py +++ b/langchain/evaluation/comparison/eval_chain.py @@ -5,13 +5,13 @@ from typing import Any, Optional from pydantic import Extra, Field -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import Callbacks from langchain.chains.llm import LLMChain from langchain.evaluation.comparison.prompt import PROMPT, PROMPT_WITH_REFERENCE from langchain.evaluation.schema import LLMEvalChain, PairwiseStringEvaluator from langchain.prompts.prompt import PromptTemplate from langchain.schema import BaseOutputParser +from langchain.schema.language_model import BaseLanguageModel class PairwiseStringResultOutputParser(BaseOutputParser[dict]): diff --git a/langchain/evaluation/criteria/eval_chain.py b/langchain/evaluation/criteria/eval_chain.py index f2dd3c6b084..ea6d9119f20 100644 --- a/langchain/evaluation/criteria/eval_chain.py +++ b/langchain/evaluation/criteria/eval_chain.py @@ -4,12 +4,12 @@ from typing import Any, Dict, List, Mapping, Optional, Sequence, Union from pydantic import Extra, Field -from langchain.base_language import BaseLanguageModel from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple from langchain.chains.llm import LLMChain from langchain.evaluation.criteria.prompt import PROMPT, PROMPT_WITH_REFERENCES from langchain.evaluation.schema import LLMEvalChain, StringEvaluator from langchain.schema import BaseOutputParser, BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel _SUPPORTED_CRITERIA = { "conciseness": "Is the submission concise and to the point?", diff --git a/langchain/evaluation/loading.py b/langchain/evaluation/loading.py index c5ce94ddc59..549b9d6cb5b 100644 --- a/langchain/evaluation/loading.py +++ b/langchain/evaluation/loading.py @@ -1,7 +1,6 @@ """Loading datasets and evaluators.""" from typing import Any, Dict, List, Optional, Sequence, Type -from langchain.base_language import BaseLanguageModel from langchain.chains.base import Chain from langchain.chat_models.openai import ChatOpenAI from langchain.evaluation.agents.trajectory_eval_chain import TrajectoryEvalChain @@ -9,6 +8,7 @@ from langchain.evaluation.comparison import PairwiseStringEvalChain from langchain.evaluation.criteria.eval_chain import CriteriaEvalChain from langchain.evaluation.qa import ContextQAEvalChain, CotQAEvalChain, QAEvalChain from langchain.evaluation.schema import EvaluatorType, LLMEvalChain +from langchain.schema.language_model import BaseLanguageModel def load_dataset(uri: str) -> List[Dict]: diff --git a/langchain/evaluation/qa/eval_chain.py b/langchain/evaluation/qa/eval_chain.py index 725539616c2..cbcb564eaec 100644 --- a/langchain/evaluation/qa/eval_chain.py +++ b/langchain/evaluation/qa/eval_chain.py @@ -6,11 +6,11 @@ from typing import Any, List, Optional, Sequence from pydantic import Extra from langchain import PromptTemplate -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import Callbacks from langchain.chains.llm import LLMChain from langchain.evaluation.qa.eval_prompt import CONTEXT_PROMPT, COT_PROMPT, PROMPT from langchain.evaluation.schema import LLMEvalChain, StringEvaluator +from langchain.schema.language_model import BaseLanguageModel def _parse_string_eval_output(text: str) -> dict: diff --git a/langchain/evaluation/qa/generate_chain.py 
b/langchain/evaluation/qa/generate_chain.py index 8fd01d6a67e..1a2bc24cd20 100644 --- a/langchain/evaluation/qa/generate_chain.py +++ b/langchain/evaluation/qa/generate_chain.py @@ -3,9 +3,9 @@ from __future__ import annotations from typing import Any -from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.evaluation.qa.generate_prompt import PROMPT +from langchain.schema.language_model import BaseLanguageModel class QAGenerateChain(LLMChain): diff --git a/langchain/evaluation/run_evaluators/implementations.py b/langchain/evaluation/run_evaluators/implementations.py index 1c8ad643b9f..f627e43bacd 100644 --- a/langchain/evaluation/run_evaluators/implementations.py +++ b/langchain/evaluation/run_evaluators/implementations.py @@ -4,7 +4,6 @@ from langchainplus_sdk.evaluation import EvaluationResult from langchainplus_sdk.schemas import Example, Run, RunTypeEnum from pydantic import BaseModel, Field -from langchain.base_language import BaseLanguageModel from langchain.chat_models.base import BaseChatModel from langchain.evaluation.agents.trajectory_eval_chain import ( TrajectoryEvalChain, @@ -24,6 +23,7 @@ from langchain.evaluation.run_evaluators.base import ( ) from langchain.prompts.prompt import PromptTemplate from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.tools.base import BaseTool _QA_PROMPTS = { diff --git a/langchain/evaluation/schema.py b/langchain/evaluation/schema.py index 57fea9e6cc4..1f76aeecfbb 100644 --- a/langchain/evaluation/schema.py +++ b/langchain/evaluation/schema.py @@ -7,9 +7,9 @@ from enum import Enum from typing import Any, Optional, Sequence, Tuple from warnings import warn -from langchain.base_language import BaseLanguageModel from langchain.chains.base import Chain from langchain.schema.agent import AgentAction +from langchain.schema.language_model import BaseLanguageModel logger = logging.getLogger(__name__) diff --git a/langchain/example_generator.py b/langchain/example_generator.py index e1ce34d8fcc..e825b9eb988 100644 --- a/langchain/example_generator.py +++ b/langchain/example_generator.py @@ -1,10 +1,10 @@ """Utility functions for working with prompts.""" from typing import List -from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.prompts.few_shot import FewShotPromptTemplate from langchain.prompts.prompt import PromptTemplate +from langchain.schema.language_model import BaseLanguageModel TEST_GEN_TEMPLATE_SUFFIX = "Add another example." 
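Every hunk so far follows the same mechanical pattern: remove the import of BaseLanguageModel from langchain.base_language and re-import it from its new home, langchain.schema.language_model. A minimal sketch of what that migration looks like in downstream code is below; the module and chain shown are hypothetical illustrations, not part of this patch.

# Before this change:
#   from langchain.base_language import BaseLanguageModel
# After this change, the canonical location is the schema package:
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel


def build_summary_chain(llm: BaseLanguageModel) -> LLMChain:
    # Accept any concrete model (plain LLM or chat model) through the shared base class.
    prompt = PromptTemplate.from_template("Summarize the following text:\n\n{text}")
    return LLMChain(llm=llm, prompt=prompt)

Typing against BaseLanguageModel rather than a concrete model class is what lets the chains touched in this diff stay agnostic to whether a text-completion model or a chat model is plugged in.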
diff --git a/langchain/experimental/autonomous_agents/baby_agi/baby_agi.py b/langchain/experimental/autonomous_agents/baby_agi/baby_agi.py index ba87e5edede..446a676cac8 100644 --- a/langchain/experimental/autonomous_agents/baby_agi/baby_agi.py +++ b/langchain/experimental/autonomous_agents/baby_agi/baby_agi.py @@ -4,7 +4,6 @@ from typing import Any, Dict, List, Optional from pydantic import BaseModel, Field -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.experimental.autonomous_agents.baby_agi.task_creation import ( @@ -16,6 +15,7 @@ from langchain.experimental.autonomous_agents.baby_agi.task_execution import ( from langchain.experimental.autonomous_agents.baby_agi.task_prioritization import ( TaskPrioritizationChain, ) +from langchain.schema.language_model import BaseLanguageModel from langchain.vectorstores.base import VectorStore diff --git a/langchain/experimental/autonomous_agents/baby_agi/task_creation.py b/langchain/experimental/autonomous_agents/baby_agi/task_creation.py index d3a1dc81567..7f7713cdc8d 100644 --- a/langchain/experimental/autonomous_agents/baby_agi/task_creation.py +++ b/langchain/experimental/autonomous_agents/baby_agi/task_creation.py @@ -1,5 +1,5 @@ from langchain import LLMChain, PromptTemplate -from langchain.base_language import BaseLanguageModel +from langchain.schema.language_model import BaseLanguageModel class TaskCreationChain(LLMChain): diff --git a/langchain/experimental/autonomous_agents/baby_agi/task_execution.py b/langchain/experimental/autonomous_agents/baby_agi/task_execution.py index aac943c03fe..d968395c852 100644 --- a/langchain/experimental/autonomous_agents/baby_agi/task_execution.py +++ b/langchain/experimental/autonomous_agents/baby_agi/task_execution.py @@ -1,5 +1,5 @@ from langchain import LLMChain, PromptTemplate -from langchain.base_language import BaseLanguageModel +from langchain.schema.language_model import BaseLanguageModel class TaskExecutionChain(LLMChain): diff --git a/langchain/experimental/autonomous_agents/baby_agi/task_prioritization.py b/langchain/experimental/autonomous_agents/baby_agi/task_prioritization.py index d8b44c585d4..34affa398a1 100644 --- a/langchain/experimental/autonomous_agents/baby_agi/task_prioritization.py +++ b/langchain/experimental/autonomous_agents/baby_agi/task_prioritization.py @@ -1,5 +1,5 @@ from langchain import LLMChain, PromptTemplate -from langchain.base_language import BaseLanguageModel +from langchain.schema.language_model import BaseLanguageModel class TaskPrioritizationChain(LLMChain): diff --git a/langchain/experimental/generative_agents/generative_agent.py b/langchain/experimental/generative_agents/generative_agent.py index 187a0d0c1e1..3d3db854c43 100644 --- a/langchain/experimental/generative_agents/generative_agent.py +++ b/langchain/experimental/generative_agents/generative_agent.py @@ -5,9 +5,9 @@ from typing import Any, Dict, List, Optional, Tuple from pydantic import BaseModel, Field from langchain import LLMChain -from langchain.base_language import BaseLanguageModel from langchain.experimental.generative_agents.memory import GenerativeAgentMemory from langchain.prompts import PromptTemplate +from langchain.schema.language_model import BaseLanguageModel class GenerativeAgent(BaseModel): diff --git a/langchain/experimental/generative_agents/memory.py b/langchain/experimental/generative_agents/memory.py index b828b05f97e..67303f9c106 100644 --- 
a/langchain/experimental/generative_agents/memory.py +++ b/langchain/experimental/generative_agents/memory.py @@ -4,10 +4,10 @@ from datetime import datetime from typing import Any, Dict, List, Optional from langchain import LLMChain -from langchain.base_language import BaseLanguageModel from langchain.prompts import PromptTemplate from langchain.retrievers import TimeWeightedVectorStoreRetriever from langchain.schema import BaseMemory, Document +from langchain.schema.language_model import BaseLanguageModel from langchain.utils import mock_now logger = logging.getLogger(__name__) diff --git a/langchain/experimental/plan_and_execute/executors/agent_executor.py b/langchain/experimental/plan_and_execute/executors/agent_executor.py index 26abdf2c5f6..2b452d8f90c 100644 --- a/langchain/experimental/plan_and_execute/executors/agent_executor.py +++ b/langchain/experimental/plan_and_execute/executors/agent_executor.py @@ -2,8 +2,8 @@ from typing import List from langchain.agents.agent import AgentExecutor from langchain.agents.structured_chat.base import StructuredChatAgent -from langchain.base_language import BaseLanguageModel from langchain.experimental.plan_and_execute.executors.base import ChainExecutor +from langchain.schema.language_model import BaseLanguageModel from langchain.tools import BaseTool HUMAN_MESSAGE_TEMPLATE = """Previous steps: {previous_steps} diff --git a/langchain/experimental/plan_and_execute/planners/chat_planner.py b/langchain/experimental/plan_and_execute/planners/chat_planner.py index 1fb879a68f0..68eb5dad205 100644 --- a/langchain/experimental/plan_and_execute/planners/chat_planner.py +++ b/langchain/experimental/plan_and_execute/planners/chat_planner.py @@ -1,6 +1,5 @@ import re -from langchain.base_language import BaseLanguageModel from langchain.chains import LLMChain from langchain.experimental.plan_and_execute.planners.base import LLMPlanner from langchain.experimental.plan_and_execute.schema import ( @@ -9,6 +8,7 @@ from langchain.experimental.plan_and_execute.schema import ( Step, ) from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import SystemMessage SYSTEM_PROMPT = ( diff --git a/langchain/indexes/graph.py b/langchain/indexes/graph.py index 171826d68e8..ddc4e00400f 100644 --- a/langchain/indexes/graph.py +++ b/langchain/indexes/graph.py @@ -3,12 +3,12 @@ from typing import Optional, Type from pydantic import BaseModel -from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.graphs.networkx_graph import NetworkxEntityGraph, parse_triples from langchain.indexes.prompts.knowledge_triplet_extraction import ( KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT, ) +from langchain.schema.language_model import BaseLanguageModel class GraphIndexCreator(BaseModel): diff --git a/langchain/indexes/vectorstore.py b/langchain/indexes/vectorstore.py index f07d01a676d..daa83092d07 100644 --- a/langchain/indexes/vectorstore.py +++ b/langchain/indexes/vectorstore.py @@ -2,7 +2,6 @@ from typing import Any, List, Optional, Type from pydantic import BaseModel, Extra, Field -from langchain.base_language import BaseLanguageModel from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain from langchain.chains.retrieval_qa.base import RetrievalQA from langchain.document_loaders.base import BaseLoader @@ -10,6 +9,7 @@ from langchain.embeddings.base import Embeddings from langchain.embeddings.openai 
import OpenAIEmbeddings from langchain.llms.openai import OpenAI from langchain.schema import Document +from langchain.schema.language_model import BaseLanguageModel from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter from langchain.vectorstores.base import VectorStore from langchain.vectorstores.chroma import Chroma diff --git a/langchain/llms/base.py b/langchain/llms/base.py index 5a3ab0aafba..b11712f61ec 100644 --- a/langchain/llms/base.py +++ b/langchain/llms/base.py @@ -33,7 +33,6 @@ from tenacity import ( ) import langchain -from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManager, @@ -49,6 +48,7 @@ from langchain.schema import ( PromptValue, RunInfo, ) +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import AIMessage, BaseMessage, get_buffer_string logger = logging.getLogger(__name__) diff --git a/langchain/llms/openai.py b/langchain/llms/openai.py index ef08c2551ad..4eb28f3b90f 100644 --- a/langchain/llms/openai.py +++ b/langchain/llms/openai.py @@ -186,7 +186,7 @@ class BaseOpenAI(BaseLLM): @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" - all_required_field_names = cls.all_required_field_names() + all_required_field_names = cls._all_required_field_names() extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name in extra: diff --git a/langchain/memory/entity.py b/langchain/memory/entity.py index 1da66f3da37..5691ac51abd 100644 --- a/langchain/memory/entity.py +++ b/langchain/memory/entity.py @@ -5,7 +5,6 @@ from typing import Any, Dict, Iterable, List, Optional from pydantic import BaseModel, Field -from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import ( @@ -14,6 +13,7 @@ from langchain.memory.prompt import ( ) from langchain.memory.utils import get_prompt_input_key from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import BaseMessage, get_buffer_string logger = logging.getLogger(__name__) diff --git a/langchain/memory/kg.py b/langchain/memory/kg.py index bad45eda9cc..dc1019fc12d 100644 --- a/langchain/memory/kg.py +++ b/langchain/memory/kg.py @@ -2,7 +2,6 @@ from typing import Any, Dict, List, Type, Union from pydantic import Field -from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.graphs import NetworkxEntityGraph from langchain.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples @@ -13,6 +12,7 @@ from langchain.memory.prompt import ( ) from langchain.memory.utils import get_prompt_input_key from langchain.schema import BasePromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import BaseMessage, SystemMessage, get_buffer_string diff --git a/langchain/memory/summary.py b/langchain/memory/summary.py index 7345cd72759..449a4361fed 100644 --- a/langchain/memory/summary.py +++ b/langchain/memory/summary.py @@ -4,7 +4,6 @@ from typing import Any, Dict, List, Type from pydantic import BaseModel, root_validator -from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain 
from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import SUMMARY_PROMPT @@ -12,6 +11,7 @@ from langchain.schema import ( BaseChatMessageHistory, BasePromptTemplate, ) +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import BaseMessage, SystemMessage, get_buffer_string diff --git a/langchain/memory/token_buffer.py b/langchain/memory/token_buffer.py index 63b00007f49..80f76c3b76f 100644 --- a/langchain/memory/token_buffer.py +++ b/langchain/memory/token_buffer.py @@ -1,7 +1,7 @@ from typing import Any, Dict, List -from langchain.base_language import BaseLanguageModel from langchain.memory.chat_memory import BaseChatMemory +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import BaseMessage, get_buffer_string diff --git a/langchain/output_parsers/fix.py b/langchain/output_parsers/fix.py index 416f88bb001..750a0654165 100644 --- a/langchain/output_parsers/fix.py +++ b/langchain/output_parsers/fix.py @@ -2,10 +2,10 @@ from __future__ import annotations from typing import TypeVar -from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT from langchain.schema import BaseOutputParser, BasePromptTemplate, OutputParserException +from langchain.schema.language_model import BaseLanguageModel T = TypeVar("T") diff --git a/langchain/output_parsers/retry.py b/langchain/output_parsers/retry.py index c0061e4278d..e4af4b23f65 100644 --- a/langchain/output_parsers/retry.py +++ b/langchain/output_parsers/retry.py @@ -2,7 +2,6 @@ from __future__ import annotations from typing import TypeVar -from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.prompts.prompt import PromptTemplate from langchain.schema import ( @@ -11,6 +10,7 @@ from langchain.schema import ( OutputParserException, PromptValue, ) +from langchain.schema.language_model import BaseLanguageModel NAIVE_COMPLETION_RETRY = """Prompt: {prompt} diff --git a/langchain/retrievers/document_compressors/chain_extract.py b/langchain/retrievers/document_compressors/chain_extract.py index 94ff43842fd..a50703f2cc2 100644 --- a/langchain/retrievers/document_compressors/chain_extract.py +++ b/langchain/retrievers/document_compressors/chain_extract.py @@ -5,13 +5,13 @@ import asyncio from typing import Any, Callable, Dict, Optional, Sequence from langchain import LLMChain, PromptTemplate -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import Callbacks from langchain.retrievers.document_compressors.base import BaseDocumentCompressor from langchain.retrievers.document_compressors.chain_extract_prompt import ( prompt_template, ) from langchain.schema import BaseOutputParser, Document +from langchain.schema.language_model import BaseLanguageModel def default_get_input(query: str, doc: Document) -> Dict[str, Any]: diff --git a/langchain/retrievers/document_compressors/chain_filter.py b/langchain/retrievers/document_compressors/chain_filter.py index ae038175a9e..5b4aae949dc 100644 --- a/langchain/retrievers/document_compressors/chain_filter.py +++ b/langchain/retrievers/document_compressors/chain_filter.py @@ -2,7 +2,6 @@ from typing import Any, Callable, Dict, Optional, Sequence from langchain import LLMChain, PromptTemplate -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import Callbacks from 
langchain.output_parsers.boolean import BooleanOutputParser from langchain.retrievers.document_compressors.base import BaseDocumentCompressor @@ -10,6 +9,7 @@ from langchain.retrievers.document_compressors.chain_filter_prompt import ( prompt_template, ) from langchain.schema import BasePromptTemplate, Document +from langchain.schema.language_model import BaseLanguageModel def _get_default_chain_prompt() -> PromptTemplate: diff --git a/langchain/retrievers/self_query/base.py b/langchain/retrievers/self_query/base.py index c139ca736b2..60e63087c60 100644 --- a/langchain/retrievers/self_query/base.py +++ b/langchain/retrievers/self_query/base.py @@ -5,7 +5,6 @@ from typing import Any, Dict, List, Optional, Type, cast from pydantic import BaseModel, Field, root_validator from langchain import LLMChain -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, @@ -19,6 +18,7 @@ from langchain.retrievers.self_query.pinecone import PineconeTranslator from langchain.retrievers.self_query.qdrant import QdrantTranslator from langchain.retrievers.self_query.weaviate import WeaviateTranslator from langchain.schema import BaseRetriever, Document +from langchain.schema.language_model import BaseLanguageModel from langchain.vectorstores import ( Chroma, MyScale, diff --git a/langchain/schema/__init__.py b/langchain/schema/__init__.py index 818ff4113d7..1e660248211 100644 --- a/langchain/schema/__init__.py +++ b/langchain/schema/__init__.py @@ -1,5 +1,6 @@ from langchain.schema.agent import AgentAction, AgentFinish from langchain.schema.document import BaseDocumentTransformer, Document +from langchain.schema.language_model import BaseLanguageModel from langchain.schema.memory import BaseChatMessageHistory, BaseMemory from langchain.schema.messages import ( AIMessage, @@ -66,5 +67,6 @@ __all__ = [ "BaseOutputParser", "BaseLLMOutputParser", "BasePromptTemplate", + "BaseLanguageModel", "format_document", ] diff --git a/langchain/schema/language_model.py b/langchain/schema/language_model.py new file mode 100644 index 00000000000..2c3727bc7e7 --- /dev/null +++ b/langchain/schema/language_model.py @@ -0,0 +1,254 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any, List, Optional, Sequence, Set + +from langchain.load.serializable import Serializable +from langchain.schema.messages import BaseMessage, get_buffer_string +from langchain.schema.output import LLMResult +from langchain.schema.prompt import PromptValue + +if TYPE_CHECKING: + from langchain.callbacks.manager import Callbacks + + +def _get_token_ids_default_method(text: str) -> List[int]: + """Encode the text into token IDs.""" + # TODO: this method may not be exact. + # TODO: this method may differ based on model (eg codex). + try: + from transformers import GPT2TokenizerFast + except ImportError: + raise ImportError( + "Could not import transformers python package. " + "This is needed in order to calculate get_token_ids. " + "Please install it with `pip install transformers`." + ) + # create a GPT-2 tokenizer instance + tokenizer = GPT2TokenizerFast.from_pretrained("gpt2") + + # tokenize the text using the GPT-2 tokenizer + return tokenizer.encode(text) + + +class BaseLanguageModel(Serializable, ABC): + """Abstract base class for interfacing with language models. + + All language model wrappers inherit from BaseLanguageModel. 
+ + Exposes three main methods: + - generate_prompt: generate language model outputs for a sequence of prompt + values. A prompt value is a model input that can be converted to any language + model input format (string or messages). + - predict: pass in a single string to a language model and return a string + prediction. + - predict_messages: pass in a sequence of BaseMessages (corresponding to a single + model call) to a language model and return a BaseMessage prediction. + + Each of these has an equivalent asynchronous method. + """ + + @abstractmethod + def generate_prompt( + self, + prompts: List[PromptValue], + stop: Optional[List[str]] = None, + callbacks: Callbacks = None, + **kwargs: Any, + ) -> LLMResult: + """Pass a sequence of prompts to the model and return model generations. + + This method should make use of batched calls for models that expose a batched + API. + + Use this method when you want to: + 1. take advantage of batched calls, + 2. need more output from the model than just the top generated value, + 3. are building chains that are agnostic to the underlying language model + type (e.g., pure text completion models vs chat models). + + Args: + prompts: List of PromptValues. A PromptValue is an object that can be + converted to match the format of any language model (string for pure + text generation models and BaseMessages for chat models). + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + callbacks: Callbacks to pass through. Used for executing additional + functionality, such as logging or streaming, throughout generation. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + An LLMResult, which contains a list of candidate Generations for each input + prompt and additional model provider-specific output. + """ + + @abstractmethod + async def agenerate_prompt( + self, + prompts: List[PromptValue], + stop: Optional[List[str]] = None, + callbacks: Callbacks = None, + **kwargs: Any, + ) -> LLMResult: + """Asynchronously pass a sequence of prompts and return model generations. + + This method should make use of batched calls for models that expose a batched + API. + + Use this method when you want to: + 1. take advantage of batched calls, + 2. need more output from the model than just the top generated value, + 3. are building chains that are agnostic to the underlying language model + type (e.g., pure text completion models vs chat models). + + Args: + prompts: List of PromptValues. A PromptValue is an object that can be + converted to match the format of any language model (string for pure + text generation models and BaseMessages for chat models). + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + callbacks: Callbacks to pass through. Used for executing additional + functionality, such as logging or streaming, throughout generation. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + An LLMResult, which contains a list of candidate Generations for each input + prompt and additional model provider-specific output. + """ + + @abstractmethod + def predict( + self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any + ) -> str: + """Pass a single string input to the model and return a string prediction. + + Use this method when passing in raw text. 
If you want to pass in specific + types of chat messages, use predict_messages. + + Args: + text: String input to pass to the model. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + Top model prediction as a string. + """ + + @abstractmethod + def predict_messages( + self, + messages: List[BaseMessage], + *, + stop: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> BaseMessage: + """Pass a message sequence to the model and return a message prediction. + + Use this method when passing in chat messages. If you want to pass in raw text, + use predict. + + Args: + messages: A sequence of chat messages corresponding to a single model input. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + Top model prediction as a message. + """ + + @abstractmethod + async def apredict( + self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any + ) -> str: + """Asynchronously pass a string to the model and return a string prediction. + + Use this method when calling pure text generation models and only the top + candidate generation is needed. + + Args: + text: String input to pass to the model. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + Top model prediction as a string. + """ + + @abstractmethod + async def apredict_messages( + self, + messages: List[BaseMessage], + *, + stop: Optional[Sequence[str]] = None, + **kwargs: Any, + ) -> BaseMessage: + """Asynchronously pass messages to the model and return a message prediction. + + Use this method when calling chat models and only the top + candidate generation is needed. + + Args: + messages: A sequence of chat messages corresponding to a single model input. + stop: Stop words to use when generating. Model output is cut off at the + first occurrence of any of these substrings. + **kwargs: Arbitrary additional keyword arguments. These are usually passed + to the model provider API call. + + Returns: + Top model prediction as a message. + """ + + def get_token_ids(self, text: str) -> List[int]: + """Return the ordered ids of the tokens in a text. + + Args: + text: The string input to tokenize. + + Returns: + A list of ids corresponding to the tokens in the text, in order they occur + in the text. + """ + return _get_token_ids_default_method(text) + + def get_num_tokens(self, text: str) -> int: + """Get the number of tokens present in the text. + + Useful for checking if an input will fit in a model's context window. + + Args: + text: The string input to tokenize. + + Returns: + The integer number of tokens in the text. + """ + return len(self.get_token_ids(text)) + + def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: + """Get the number of tokens in the messages. + + Useful for checking if an input will fit in a model's context window. + + Args: + messages: The message inputs to tokenize. + + Returns: + The sum of the number of tokens across the messages. 
+ """ + return sum([self.get_num_tokens(get_buffer_string([m])) for m in messages]) + + @classmethod + def _all_required_field_names(cls) -> Set: + all_required_field_names = set() + for field in cls.__fields__.values(): + all_required_field_names.add(field.name) + if field.has_alias: + all_required_field_names.add(field.alias) + return all_required_field_names diff --git a/langchain/tools/spark_sql/tool.py b/langchain/tools/spark_sql/tool.py index 43f862aa253..0b23741e923 100644 --- a/langchain/tools/spark_sql/tool.py +++ b/langchain/tools/spark_sql/tool.py @@ -4,7 +4,7 @@ from typing import Any, Dict, Optional from pydantic import BaseModel, Extra, Field, root_validator -from langchain.base_language import BaseLanguageModel +from langchain.schema.language_model import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, diff --git a/langchain/tools/sql_database/tool.py b/langchain/tools/sql_database/tool.py index 11254047f91..5ab9a10ef45 100644 --- a/langchain/tools/sql_database/tool.py +++ b/langchain/tools/sql_database/tool.py @@ -4,7 +4,7 @@ from typing import Any, Dict, Optional from pydantic import BaseModel, Extra, Field, root_validator -from langchain.base_language import BaseLanguageModel +from langchain.schema.language_model import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, diff --git a/langchain/tools/vectorstore/tool.py b/langchain/tools/vectorstore/tool.py index c5fae60440a..126f4bbd4a0 100644 --- a/langchain/tools/vectorstore/tool.py +++ b/langchain/tools/vectorstore/tool.py @@ -5,13 +5,13 @@ from typing import Any, Dict, Optional from pydantic import BaseModel, Field -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.chains import RetrievalQA, RetrievalQAWithSourcesChain from langchain.llms.openai import OpenAI +from langchain.schema.language_model import BaseLanguageModel from langchain.tools.base import BaseTool from langchain.vectorstores.base import VectorStore diff --git a/tests/integration_tests/test_schema.py b/tests/integration_tests/test_schema.py index 4c062244c09..9ff2609476f 100644 --- a/tests/integration_tests/test_schema.py +++ b/tests/integration_tests/test_schema.py @@ -1,6 +1,6 @@ """Test formatting functionality.""" -from langchain.base_language import _get_token_ids_default_method +from langchain.schema.language_model import _get_token_ids_default_method class TestTokenCountingWithGPT2Tokenizer: diff --git a/tests/unit_tests/client/test_runner_utils.py b/tests/unit_tests/client/test_runner_utils.py index 3c767cdfcc0..5deae684548 100644 --- a/tests/unit_tests/client/test_runner_utils.py +++ b/tests/unit_tests/client/test_runner_utils.py @@ -8,7 +8,6 @@ import pytest from langchainplus_sdk.client import LangChainPlusClient from langchainplus_sdk.schemas import Dataset, Example -from langchain.base_language import BaseLanguageModel from langchain.chains.base import Chain from langchain.chains.transform import TransformChain from langchain.client.runner_utils import ( @@ -20,6 +19,7 @@ from langchain.client.runner_utils import ( run_llm_or_chain, ) from langchain.schema import LLMResult +from langchain.schema.language_model import BaseLanguageModel from tests.unit_tests.llms.fake_chat_model import FakeChatModel from tests.unit_tests.llms.fake_llm import FakeLLM