refactor(langchain-classic): retarget deprecations to create_agent, other chores (#37164)

Sweep classic deprecations so every removal lands on `2.0.0`, runtime
warnings carry the auto-generated since/removal/alternative line, and
replacements steer to `langchain.agents.create_agent` and
`with_structured_output(...)` instead of pre-v1 LangGraph +
`python.langchain.com` links.

## Changes

- **Bump removal targets from `1.0` / `1.0.0` to `2.0.0`** across
agents, chains, memory, retrievers, structured-output, vectorstore
toolkits, and the `langchain_classic._api.module_import` shim — gives
users a real runway now that v1 has shipped.
- **Move bespoke `message=` strings onto `addendum=`** (or split into
`alternative=` + `addendum=`). `warn_deprecated` skips the
auto-generated since/removal/alternative line whenever `message=` is
set, so the prior pattern silently dropped that info from the runtime
`LangChainDeprecationWarning`. Matches the pattern already used in
`HTMLHeaderTextSplitter.split_text_from_url`, which is updated for
consistency.
- **Repoint `alternative=` at v1 replacements**: chains/memory/agent
toolkits → `langchain.agents.create_agent` (with checkpointer or
retrieval-tool guidance in the addendum); `openai_functions` and
`chains/structured_output` → `ChatModel.with_structured_output(...)`;
`openapi` chains → `ChatModel.bind_tools(...)` + HTTP client.
`ConversationChain` no longer points at `RunnableWithMessageHistory`.
- **Refresh `AGENT_DEPRECATION_WARNING`** in
`langchain_classic._api.deprecation` — drop stale LangGraph and
`python.langchain.com` links in favor of `langchain.agents.create_agent`
and the `docs.langchain.com/oss/python/migrate/langchain-v1` guide.
Propagates to all 13 caller sites in `agents/`.
- **Newly deprecate `langchain_classic.chat_models.init_chat_model` and
`langchain_classic.embeddings.init_embeddings`** with the framing
*"maintained in `langchain`; `langchain-classic` retains this entry
point for import-compatibility only"*. The classic docstring examples
and the warning admonition both point at `langchain.chat_models`.
- **Improve `init_chat_model` docstrings** in both `langchain_v1` and
the classic copy: clarify `provider:model` prefix vs. `model_provider=`,
recommend pinned IDs over moving aliases, add the `upstage` provider
row, and refresh examples to GA models (`gpt-5.5`, `claude-opus-4-7`).
- **Standardize partner Anthropic deprecations**: replace
`AnthropicLLM`'s `model_validator(raise_warning)` with
`@deprecated(since="0.1.0", removal="2.0.0",
alternative="ChatAnthropic")`, and pin the `ChatAnthropic`
`output_format` runtime warning at `langchain-anthropic 2.0.0` instead
of "a future version".
This commit is contained in:
Mason Daugherty
2026-05-03 13:15:59 -04:00
committed by GitHub
parent 255f227541
commit 5a9b1ec2dc
68 changed files with 550 additions and 620 deletions

View File

@@ -7,18 +7,12 @@ from langchain_core._api.deprecation import (
warn_deprecated,
)
# TODO: this is old, fix
AGENT_DEPRECATION_WARNING = (
"LangChain agents will continue to be supported, but it is recommended for new "
"use cases to be built with LangGraph. LangGraph offers a more flexible and "
"full-featured framework for building agents, including support for "
"tool-calling, persistence of state, and human-in-the-loop workflows. For "
"details, refer to the "
"[LangGraph documentation](https://langchain-ai.github.io/langgraph/)"
" as well as guides for "
"[Migrating from AgentExecutor](https://python.langchain.com/docs/how_to/migrate_agent/)"
" and LangGraph's "
"[Pre-built ReAct agent](https://langchain-ai.github.io/langgraph/how-tos/create-react-agent/)."
"Use `langchain.agents.create_agent` for new applications. It provides a "
"more flexible agent factory with middleware support, structured output, "
"and integration with LangGraph for persistence, streaming, and "
"human-in-the-loop workflows. Migration guide: "
"https://docs.langchain.com/oss/python/migrate/langchain-v1"
)

View File

@@ -97,7 +97,7 @@ def create_importer(
warn_deprecated(
since="0.1",
pending=False,
removal="1.0",
removal="2.0.0",
message=(
f"Importing {name} from {package} is deprecated. "
f"Please replace deprecated imports:\n\n"
@@ -131,7 +131,7 @@ def create_importer(
warn_deprecated(
since="0.1",
pending=False,
removal="1.0",
removal="2.0.0",
message=(
f"Importing {name} from {package} is deprecated. "
f"Please replace deprecated imports:\n\n"

View File

@@ -610,7 +610,7 @@ class RunnableMultiActionAgent(BaseMultiActionAgent):
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
removal="2.0.0",
)
class LLMSingleActionAgent(BaseSingleActionAgent):
"""Base class for single action agents."""
@@ -699,7 +699,7 @@ class LLMSingleActionAgent(BaseSingleActionAgent):
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
removal="2.0.0",
)
class Agent(BaseSingleActionAgent):
"""Agent that calls the language model and deciding the action.

View File

@@ -21,16 +21,11 @@ from langchain_classic.chains.llm import LLMChain
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function will continue to be supported, but it is recommended for new "
"use cases to be built with LangGraph. LangGraph offers a more flexible and "
"full-featured framework for building agents, including support for "
"tool-calling, persistence of state, and human-in-the-loop workflows. "
"See API reference for this function for a replacement implementation: "
"https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_agent.html " # noqa: E501
"Read more here on how to create agents that query vector stores: "
"https://python.langchain.com/docs/how_to/qa_chat_history_how_to/#agents"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Bind a vector store retrieval tool to an agent built with "
"`create_agent`. See https://docs.langchain.com/oss/python/langchain/agents"
),
)
def create_vectorstore_agent(
@@ -118,16 +113,11 @@ def create_vectorstore_agent(
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function will continue to be supported, but it is recommended for new "
"use cases to be built with LangGraph. LangGraph offers a more flexible and "
"full-featured framework for building agents, including support for "
"tool-calling, persistence of state, and human-in-the-loop workflows. "
"See API reference for this function for a replacement implementation: "
"https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_router_agent.html " # noqa: E501
"Read more here on how to create agents that query vector stores: "
"https://python.langchain.com/docs/how_to/qa_chat_history_how_to/#agents"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Bind a vector store retrieval tool per route to an agent built with "
"`create_agent`. See https://docs.langchain.com/oss/python/langchain/agents"
),
)
def create_vectorstore_router_agent(

View File

@@ -10,7 +10,7 @@ from langchain_classic._api.deprecation import AGENT_DEPRECATION_WARNING
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
removal="2.0.0",
)
class AgentType(str, Enum):
"""An enum for agent types."""

View File

@@ -31,7 +31,7 @@ from langchain_classic.chains.llm import LLMChain
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
removal="2.0.0",
)
class ChatAgent(Agent):
"""Chat Agent."""

View File

@@ -29,7 +29,7 @@ from langchain_classic.chains import LLMChain
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
removal="2.0.0",
)
class ConversationalAgent(Agent):
"""An agent that holds a conversation in addition to using tools."""

View File

@@ -33,7 +33,7 @@ from langchain_classic.agents.utils import validate_tools_single_input
from langchain_classic.chains import LLMChain
@deprecated("0.1.0", alternative="create_json_chat_agent", removal="1.0")
@deprecated("0.1.0", alternative="create_json_chat_agent", removal="2.0.0")
class ConversationalChatAgent(Agent):
"""An agent designed to hold a conversation in addition to using tools."""

View File

@@ -19,7 +19,7 @@ from langchain_classic.agents.types import AGENT_TO_CLASS
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
removal="2.0.0",
)
def initialize_agent(
tools: Sequence[BaseTool],

View File

@@ -35,7 +35,7 @@ def _load_agent_from_tools(
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
@deprecated("0.1.0", removal="1.0")
@deprecated("0.1.0", removal="2.0.0")
def load_agent_from_config(
config: dict,
llm: BaseLanguageModel | None = None,
@@ -98,7 +98,7 @@ def load_agent_from_config(
return agent_cls(**combined_config)
@deprecated("0.1.0", removal="1.0")
@deprecated("0.1.0", removal="2.0.0")
def load_agent(
path: str | Path,
**kwargs: Any,

View File

@@ -40,7 +40,7 @@ class ChainConfig(NamedTuple):
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
removal="2.0.0",
)
class ZeroShotAgent(Agent):
"""Agent for the MRKL chain.
@@ -179,7 +179,7 @@ class ZeroShotAgent(Agent):
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
removal="2.0.0",
)
class MRKLChain(AgentExecutor):
"""Chain that implements the MRKL system."""

View File

@@ -35,7 +35,7 @@ from langchain_classic.agents.output_parsers.openai_functions import (
_NOT_SET = object()
@deprecated("0.1.0", alternative="create_openai_functions_agent", removal="1.0")
@deprecated("0.1.0", alternative="create_openai_functions_agent", removal="2.0.0")
class OpenAIFunctionsAgent(BaseSingleActionAgent):
"""An Agent driven by OpenAIs function powered API.

View File

@@ -103,7 +103,7 @@ def _parse_ai_message(message: BaseMessage) -> list[AgentAction] | AgentFinish:
_NOT_SET = object()
@deprecated("0.1.0", alternative="create_openai_tools_agent", removal="1.0")
@deprecated("0.1.0", alternative="create_openai_tools_agent", removal="2.0.0")
class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
"""Agent driven by OpenAIs function powered API.

View File

@@ -31,7 +31,7 @@ _LOOKUP_AND_SEARCH_TOOLS = {"Lookup", "Search"}
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
removal="2.0.0",
)
class ReActDocstoreAgent(Agent):
"""Agent for the ReAct chain."""
@@ -84,7 +84,7 @@ class ReActDocstoreAgent(Agent):
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
removal="2.0.0",
)
class DocstoreExplorer:
"""Class to assist with exploration of a document store."""
@@ -138,7 +138,7 @@ class DocstoreExplorer:
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
removal="2.0.0",
)
class ReActTextWorldAgent(ReActDocstoreAgent):
"""Agent for the ReAct TextWorld chain."""
@@ -165,7 +165,7 @@ class ReActTextWorldAgent(ReActDocstoreAgent):
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
removal="2.0.0",
)
class ReActChain(AgentExecutor):
"""[Deprecated] Chain that implements the ReAct paper."""

View File

@@ -28,7 +28,7 @@ if TYPE_CHECKING:
from langchain_community.utilities.serpapi import SerpAPIWrapper
@deprecated("0.1.0", alternative="create_self_ask_with_search", removal="1.0")
@deprecated("0.1.0", alternative="create_self_ask_with_search", removal="2.0.0")
class SelfAskWithSearchAgent(Agent):
"""Agent for the self-ask-with-search paper."""
@@ -73,7 +73,7 @@ class SelfAskWithSearchAgent(Agent):
return ""
@deprecated("0.1.0", removal="1.0")
@deprecated("0.1.0", removal="2.0.0")
class SelfAskWithSearchChain(AgentExecutor):
"""[Deprecated] Chain that does self-ask with search."""

View File

@@ -35,7 +35,7 @@ from langchain_classic.tools.render import render_text_description_and_args
HUMAN_MESSAGE_TEMPLATE = "{input}\n\n{agent_scratchpad}"
@deprecated("0.1.0", alternative="create_structured_chat_agent", removal="1.0")
@deprecated("0.1.0", alternative="create_structured_chat_agent", removal="2.0.0")
class StructuredChatAgent(Agent):
"""Structured Chat Agent."""

View File

@@ -19,7 +19,7 @@ from langchain_classic.agents.xml.prompt import agent_instructions
from langchain_classic.chains.llm import LLMChain
@deprecated("0.1.0", alternative="create_xml_agent", removal="1.0")
@deprecated("0.1.0", alternative="create_xml_agent", removal="2.0.0")
class XMLAgent(BaseSingleActionAgent):
"""Agent that uses XML tags.

View File

@@ -18,10 +18,13 @@ from pydantic import ConfigDict
@deprecated(
since="0.3.3",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"For agents that need to remember prior interactions, use "
"`create_agent` with checkpointing or the `Store` API. See "
"https://docs.langchain.com/oss/python/langchain/short-term-memory and "
"https://docs.langchain.com/oss/python/langchain/long-term-memory"
),
)
class BaseMemory(Serializable, ABC):

View File

@@ -58,12 +58,13 @@ try:
@deprecated(
since="0.2.13",
message=(
"This class is deprecated and will be removed in langchain 1.0. "
"See API reference for replacement: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.api.base.APIChain.html"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new agents with `create_agent` and bind a tool that issues "
"the HTTP request. See "
"https://docs.langchain.com/oss/python/langchain/agents"
),
removal="1.0",
)
class APIChain(Chain):
"""Chain that makes API calls and summarizes the responses to answer a question.

View File

@@ -365,7 +365,7 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
run_manager.get_sync() if run_manager else None,
)
@deprecated("0.1.0", alternative="invoke", removal="1.0")
@deprecated("0.1.0", alternative="invoke", removal="2.0.0")
def __call__(
self,
inputs: dict[str, Any] | Any,
@@ -417,7 +417,7 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
include_run_info=include_run_info,
)
@deprecated("0.1.0", alternative="ainvoke", removal="1.0")
@deprecated("0.1.0", alternative="ainvoke", removal="2.0.0")
async def acall(
self,
inputs: dict[str, Any] | Any,
@@ -576,7 +576,7 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
raise ValueError(msg)
return self.output_keys[0]
@deprecated("0.1.0", alternative="invoke", removal="1.0")
@deprecated("0.1.0", alternative="invoke", removal="2.0.0")
def run(
self,
*args: Any,
@@ -650,7 +650,7 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
)
raise ValueError(msg)
@deprecated("0.1.0", alternative="ainvoke", removal="1.0")
@deprecated("0.1.0", alternative="ainvoke", removal="2.0.0")
async def arun(
self,
*args: Any,
@@ -796,7 +796,7 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
msg = f"{save_path} must be json or yaml"
raise ValueError(msg)
@deprecated("0.1.0", alternative="batch", removal="1.0")
@deprecated("0.1.0", alternative="batch", removal="2.0.0")
def apply(
self,
input_list: list[builtins.dict[str, Any]],

View File

@@ -167,11 +167,13 @@ class BaseCombineDocumentsChain(Chain, ABC):
@deprecated(
since="0.2.7",
alternative=(
"example in API reference with more detail: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.base.AnalyzeDocumentChain.html"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new RAG flows with `create_agent`, a retrieval tool, and an "
"appropriate text splitter. See "
"https://docs.langchain.com/oss/python/langchain/rag"
),
removal="1.0",
)
class AnalyzeDocumentChain(Chain):
"""Chain that splits documents, then analyzes it in pieces.

View File

@@ -19,11 +19,11 @@ from langchain_classic.chains.llm import LLMChain
@deprecated(
since="0.3.1",
removal="1.0",
message=(
"This class is deprecated. Please see the migration guide here for "
"a recommended replacement: "
"https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new RAG flows with `create_agent` and a retrieval tool. See "
"https://docs.langchain.com/oss/python/langchain/rag"
),
)
class MapReduceDocumentsChain(BaseCombineDocumentsChain):

View File

@@ -20,11 +20,11 @@ from langchain_classic.output_parsers.regex import RegexParser
@deprecated(
since="0.3.1",
removal="1.0",
message=(
"This class is deprecated. Please see the migration guide here for "
"a recommended replacement: "
"https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new RAG flows with `create_agent` and a retrieval tool. See "
"https://docs.langchain.com/oss/python/langchain/rag"
),
)
class MapRerankDocumentsChain(BaseCombineDocumentsChain):

View File

@@ -130,11 +130,11 @@ async def acollapse_docs(
@deprecated(
since="0.3.1",
removal="1.0",
message=(
"This class is deprecated. Please see the migration guide here for "
"a recommended replacement: "
"https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new RAG flows with `create_agent` and a retrieval tool. See "
"https://docs.langchain.com/oss/python/langchain/rag"
),
)
class ReduceDocumentsChain(BaseCombineDocumentsChain):

View File

@@ -23,11 +23,11 @@ def _get_default_document_prompt() -> PromptTemplate:
@deprecated(
since="0.3.1",
removal="1.0",
message=(
"This class is deprecated. Please see the migration guide here for "
"a recommended replacement: "
"https://python.langchain.com/docs/versions/migrating_chains/refine_docs_chain/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new RAG flows with `create_agent` and a retrieval tool. See "
"https://docs.langchain.com/oss/python/langchain/rag"
),
)
class RefineDocumentsChain(BaseCombineDocumentsChain):

View File

@@ -103,11 +103,11 @@ def create_stuff_documents_chain(
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. Use the `create_stuff_documents_chain` constructor "
"instead. See migration guide here: "
"https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new RAG flows with `create_agent` and a retrieval tool. See "
"https://docs.langchain.com/oss/python/langchain/rag"
),
)
class StuffDocumentsChain(BaseCombineDocumentsChain):

View File

@@ -19,12 +19,13 @@ from langchain_classic.chains.llm import LLMChain
@deprecated(
since="0.2.13",
message=(
"This class is deprecated and will be removed in langchain 1.0. "
"See API reference for replacement: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.constitutional_ai.base.ConstitutionalChain.html"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Implement guardrails as middleware on an agent built with "
"`langchain.agents.create_agent`. See "
"https://docs.langchain.com/oss/python/langchain/guardrails"
),
removal="1.0",
)
class ConstitutionalChain(Chain):
r'''Chain for applying constitutional principles.

View File

@@ -13,8 +13,12 @@ from langchain_classic.memory.buffer import ConversationBufferMemory
@deprecated(
since="0.2.7",
alternative="langchain_core.runnables.history.RunnableWithMessageHistory",
removal="1.0",
alternative="langchain.agents.create_agent",
removal="2.0.0",
addendum=(
"Build a conversational agent with `langchain.agents.create_agent` and "
"persist message history via a LangGraph checkpointer."
),
)
class ConversationChain(LLMChain):
"""Chain to have a conversation and load context from memory.

View File

@@ -258,7 +258,7 @@ class BaseConversationalRetrievalChain(Chain):
"create_history_aware_retriever together with create_retrieval_chain "
"(see example in docstring)"
),
removal="1.0",
removal="2.0.0",
)
class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
r"""Chain for having a conversation based on retrieved documents.

View File

@@ -40,7 +40,7 @@ from langchain_classic.chains.base import Chain
@deprecated(
since="0.1.17",
alternative="RunnableSequence, e.g., `prompt | llm`",
removal="1.0",
removal="2.0.0",
)
class LLMChain(Chain):
"""Chain to run queries against LLMs.

View File

@@ -65,12 +65,13 @@ def _load_question_to_checked_assertions_chain(
@deprecated(
since="0.2.13",
message=(
"See LangGraph guides for a variety of self-reflection and corrective "
"strategies for question-answering and other tasks: "
"https://docs.langchain.com/oss/python/langchain/overview"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build self-reflection or corrective loops with `create_agent` (e.g. "
"via custom middleware). See "
"https://docs.langchain.com/oss/python/langchain/agents"
),
removal="1.0",
)
class LLMCheckerChain(Chain):
"""Chain for question-answering with self-verification.

View File

@@ -23,12 +23,12 @@ from langchain_classic.chains.llm_math.prompt import PROMPT
@deprecated(
since="0.2.13",
message=(
"This class is deprecated and will be removed in langchain 1.0. "
"See API reference for replacement: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.llm_math.base.LLMMathChain.html"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new agents with `create_agent` and bind a calculator/math tool. "
"See https://docs.langchain.com/oss/python/langchain/agents"
),
removal="1.0",
)
class LLMMathChain(Chain):
"""Chain that interprets a prompt and executes python code to do math.

View File

@@ -70,12 +70,13 @@ def _load_sequential_chain(
@deprecated(
since="0.2.13",
message=(
"See LangGraph guides for a variety of self-reflection and corrective "
"strategies for question-answering and other tasks: "
"https://docs.langchain.com/oss/python/langgraph/agentic-rag"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build self-reflection or corrective loops with `create_agent` (e.g. "
"via custom middleware). See "
"https://docs.langchain.com/oss/python/langchain/agents"
),
removal="1.0",
)
class LLMSummarizationCheckerChain(Chain):
"""Chain for question-answering with self-verification.

View File

@@ -676,11 +676,8 @@ type_to_loader_dict = {
@deprecated(
since="0.2.13",
message=(
"This function is deprecated and will be removed in langchain 1.0. "
"At that point chains must be imported from their respective modules."
),
removal="1.0",
removal="2.0.0",
addendum="Chains must be imported from their respective modules.",
)
def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
"""Load chain from Config Dict."""
@@ -699,11 +696,8 @@ def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
@deprecated(
since="0.2.13",
message=(
"This function is deprecated and will be removed in langchain 1.0. "
"At that point chains must be imported from their respective modules."
),
removal="1.0",
removal="2.0.0",
addendum="Chains must be imported from their respective modules.",
)
def load_chain(path: str | Path, **kwargs: Any) -> Chain:
"""Unified method for loading a chain from LangChainHub or local fs."""

View File

@@ -29,11 +29,11 @@ from langchain_classic.chains.llm import LLMChain
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"Refer to migration guide here for a recommended implementation using "
"LangGraph: https://docs.langchain.com/oss/python/langgraph/graph-api#map-reduce-and-the-send-api"
"."
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"For map-reduce branching, build a LangGraph using the Send API. See "
"https://docs.langchain.com/oss/python/langgraph/use-graph-api#map-reduce-and-the-send-api"
),
)
class MapReduceChain(Chain):

View File

@@ -18,13 +18,8 @@ from langchain_classic.chains.natbot.prompt import PROMPT
@deprecated(
since="0.2.13",
message=(
"Importing NatBotChain from langchain is deprecated and will be removed in "
"langchain 1.0. Please import from langchain_community instead: "
"from langchain_community.chains.natbot import NatBotChain. "
"You may need to pip install -U langchain-community."
),
removal="1.0",
removal="2.0.0",
alternative_import="langchain_community.chains.natbot.NatBotChain",
)
class NatBotChain(Chain):
"""Implement an LLM driven browser.

View File

@@ -38,7 +38,7 @@ __all__ = [
]
@deprecated(since="0.1.1", removal="1.0", alternative="create_openai_fn_runnable")
@deprecated(since="0.1.1", removal="2.0.0", alternative="create_openai_fn_runnable")
def create_openai_fn_chain(
functions: Sequence[dict[str, Any] | type[BaseModel] | Callable],
llm: BaseLanguageModel,
@@ -143,7 +143,7 @@ def create_openai_fn_chain(
@deprecated(
since="0.1.1",
removal="1.0",
removal="2.0.0",
alternative="ChatOpenAI.with_structured_output",
)
def create_structured_output_chain(

View File

@@ -122,7 +122,7 @@ def create_citation_fuzzy_match_runnable(llm: BaseChatModel) -> Runnable:
@deprecated(
since="0.2.13",
removal="1.0",
removal="2.0.0",
alternative="create_citation_fuzzy_match_runnable",
)
def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:

View File

@@ -46,31 +46,11 @@ Passage:
@deprecated(
since="0.1.14",
message=(
"LangChain has introduced a method called `with_structured_output` that"
"is available on ChatModels capable of tool calling."
"You can read more about the method here: "
"<https://docs.langchain.com/oss/python/langchain/models#structured-outputs>."
),
removal="1.0",
alternative=(
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
# Please reference to the documentation of structured_output
# to see an up to date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-opus-4-1-20250805", temperature=0)
structured_model = model.with_structured_output(Joke)
structured_model.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
removal="2.0.0",
alternative="ChatModel.with_structured_output(...)",
addendum=(
"Available on chat models capable of tool calling. See "
"https://docs.langchain.com/oss/python/langchain/structured-output"
),
)
def create_extraction_chain(
@@ -109,37 +89,11 @@ def create_extraction_chain(
@deprecated(
since="0.1.14",
message=(
"LangChain has introduced a method called `with_structured_output` that"
"is available on ChatModels capable of tool calling."
"You can read more about the method here: "
"<https://docs.langchain.com/oss/python/langchain/models#structured-outputs>. "
"Please follow our extraction use case documentation for more guidelines"
"on how to do information extraction with LLMs."
"<https://python.langchain.com/docs/use_cases/extraction/>. "
"If you notice other issues, please provide "
"feedback here:"
"<https://github.com/langchain-ai/langchain/discussions/18154>"
),
removal="1.0",
alternative=(
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
# Please reference to the documentation of structured_output
# to see an up to date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-opus-4-1-20250805", temperature=0)
structured_model = model.with_structured_output(Joke)
structured_model.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
removal="2.0.0",
alternative="ChatModel.with_structured_output(...)",
addendum=(
"Available on chat models capable of tool calling. See "
"https://docs.langchain.com/oss/python/langchain/structured-output"
),
)
def create_extraction_chain_pydantic(

View File

@@ -27,6 +27,12 @@ if TYPE_CHECKING:
_logger = logging.getLogger(__name__)
_OPENAPI_REPLACEMENT = (
"Bind your OpenAPI operations as tools on a chat model with "
"`ChatModel.bind_tools(...)` and execute the resulting tool calls with an "
"HTTP client (e.g. `requests` or `httpx`)."
)
def _format_url(url: str, path_params: dict) -> str:
expected_path_param = re.findall(r"{(.*?)}", url)
@@ -85,11 +91,8 @@ def _openapi_params_to_json_schema(params: list[Parameter], spec: OpenAPISpec) -
@deprecated(
since="1.0.4",
message=(
"This function is deprecated and will be removed in a future version. "
"Use LLM tool calling features directly with an HTTP client instead."
),
removal="2.0",
removal="2.0.0",
addendum=_OPENAPI_REPLACEMENT,
)
def openapi_spec_to_openai_fn(
spec: OpenAPISpec,
@@ -208,19 +211,18 @@ def openapi_spec_to_openai_fn(
@deprecated(
since="1.0.4",
message=(
"This class is deprecated and will be removed in a future version. "
"Use LLM tool calling features directly with an HTTP client instead."
),
removal="2.0",
removal="2.0.0",
addendum=_OPENAPI_REPLACEMENT,
)
class SimpleRequestChain(Chain):
"""Chain for making a simple request to an API endpoint."""
request_method: Callable
"""Method to use for making the request."""
output_key: str = "response"
"""Key to use for the output of the request."""
input_key: str = "function"
"""Key to use for the input of the request."""
@@ -267,11 +269,8 @@ class SimpleRequestChain(Chain):
@deprecated(
since="0.2.13",
message=(
"This function is deprecated and will be removed in a future version. "
"Use LLM tool calling features directly with an HTTP client instead."
),
removal="2.0",
removal="2.0.0",
addendum=_OPENAPI_REPLACEMENT,
)
def get_openapi_chain(
spec: OpenAPISpec | str,
@@ -292,8 +291,7 @@ def get_openapi_chain(
Args:
spec: OpenAPISpec or url/file/text string corresponding to one.
llm: language model, should be an OpenAI function-calling model, e.g.
`ChatOpenAI(model="gpt-3.5-turbo-0613")`.
llm: language model, should be an OpenAI function-calling model.
prompt: Main prompt template to use.
request_chain: Chain for taking the functions output and executing the request.
params: Request parameters.

View File

@@ -29,11 +29,13 @@ class AnswerWithSources(BaseModel):
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with structured responses: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new RAG flows with `create_agent`, a retrieval tool, and "
"`response_format` for structured responses. See "
"https://docs.langchain.com/oss/python/langchain/rag and "
"https://docs.langchain.com/oss/python/langchain/structured-output"
),
)
def create_qa_with_structure_chain(
@@ -112,11 +114,13 @@ def create_qa_with_structure_chain(
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new RAG flows with `create_agent`, a retrieval tool, and "
"`response_format` for structured responses. See "
"https://docs.langchain.com/oss/python/langchain/rag and "
"https://docs.langchain.com/oss/python/langchain/structured-output"
),
)
def create_qa_with_sources_chain(

View File

@@ -35,18 +35,12 @@ Passage:
@deprecated(
since="0.2.13",
message=(
"LangChain has introduced a method called `with_structured_output` that "
"is available on ChatModels capable of tool calling. "
"See API reference for this function for replacement: <"
"https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.tagging.create_tagging_chain.html"
"> You can read more about `with_structured_output` here: "
"<https://docs.langchain.com/oss/python/langchain/models#structured-outputs>. "
"If you notice other issues, please provide "
"feedback here: "
"<https://github.com/langchain-ai/langchain/discussions/18154>"
removal="2.0.0",
alternative="ChatModel.with_structured_output(...)",
addendum=(
"Available on chat models capable of tool calling. See "
"https://docs.langchain.com/oss/python/langchain/structured-output"
),
removal="1.0",
)
def create_tagging_chain(
schema: dict,
@@ -84,7 +78,7 @@ def create_tagging_chain(
)
```
Read more here: https://docs.langchain.com/oss/python/langchain/models#structured-outputs
Read more here: https://docs.langchain.com/oss/python/langchain/structured-output
Args:
schema: The schema of the entities to extract.
@@ -111,18 +105,12 @@ def create_tagging_chain(
@deprecated(
since="0.2.13",
message=(
"LangChain has introduced a method called `with_structured_output` that "
"is available on ChatModels capable of tool calling. "
"See API reference for this function for replacement: <"
"https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.tagging.create_tagging_chain_pydantic.html"
"> You can read more about `with_structured_output` here: "
"<https://docs.langchain.com/oss/python/langchain/models#structured-outputs>. "
"If you notice other issues, please provide "
"feedback here: "
"<https://github.com/langchain-ai/langchain/discussions/18154>"
removal="2.0.0",
alternative="ChatModel.with_structured_output(...)",
addendum=(
"Available on chat models capable of tool calling. See "
"https://docs.langchain.com/oss/python/langchain/structured-output"
),
removal="1.0",
)
def create_tagging_chain_pydantic(
pydantic_schema: Any,
@@ -160,7 +148,7 @@ def create_tagging_chain_pydantic(
)
```
Read more here: https://docs.langchain.com/oss/python/langchain/models#structured-outputs
Read more here: https://docs.langchain.com/oss/python/langchain/structured-output
Args:
pydantic_schema: The Pydantic schema of the entities to extract.

View File

@@ -16,38 +16,13 @@ If a property is not present and is not required in the function parameters, do
@deprecated(
since="0.1.14",
message=(
"LangChain has introduced a method called `with_structured_output` that"
"is available on ChatModels capable of tool calling."
"You can read more about the method here: "
"<https://docs.langchain.com/oss/python/langchain/models#structured-outputs>. "
"Please follow our extraction use case documentation for more guidelines"
"on how to do information extraction with LLMs."
"<https://python.langchain.com/docs/use_cases/extraction/>. "
"with_structured_output does not currently support a list of pydantic schemas. "
"If this is a blocker or if you notice other issues, please provide "
"feedback here:"
"<https://github.com/langchain-ai/langchain/discussions/18154>"
),
removal="1.0",
alternative=(
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
# Please reference to the documentation of structured_output
# to see an up to date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-opus-4-1-20250805", temperature=0)
structured_model = model.with_structured_output(Joke)
structured_model.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
removal="2.0.0",
alternative="ChatModel.with_structured_output(...)",
addendum=(
"Available on chat models capable of tool calling. Note: "
"`with_structured_output` does not currently support a list of pydantic "
"schemas. See "
"https://docs.langchain.com/oss/python/langchain/structured-output"
),
)
def create_extraction_chain_pydantic(

View File

@@ -18,11 +18,12 @@ from langchain_classic.chains.qa_generation.prompt import PROMPT_SELECTOR
@deprecated(
since="0.2.7",
alternative=(
"example in API reference with more detail: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new RAG flows with `create_agent` and a retrieval tool. See "
"https://docs.langchain.com/oss/python/langchain/rag"
),
removal="1.0",
)
class QAGenerationChain(Chain):
"""Base class for question-answer generation chains.

View File

@@ -36,11 +36,11 @@ from langchain_classic.chains.qa_with_sources.map_reduce_prompt import (
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new RAG flows with `create_agent` and a retrieval tool. See "
"https://docs.langchain.com/oss/python/langchain/rag"
),
)
class BaseQAWithSourcesChain(Chain, ABC):
@@ -217,11 +217,11 @@ class BaseQAWithSourcesChain(Chain, ABC):
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new RAG flows with `create_agent` and a retrieval tool. See "
"https://docs.langchain.com/oss/python/langchain/rag"
),
)
class QAWithSourcesChain(BaseQAWithSourcesChain):

View File

@@ -166,17 +166,11 @@ def _load_refine_chain(
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/"
"\nSee also the following migration guides for replacements "
"based on `chain_type`:\n"
"stuff: https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain\n"
"map_reduce: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain\n"
"refine: https://python.langchain.com/docs/versions/migrating_chains/refine_chain\n"
"map_rerank: https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain\n"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new RAG flows with `create_agent` and a retrieval tool. See "
"https://docs.langchain.com/oss/python/langchain/rag"
),
)
def load_qa_with_sources_chain(

View File

@@ -268,7 +268,7 @@ def get_query_constructor_prompt(
@deprecated(
since="0.2.13",
alternative="load_query_constructor_runnable",
removal="1.0",
removal="2.0.0",
)
def load_query_constructor_chain(
llm: BaseLanguageModel,

View File

@@ -231,16 +231,11 @@ def _load_refine_chain(
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. See the following migration guides for replacements "
"based on `chain_type`:\n"
"stuff: https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain\n"
"map_reduce: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain\n"
"refine: https://python.langchain.com/docs/versions/migrating_chains/refine_chain\n"
"map_rerank: https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain\n"
"\nSee also guides on retrieval and question-answering here: "
"https://python.langchain.com/docs/how_to/#qa-with-rag"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new RAG flows with `create_agent` and a retrieval tool. See "
"https://docs.langchain.com/oss/python/langchain/rag"
),
)
def load_qa_chain(

View File

@@ -30,11 +30,11 @@ from langchain_classic.chains.question_answering.stuff_prompt import PROMPT_SELE
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. Use the `create_retrieval_chain` constructor "
"instead. See migration guide here: "
"https://python.langchain.com/docs/versions/migrating_chains/retrieval_qa/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new RAG flows with `create_agent` and a retrieval tool. See "
"https://docs.langchain.com/oss/python/langchain/rag"
),
)
class BaseRetrievalQA(Chain):
@@ -208,11 +208,11 @@ class BaseRetrievalQA(Chain):
@deprecated(
since="0.1.17",
removal="1.0",
message=(
"This class is deprecated. Use the `create_retrieval_chain` constructor "
"instead. See migration guide here: "
"https://python.langchain.com/docs/versions/migrating_chains/retrieval_qa/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new RAG flows with `create_agent` and a retrieval tool. See "
"https://docs.langchain.com/oss/python/langchain/rag"
),
)
class RetrievalQA(BaseRetrievalQA):
@@ -297,11 +297,11 @@ class RetrievalQA(BaseRetrievalQA):
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. Use the `create_retrieval_chain` constructor "
"instead. See migration guide here: "
"https://python.langchain.com/docs/versions/migrating_chains/retrieval_qa/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build new RAG flows with `create_agent` and a retrieval tool. See "
"https://docs.langchain.com/oss/python/langchain/rag"
),
)
class VectorDBQA(BaseRetrievalQA):

View File

@@ -23,11 +23,12 @@ from langchain_classic.chains.router.base import RouterChain
@deprecated(
since="0.2.12",
removal="1.0",
message=(
"Use RunnableLambda to select from multiple prompt templates. See example "
"in API reference: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.router.llm_router.LLMRouterChain.html"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build routing logic with `create_agent` (e.g. with subagents or "
"prompt-selection middleware). See "
"https://docs.langchain.com/oss/python/langchain/agents"
),
)
class LLMRouterChain(RouterChain):

View File

@@ -24,10 +24,12 @@ from langchain_classic.chains.router.multi_prompt_prompt import (
@deprecated(
since="0.2.12",
removal="1.0",
message=(
"Please see migration guide here for recommended implementation: "
"https://python.langchain.com/docs/versions/migrating_chains/multi_prompt_chain/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"Build routing logic with `create_agent` (e.g. with subagents or "
"prompt-selection middleware). See "
"https://docs.langchain.com/oss/python/langchain/agents"
),
)
class MultiPromptChain(MultiRouteChain):

View File

@@ -30,37 +30,11 @@ from pydantic import BaseModel
@deprecated(
since="0.1.14",
message=(
"LangChain has introduced a method called `with_structured_output` that "
"is available on ChatModels capable of tool calling. "
"You can read more about the method here: "
"<https://docs.langchain.com/oss/python/langchain/models#structured-outputs>. "
"Please follow our extraction use case documentation for more guidelines "
"on how to do information extraction with LLMs. "
"<https://python.langchain.com/docs/use_cases/extraction/>. "
"If you notice other issues, please provide "
"feedback here: "
"<https://github.com/langchain-ai/langchain/discussions/18154>"
),
removal="1.0",
alternative=(
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
# Please reference to the documentation of structured_output
# to see an up to date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-opus-4-1-20250805", temperature=0)
structured_model = model.with_structured_output(Joke)
structured_model.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
removal="2.0.0",
alternative="ChatModel.with_structured_output(...)",
addendum=(
"Available on chat models capable of tool calling. See "
"https://docs.langchain.com/oss/python/langchain/structured-output"
),
)
def create_openai_fn_runnable(
@@ -149,37 +123,11 @@ def create_openai_fn_runnable(
@deprecated(
since="0.1.17",
message=(
"LangChain has introduced a method called `with_structured_output` that "
"is available on ChatModels capable of tool calling. "
"You can read more about the method here: "
"<https://docs.langchain.com/oss/python/langchain/models#structured-outputs>."
"Please follow our extraction use case documentation for more guidelines "
"on how to do information extraction with LLMs. "
"<https://python.langchain.com/docs/use_cases/extraction/>. "
"If you notice other issues, please provide "
"feedback here: "
"<https://github.com/langchain-ai/langchain/discussions/18154>"
),
removal="1.0",
alternative=(
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
# Please reference to the documentation of structured_output
# to see an up to date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-opus-4-1-20250805", temperature=0)
structured_model = model.with_structured_output(Joke)
structured_model.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
removal="2.0.0",
alternative="ChatModel.with_structured_output(...)",
addendum=(
"Available on chat models capable of tool calling. See "
"https://docs.langchain.com/oss/python/langchain/structured-output"
),
)
def create_structured_output_runnable(

View File

@@ -5,6 +5,7 @@ from collections.abc import AsyncIterator, Callable, Iterator, Sequence
from importlib import util
from typing import Any, Literal, TypeAlias, cast, overload
from langchain_core._api import deprecated
from langchain_core.language_models import (
BaseChatModel,
LanguageModelInput,
@@ -68,6 +69,15 @@ def init_chat_model(
# FOR CONTRIBUTORS: If adding support for a new provider, please append the provider
# name to the supported list in the docstring below. Do *not* change the order of the
# existing providers.
@deprecated(
since="1.0.5",
removal="2.0.0",
alternative="langchain.chat_models.init_chat_model",
addendum=(
"Maintained in `langchain`; `langchain-classic` retains this entry point "
"for import-compatibility only."
),
)
def init_chat_model(
model: str | None = None,
*,
@@ -78,16 +88,33 @@ def init_chat_model(
) -> BaseChatModel | _ConfigurableModel:
"""Initialize a chat model from any supported provider using a unified interface.
!!! warning "Use `langchain.chat_models.init_chat_model` instead"
This function lives in `langchain-classic` and is no longer actively
maintained. New features and fixes land in the `langchain` package.
Update your imports:
```python
# Don't do this:
from langchain_classic.chat_models import init_chat_model
# Do this instead:
from langchain.chat_models import init_chat_model
```
**Two main use cases:**
1. **Fixed model** specify the model upfront and get back a ready-to-use chat
model.
2. **Configurable model** choose to specify parameters (including model name) at
runtime via `config`. Makes it easy to switch between models/providers without
changing your code
1. **Fixed model** specify the model upfront and get a
ready-to-use chat model.
2. **Configurable model** choose to specify parameters
(including model name) at runtime via `config`. Makes it easy to
switch between models/providers without changing your code
!!! note
Requires the integration package for the chosen model provider to be installed.
!!! note "Installation requirements"
Requires the integration package for the chosen model provider to
be installed.
See the `model_provider` parameter below for specific package names
(e.g., `pip install langchain-openai`).
@@ -96,30 +123,49 @@ def init_chat_model(
for supported model parameters to use as `**kwargs`.
Args:
model: The name or ID of the model, e.g. `'o3-mini'`, `'claude-sonnet-4-5-20250929'`.
model: Name of the model to use, with provider prefix — e.g.,
`'openai:gpt-5.5'`.
You can also specify model and model provider in a single argument using
`'{model_provider}:{model}'` format, e.g. `'openai:o1'`.
A bare model name (e.g., `'claude-opus-4-7'`) is also accepted; we
will attempt to infer the provider from the prefix using the mapping
below. Inference is best-effort and not guaranteed, so prefer
the prefixed form when possible.
Will attempt to infer `model_provider` from model if not specified.
Prefer pinned model IDs over moving aliases (e.g.,
`'claude-haiku-4-5-20251001'` rather than `'claude-haiku-4-5'`)
so behavior does not drift if the alias is repointed upstream.
The following providers will be inferred based on these model prefixes:
Inferred providers by prefix (case-insensitive):
- `gpt-...` | `o1...` | `o3...` -> `openai`
- `claude...` -> `anthropic`
- `amazon...` -> `bedrock`
- `gemini...` -> `google_vertexai`
- `command...` -> `cohere`
- `accounts/fireworks...` -> `fireworks`
- `mistral...` -> `mistralai`
- `deepseek...` -> `deepseek`
- `grok...` -> `xai`
- `sonar...` -> `perplexity`
model_provider: The model provider if not specified as part of the model arg
(see above).
- `gpt-...` | `o1...` | `o3...` -> `openai`
- `claude...` -> `anthropic`
- `amazon....` | `anthropic....` | `meta....` -> `bedrock`
- `gemini...` -> `google_vertexai`
- `command...` -> `cohere`
- `accounts/fireworks...` -> `fireworks`
- `mistral...` | `mixtral...` -> `mistralai`
- `deepseek...` -> `deepseek`
- `grok...` -> `xai`
- `sonar...` -> `perplexity`
- `solar...` -> `upstage`
- `chatgpt...` | `text-davinci...` -> `openai` (legacy)
model_provider: Provider of the model, passed separately instead of
as a prefix on `model`.
Supported `model_provider` values and the corresponding integration package
are:
Equivalent to the prefix form — e.g.,
`model='claude-sonnet-4-5', model_provider='anthropic'` behaves
the same as `model='anthropic:claude-sonnet-4-5'`.
Prefer the prefix form on `model` for most usage. Reach for this
kwarg when:
- The provider is dynamic (read from config or an env var) and
you'd otherwise concatenate strings.
- You want `model` and `model_provider` to be independently
swappable at runtime via `configurable_fields` (e.g., to route
the same model name to a different host).
Supported values and the integration package each requires:
- `openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
- `anthropic` -> [`langchain-anthropic`](https://docs.langchain.com/oss/python/integrations/providers/anthropic)
@@ -142,6 +188,7 @@ def init_chat_model(
- `nvidia` -> [`langchain-nvidia-ai-endpoints`](https://docs.langchain.com/oss/python/integrations/providers/nvidia)
- `xai` -> [`langchain-xai`](https://docs.langchain.com/oss/python/integrations/providers/xai)
- `perplexity` -> [`langchain-perplexity`](https://docs.langchain.com/oss/python/integrations/providers/perplexity)
- `upstage` -> [`langchain-upstage`](https://docs.langchain.com/oss/python/integrations/providers/upstage)
configurable_fields: Which model parameters are configurable at runtime:
- `None`: No configurable fields (i.e., a fixed model).
@@ -193,8 +240,9 @@ def init_chat_model(
Returns:
A [`BaseChatModel`][langchain_core.language_models.BaseChatModel] corresponding
to the `model_name` and `model_provider` specified if configurability is
inferred to be `False`. If configurable, a chat model emulator that
initializes the underlying model at runtime once a config is passed in.
inferred to be `False`.
If configurable, a chat model emulator that initializes the
underlying model at runtime once a config is passed in.
Raises:
ValueError: If `model_provider` cannot be inferred or isn't supported.
@@ -203,39 +251,28 @@ def init_chat_model(
???+ example "Initialize a non-configurable model"
```python
# pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai
# pip install langchain langchain-openai
from langchain_classic.chat_models import init_chat_model
from langchain.chat_models import init_chat_model
o3_mini = init_chat_model("openai:o3-mini", temperature=0)
claude_sonnet = init_chat_model("anthropic:claude-sonnet-4-5-20250929", temperature=0)
gemini_2-5_flash = init_chat_model(
"google_vertexai:gemini-2.5-flash", temperature=0
)
o3_mini.invoke("what's your name")
claude_sonnet.invoke("what's your name")
gemini_2-5_flash.invoke("what's your name")
gpt_5 = init_chat_model("openai:gpt-5.5", temperature=0)
gpt_5.invoke("what's your name")
```
??? example "Partially configurable model with no default"
```python
# pip install langchain langchain-openai langchain-anthropic
# pip install langchain langchain-openai
from langchain_classic.chat_models import init_chat_model
from langchain.chat_models import init_chat_model
# (We don't need to specify configurable=True if a model isn't specified.)
configurable_model = init_chat_model(temperature=0)
configurable_model.invoke(
"what's your name", config={"configurable": {"model": "gpt-4o"}}
)
# Use GPT-4o to generate the response
# Use GPT-5.5 to generate the response
configurable_model.invoke(
"what's your name",
config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
config={"configurable": {"model": "gpt-5.5"}},
)
```
@@ -244,39 +281,41 @@ def init_chat_model(
```python
# pip install langchain langchain-openai langchain-anthropic
from langchain_classic.chat_models import init_chat_model
from langchain.chat_models import init_chat_model
configurable_model_with_default = init_chat_model(
"openai:gpt-4o",
"openai:gpt-5.5",
configurable_fields="any", # This allows us to configure other params like temperature, max_tokens, etc at runtime.
config_prefix="foo",
temperature=0,
)
configurable_model_with_default.invoke("what's your name")
# GPT-4o response with temperature 0 (as set in default)
# GPT-5.5 response with temperature 0 (as set in default)
# Invoke overriding model and temperature at runtime via config.
# Note the use of the "foo_" prefix on the config keys, which matches
# the config_prefix we set when initializing the model.
configurable_model_with_default.invoke(
"what's your name",
config={
"configurable": {
"foo_model": "anthropic:claude-sonnet-4-5-20250929",
"foo_model": "anthropic:claude-opus-4-7",
"foo_temperature": 0.6,
}
},
)
# Override default to use Opus 4.7 with temperature 0.6 to generate response
```
??? example "Bind tools to a configurable model"
You can call any chat model declarative methods on a configurable model in the
same way that you would with a normal model:
You can call any chat model declarative methods on a configurable model
in the same way that you would with a normal model:
```python
# pip install langchain langchain-openai langchain-anthropic
from langchain_classic.chat_models import init_chat_model
from langchain.chat_models import init_chat_model
from pydantic import BaseModel, Field
@@ -297,7 +336,7 @@ def init_chat_model(
configurable_model = init_chat_model(
"gpt-4o", configurable_fields=("model", "model_provider"), temperature=0
"gpt-5.5", configurable_fields=("model", "model_provider"), temperature=0
)
configurable_model_with_tools = configurable_model.bind_tools(
@@ -309,37 +348,15 @@ def init_chat_model(
configurable_model_with_tools.invoke(
"Which city is hotter today and which is bigger: LA or NY?"
)
# Use GPT-4o
# Use GPT-5.5
configurable_model_with_tools.invoke(
"Which city is hotter today and which is bigger: LA or NY?",
config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
config={"configurable": {"model": "claude-opus-4-7"}},
)
# Use Sonnet 4.5
# Use Opus 4.7
```
!!! warning "Behavior changed in `langchain` 0.2.8"
Support for `configurable_fields` and `config_prefix` added.
!!! warning "Behavior changed in `langchain` 0.2.12"
Support for Ollama via langchain-ollama package added
(`langchain_ollama.ChatOllama`). Previously,
the now-deprecated langchain-community version of Ollama was imported
(`langchain_community.chat_models.ChatOllama`).
Support for AWS Bedrock models via the Converse API added
(`model_provider="bedrock_converse"`).
!!! warning "Behavior changed in `langchain` 0.3.5"
Out of beta.
!!! warning "Behavior changed in `langchain` 0.3.19"
Support for Deepseek, IBM, Nvidia, and xAI models added.
""" # noqa: E501
if not model and not configurable_fields:
configurable_fields = ("model", "model_provider")

View File

@@ -2,6 +2,7 @@ import functools
from importlib import util
from typing import Any
from langchain_core._api import deprecated
from langchain_core.embeddings import Embeddings
from langchain_core.runnables import Runnable
@@ -127,6 +128,15 @@ def _check_pkg(pkg: str) -> None:
raise ImportError(msg)
@deprecated(
since="1.0.5",
removal="2.0.0",
alternative="langchain.embeddings.init_embeddings",
addendum=(
"Maintained in `langchain`; `langchain-classic` retains this entry point "
"for import-compatibility only."
),
)
def init_embeddings(
model: str,
*,

View File

@@ -12,10 +12,13 @@ from langchain_classic.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"For agents that need to remember prior interactions, use "
"`create_agent` with checkpointing or the `Store` API. See "
"https://docs.langchain.com/oss/python/langchain/short-term-memory and "
"https://docs.langchain.com/oss/python/langchain/long-term-memory"
),
)
class ConversationBufferMemory(BaseChatMemory):
@@ -90,10 +93,13 @@ class ConversationBufferMemory(BaseChatMemory):
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"For agents that need to remember prior interactions, use "
"`create_agent` with checkpointing or the `Store` API. See "
"https://docs.langchain.com/oss/python/langchain/short-term-memory and "
"https://docs.langchain.com/oss/python/langchain/long-term-memory"
),
)
class ConversationStringBufferMemory(BaseMemory):

View File

@@ -9,10 +9,13 @@ from langchain_classic.memory.chat_memory import BaseChatMemory
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"For agents that need to remember prior interactions, use "
"`create_agent` with checkpointing or the `Store` API. See "
"https://docs.langchain.com/oss/python/langchain/short-term-memory and "
"https://docs.langchain.com/oss/python/langchain/long-term-memory"
),
)
class ConversationBufferWindowMemory(BaseChatMemory):

View File

@@ -16,10 +16,13 @@ from langchain_classic.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"For agents that need to remember prior interactions, use "
"`create_agent` with checkpointing or the `Store` API. See "
"https://docs.langchain.com/oss/python/langchain/short-term-memory and "
"https://docs.langchain.com/oss/python/langchain/long-term-memory"
),
)
class BaseChatMemory(BaseMemory, ABC):

View File

@@ -29,10 +29,13 @@ logger = logging.getLogger(__name__)
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"For agents that need to remember prior interactions, use "
"`create_agent` with checkpointing or the `Store` API. See "
"https://docs.langchain.com/oss/python/langchain/short-term-memory and "
"https://docs.langchain.com/oss/python/langchain/long-term-memory"
),
)
class BaseEntityStore(BaseModel, ABC):
@@ -61,10 +64,13 @@ class BaseEntityStore(BaseModel, ABC):
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"For agents that need to remember prior interactions, use "
"`create_agent` with checkpointing or the `Store` API. See "
"https://docs.langchain.com/oss/python/langchain/short-term-memory and "
"https://docs.langchain.com/oss/python/langchain/long-term-memory"
),
)
class InMemoryEntityStore(BaseEntityStore):
@@ -95,10 +101,13 @@ class InMemoryEntityStore(BaseEntityStore):
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"For agents that need to remember prior interactions, use "
"`create_agent` with checkpointing or the `Store` API. See "
"https://docs.langchain.com/oss/python/langchain/short-term-memory and "
"https://docs.langchain.com/oss/python/langchain/long-term-memory"
),
)
class UpstashRedisEntityStore(BaseEntityStore):
@@ -210,10 +219,13 @@ class UpstashRedisEntityStore(BaseEntityStore):
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"For agents that need to remember prior interactions, use "
"`create_agent` with checkpointing or the `Store` API. See "
"https://docs.langchain.com/oss/python/langchain/short-term-memory and "
"https://docs.langchain.com/oss/python/langchain/long-term-memory"
),
)
class RedisEntityStore(BaseEntityStore):
@@ -334,10 +346,13 @@ class RedisEntityStore(BaseEntityStore):
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"For agents that need to remember prior interactions, use "
"`create_agent` with checkpointing or the `Store` API. See "
"https://docs.langchain.com/oss/python/langchain/short-term-memory and "
"https://docs.langchain.com/oss/python/langchain/long-term-memory"
),
)
class SQLiteEntityStore(BaseEntityStore):
@@ -456,10 +471,13 @@ class SQLiteEntityStore(BaseEntityStore):
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"For agents that need to remember prior interactions, use "
"`create_agent` with checkpointing or the `Store` API. See "
"https://docs.langchain.com/oss/python/langchain/short-term-memory and "
"https://docs.langchain.com/oss/python/langchain/long-term-memory"
),
)
class ConversationEntityMemory(BaseChatMemory):

View File

@@ -18,9 +18,11 @@ from langchain_classic.memory.prompt import SUMMARY_PROMPT
@deprecated(
since="0.2.12",
removal="1.0",
message=(
"Refer here for how to incorporate summaries of conversation history: "
removal="2.0.0",
addendum=(
"For agents, summarize conversation history with `create_agent` and "
"summarization middleware. See "
"https://docs.langchain.com/oss/python/langchain/short-term-memory and "
"https://docs.langchain.com/oss/python/langgraph/add-memory#summarize-messages"
),
)
@@ -82,10 +84,13 @@ class SummarizerMixin(BaseModel):
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"For agents that need to remember prior interactions, use "
"`create_agent` with checkpointing or the `Store` API. See "
"https://docs.langchain.com/oss/python/langchain/short-term-memory and "
"https://docs.langchain.com/oss/python/langchain/long-term-memory"
),
)
class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):

View File

@@ -11,10 +11,13 @@ from langchain_classic.memory.summary import SummarizerMixin
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"For agents that need to remember prior interactions, use "
"`create_agent` with checkpointing or the `Store` API. See "
"https://docs.langchain.com/oss/python/langchain/short-term-memory and "
"https://docs.langchain.com/oss/python/langchain/long-term-memory"
),
)
class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):

View File

@@ -10,10 +10,13 @@ from langchain_classic.memory.chat_memory import BaseChatMemory
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"For agents that need to remember prior interactions, use "
"`create_agent` with checkpointing or the `Store` API. See "
"https://docs.langchain.com/oss/python/langchain/short-term-memory and "
"https://docs.langchain.com/oss/python/langchain/long-term-memory"
),
)
class ConversationTokenBufferMemory(BaseChatMemory):

View File

@@ -14,10 +14,13 @@ from langchain_classic.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
removal="2.0.0",
alternative="langchain.agents.create_agent",
addendum=(
"For agents that need to remember prior interactions, use "
"`create_agent` with checkpointing or the `Store` API. See "
"https://docs.langchain.com/oss/python/langchain/short-term-memory and "
"https://docs.langchain.com/oss/python/langchain/long-term-memory"
),
)
class VectorStoreRetrieverMemory(BaseMemory):

View File

@@ -14,7 +14,7 @@ from typing_extensions import override
@deprecated(
since="0.0.30",
removal="1.0",
removal="2.0.0",
alternative_import="langchain_cohere.CohereRerank",
)
class CohereRerank(BaseDocumentCompressor):

View File

@@ -219,14 +219,16 @@ def init_chat_model(
**Two main use cases:**
1. **Fixed model** specify the model upfront and get a ready-to-use chat model.
2. **Configurable model** choose to specify parameters (including model name) at
runtime via `config`. Makes it easy to switch between models/providers without
changing your code
1. **Fixed model** specify the model upfront and get a
ready-to-use chat model.
2. **Configurable model** choose to specify parameters
(including model name) at runtime via `config`. Makes it easy to
switch between models/providers without changing your code
!!! note "Installation requirements"
Requires the integration package for the chosen model provider to be installed.
Requires the integration package for the chosen model provider to
be installed.
See the `model_provider` parameter below for specific package names
(e.g., `pip install langchain-openai`).
@@ -235,31 +237,49 @@ def init_chat_model(
for supported model parameters to use as `**kwargs`.
Args:
model: The model name, optionally prefixed with provider (e.g., `'openai:gpt-4o'`).
model: Name of the model to use, with provider prefix — e.g.,
`'openai:gpt-5.5'`.
Prefer exact model IDs from provider docs over aliases for reliable behavior
(e.g., dated versions like `'...-20250514'` instead of `'...-latest'`).
A bare model name (e.g., `'claude-opus-4-7'`) is also accepted; we
will attempt to infer the provider from the prefix using the mapping
below. Inference is best-effort and not guaranteed, so prefer
the prefixed form when possible.
Will attempt to infer `model_provider` from model if not specified.
Prefer pinned model IDs over moving aliases (e.g.,
`'claude-haiku-4-5-20251001'` rather than `'claude-haiku-4-5'`)
so behavior does not drift if the alias is repointed upstream.
The following providers will be inferred based on these model prefixes:
Inferred providers by prefix (case-insensitive):
- `gpt-...` | `o1...` | `o3...` -> `openai`
- `claude...` -> `anthropic`
- `amazon...` -> `bedrock`
- `gemini...` -> `google_vertexai`
- `command...` -> `cohere`
- `accounts/fireworks...` -> `fireworks`
- `mistral...` -> `mistralai`
- `deepseek...` -> `deepseek`
- `grok...` -> `xai`
- `sonar...` -> `perplexity`
- `solar...` -> `upstage`
model_provider: The model provider if not specified as part of the model arg
(see above).
- `gpt-...` | `o1...` | `o3...` -> `openai`
- `claude...` -> `anthropic`
- `amazon....` | `anthropic....` | `meta....` -> `bedrock`
- `gemini...` -> `google_vertexai` (default changes in next major; pass `model_provider` to lock in)
- `command...` -> `cohere`
- `accounts/fireworks...` -> `fireworks`
- `mistral...` | `mixtral...` -> `mistralai`
- `deepseek...` -> `deepseek`
- `grok...` -> `xai`
- `sonar...` -> `perplexity`
- `solar...` -> `upstage`
- `chatgpt...` | `text-davinci...` -> `openai` (legacy)
model_provider: Provider of the model, passed separately instead of
as a prefix on `model`.
Supported `model_provider` values and the corresponding integration package
are:
Equivalent to the prefix form — e.g.,
`model='claude-sonnet-4-5', model_provider='anthropic'` behaves
the same as `model='anthropic:claude-sonnet-4-5'`.
Prefer the prefix form on `model` for most usage. Reach for this
kwarg when:
- The provider is dynamic (read from config or an env var) and
you'd otherwise concatenate strings.
- You want `model` and `model_provider` to be independently
swappable at runtime via `configurable_fields` (e.g., to route
the same model name to a different host).
Supported values and the integration package each requires:
- `openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
- `anthropic` -> [`langchain-anthropic`](https://docs.langchain.com/oss/python/integrations/providers/anthropic)
@@ -338,9 +358,9 @@ def init_chat_model(
Returns:
A `BaseChatModel` corresponding to the `model_name` and `model_provider`
specified if configurability is inferred to be `False`. If configurable, a
chat model emulator that initializes the underlying model at runtime once a
config is passed in.
specified if configurability is inferred to be `False`.
If configurable, a chat model emulator that initializes the
underlying model at runtime once a config is passed in.
Raises:
ValueError: If `model_provider` cannot be inferred or isn't supported.
@@ -349,35 +369,28 @@ def init_chat_model(
???+ example "Initialize a non-configurable model"
```python
# pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai
# pip install langchain langchain-openai
from langchain.chat_models import init_chat_model
o3_mini = init_chat_model("openai:o3-mini", temperature=0)
claude_sonnet = init_chat_model("anthropic:claude-sonnet-4-5-20250929", temperature=0)
gemini_2-5_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
o3_mini.invoke("what's your name")
claude_sonnet.invoke("what's your name")
gemini_2-5_flash.invoke("what's your name")
gpt_5 = init_chat_model("openai:gpt-5.5", temperature=0)
gpt_5.invoke("what's your name")
```
??? example "Partially configurable model with no default"
```python
# pip install langchain langchain-openai langchain-anthropic
# pip install langchain langchain-openai
from langchain.chat_models import init_chat_model
# (We don't need to specify configurable=True if a model isn't specified.)
configurable_model = init_chat_model(temperature=0)
configurable_model.invoke("what's your name", config={"configurable": {"model": "gpt-4o"}})
# Use GPT-4o to generate the response
# Use GPT-5.5 to generate the response
configurable_model.invoke(
"what's your name",
config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
config={"configurable": {"model": "gpt-5.5"}},
)
```
@@ -389,31 +402,33 @@ def init_chat_model(
from langchain.chat_models import init_chat_model
configurable_model_with_default = init_chat_model(
"openai:gpt-4o",
"openai:gpt-5.5",
configurable_fields="any", # This allows us to configure other params like temperature, max_tokens, etc at runtime.
config_prefix="foo",
temperature=0,
)
configurable_model_with_default.invoke("what's your name")
# GPT-4o response with temperature 0 (as set in default)
# GPT-5.5 response with temperature 0 (as set in default)
# Invoke overriding model and temperature at runtime via config.
# Note the use of the "foo_" prefix on the config keys, which matches
# the config_prefix we set when initializing the model.
configurable_model_with_default.invoke(
"what's your name",
config={
"configurable": {
"foo_model": "anthropic:claude-sonnet-4-5-20250929",
"foo_model": "anthropic:claude-opus-4-7",
"foo_temperature": 0.6,
}
},
)
# Override default to use Sonnet 4.5 with temperature 0.6 to generate response
```
??? example "Bind tools to a configurable model"
You can call any chat model declarative methods on a configurable model in the
same way that you would with a normal model:
You can call any chat model declarative methods on a configurable model
in the same way that you would with a normal model:
```python
# pip install langchain langchain-openai langchain-anthropic
@@ -435,7 +450,7 @@ def init_chat_model(
configurable_model = init_chat_model(
"gpt-4o", configurable_fields=("model", "model_provider"), temperature=0
"gpt-5.5", configurable_fields=("model", "model_provider"), temperature=0
)
configurable_model_with_tools = configurable_model.bind_tools(
@@ -447,13 +462,13 @@ def init_chat_model(
configurable_model_with_tools.invoke(
"Which city is hotter today and which is bigger: LA or NY?"
)
# Use GPT-4o
# Use GPT-5.5
configurable_model_with_tools.invoke(
"Which city is hotter today and which is bigger: LA or NY?",
config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
config={"configurable": {"model": "claude-opus-4-7"}},
)
# Use Sonnet 4.5
# Use Opus 4.7
```
""" # noqa: E501
@@ -539,8 +554,17 @@ def _attempt_infer_model_provider(model_name: str) -> str | None:
if model_lower.startswith("accounts/fireworks"):
return "fireworks"
# Google models
# Google models — prefix is ambiguous (Vertex AI vs the GenAI/AI Studio API).
if model_lower.startswith("gemini"):
warnings.warn(
f"Inferred `model_provider='google_vertexai'` from {model_name!r}. "
"This default will change to 'google_genai' in the next major release."
"To keep current behavior, pass `model_provider='google_vertexai'` "
f"(or use the prefix form, e.g. 'google_vertexai:{model_name}'); "
"for AI Studio / Gemini API, use 'google_genai' instead.",
DeprecationWarning,
stacklevel=5,
)
return "google_vertexai"
# AWS Bedrock models

View File

@@ -1285,8 +1285,9 @@ class ChatAnthropic(BaseChatModel):
# Handle deprecated output_format parameter for backward compatibility
if "output_format" in payload:
warnings.warn(
"The 'output_format' parameter is deprecated and will be removed in a "
"future version. Use 'output_config={\"format\": ...}' instead.",
"The 'output_format' parameter is deprecated and will be removed in "
"langchain-anthropic 2.0.0. Use 'output_config={\"format\": ...}' "
"instead.",
DeprecationWarning,
stacklevel=2,
)

View File

@@ -3,11 +3,11 @@
from __future__ import annotations
import re
import warnings
from collections.abc import AsyncIterator, Callable, Iterator, Mapping
from typing import Any
import anthropic
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
@@ -131,6 +131,7 @@ class _AnthropicCommon(BaseLanguageModel):
return stop
@deprecated(since="0.1.0", removal="2.0.0", alternative="ChatAnthropic")
class AnthropicLLM(LLM, _AnthropicCommon):
"""Anthropic text completion large language model (legacy LLM).
@@ -150,18 +151,6 @@ class AnthropicLLM(LLM, _AnthropicCommon):
arbitrary_types_allowed=True,
)
@model_validator(mode="before")
@classmethod
def raise_warning(cls, values: dict) -> Any:
"""Raise warning that this class is deprecated."""
warnings.warn(
"This Anthropic LLM is deprecated. "
"Please use `from langchain_anthropic import ChatAnthropic` "
"instead",
stacklevel=2,
)
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""

View File

@@ -510,7 +510,7 @@ wheels = [
[[package]]
name = "langchain"
version = "1.2.15"
version = "1.2.17"
source = { editable = "../../langchain_v1" }
dependencies = [
{ name = "langchain-core" },
@@ -734,7 +734,7 @@ wheels = [
[[package]]
name = "langchain-tests"
version = "1.1.6"
version = "1.1.7"
source = { editable = "../../standard-tests" }
dependencies = [
{ name = "httpx" },

View File

@@ -188,9 +188,8 @@ class HTMLHeaderTextSplitter:
@deprecated(
since="1.1.2",
removal="2.0.0",
message=(
"Please fetch the HTML content from the URL yourself and pass it "
"to split_text."
addendum=(
"Fetch the HTML content from the URL yourself and pass it to `split_text`."
),
)
def split_text_from_url(