core[minor], langchain[minor]: deprecate old Chain and LLM methods (#15499)
commit 00dfbd2a99
parent fd5fbb507d
@@ -15,6 +15,7 @@ from typing import (
 
 from typing_extensions import TypeAlias
 
+from langchain_core._api import deprecated
 from langchain_core.messages import AnyMessage, BaseMessage, get_buffer_string
 from langchain_core.prompt_values import PromptValue
 from langchain_core.runnables import Runnable, RunnableSerializable
@@ -60,17 +61,6 @@ class BaseLanguageModel(
     """Abstract base class for interfacing with language models.
 
     All language model wrappers inherit from BaseLanguageModel.
-
-    Exposes three main methods:
-    - generate_prompt: generate language model outputs for a sequence of prompt
-        values. A prompt value is a model input that can be converted to any language
-        model input format (string or messages).
-    - predict: pass in a single string to a language model and return a string
-        prediction.
-    - predict_messages: pass in a sequence of BaseMessages (corresponding to a single
-        model call) to a language model and return a BaseMessage prediction.
-
-    Each of these has an equivalent asynchronous method.
     """
 
     @property
@@ -160,11 +150,12 @@ class BaseLanguageModel(
             prompt and additional model provider-specific output.
         """
 
+    @deprecated("0.1.0", alternative="invoke", removal="0.2.0")
     @abstractmethod
     def predict(
         self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
     ) -> str:
-        """Pass a single string input to the model and return a string prediction.
+        """Pass a single string input to the model and return a string.
 
         Use this method when passing in raw text. If you want to pass in specific
         types of chat messages, use predict_messages.
@@ -180,6 +171,7 @@ class BaseLanguageModel(
             Top model prediction as a string.
         """
 
+    @deprecated("0.1.0", alternative="invoke", removal="0.2.0")
     @abstractmethod
     def predict_messages(
         self,
@@ -188,7 +180,7 @@ class BaseLanguageModel(
         stop: Optional[Sequence[str]] = None,
         **kwargs: Any,
     ) -> BaseMessage:
-        """Pass a message sequence to the model and return a message prediction.
+        """Pass a message sequence to the model and return a message.
 
         Use this method when passing in chat messages. If you want to pass in raw text,
         use predict.
@@ -204,11 +196,12 @@ class BaseLanguageModel(
             Top model prediction as a message.
         """
 
+    @deprecated("0.1.0", alternative="ainvoke", removal="0.2.0")
     @abstractmethod
     async def apredict(
         self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
     ) -> str:
-        """Asynchronously pass a string to the model and return a string prediction.
+        """Asynchronously pass a string to the model and return a string.
 
         Use this method when calling pure text generation models and only the top
         candidate generation is needed.
@@ -224,6 +217,7 @@ class BaseLanguageModel(
             Top model prediction as a string.
         """
 
+    @deprecated("0.1.0", alternative="ainvoke", removal="0.2.0")
     @abstractmethod
    async def apredict_messages(
         self,
@@ -232,7 +226,7 @@ class BaseLanguageModel(
         stop: Optional[Sequence[str]] = None,
         **kwargs: Any,
     ) -> BaseMessage:
-        """Asynchronously pass messages to the model and return a message prediction.
+        """Asynchronously pass messages to the model and return a message.
 
         Use this method when calling chat models and only the top
         candidate generation is needed.
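The hunks above trim the BaseLanguageModel docstring and stamp predict, predict_messages, apredict, and apredict_messages with the @deprecated decorator imported in the first hunk; the decorator warns at call time and names the replacement. A minimal migration sketch follows; EchoLLM is a toy model invented for this example, not part of the commit:

from typing import Any, List, Optional

from langchain_core.language_models.llms import LLM


class EchoLLM(LLM):
    """Toy model so the example runs without a provider; not from this commit."""

    @property
    def _llm_type(self) -> str:
        return "echo"

    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
        return prompt.upper()


llm = EchoLLM()
llm.predict("hello")  # deprecated in 0.1.0, slated for removal in 0.2.0
llm.invoke("hello")   # the replacement: same string in, string out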
@@ -16,6 +16,7 @@ from typing import (
     cast,
 )
 
+from langchain_core._api import deprecated
 from langchain_core.callbacks import (
     AsyncCallbackManager,
     AsyncCallbackManagerForLLMRun,
@@ -108,7 +109,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
     callbacks: Callbacks = Field(default=None, exclude=True)
     """Callbacks to add to the run trace."""
     callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
-    """Callback manager to add to the run trace."""
+    """[DEPRECATED] Callback manager to add to the run trace."""
     tags: Optional[List[str]] = Field(default=None, exclude=True)
     """Tags to add to the run trace."""
     metadata: Optional[Dict[str, Any]] = Field(default=None, exclude=True)
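The second hunk above marks the callback_manager field on BaseChatModel as deprecated in favor of callbacks. A sketch of the field-level migration; FakeListChatModel is langchain_core's test helper, and the import path here is an assumption that may vary by version:

from langchain_core.callbacks import CallbackManager, StdOutCallbackHandler
from langchain_core.language_models.fake_chat_models import FakeListChatModel  # assumed path

handler = StdOutCallbackHandler()

# Before: handlers routed through the now-deprecated field
old_style = FakeListChatModel(responses=["hi"], callback_manager=CallbackManager([handler]))

# After: pass handlers directly; `callbacks` accepts a plain list
new_style = FakeListChatModel(responses=["hi"], callbacks=[handler])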
@@ -345,7 +346,30 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         run_name: Optional[str] = None,
         **kwargs: Any,
     ) -> LLMResult:
-        """Top Level call"""
+        """Pass a sequence of prompts to the model and return model generations.
+
+        This method should make use of batched calls for models that expose a batched
+        API.
+
+        Use this method when you want to:
+            1. take advantage of batched calls,
+            2. need more output from the model than just the top generated value,
+            3. are building chains that are agnostic to the underlying language model
+                type (e.g., pure text completion models vs chat models).
+
+        Args:
+            messages: List of list of messages.
+            stop: Stop words to use when generating. Model output is cut off at the
+                first occurrence of any of these substrings.
+            callbacks: Callbacks to pass through. Used for executing additional
+                functionality, such as logging or streaming, throughout generation.
+            **kwargs: Arbitrary additional keyword arguments. These are usually passed
+                to the model provider API call.
+
+        Returns:
+            An LLMResult, which contains a list of candidate Generations for each input
+                prompt and additional model provider-specific output.
+        """
         params = self._get_invocation_params(stop=stop, **kwargs)
         options = {"stop": stop}
 
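The old one-line """Top Level call""" docstring on BaseChatModel.generate is replaced with real documentation of the batched contract. A sketch of that contract; EchoChatModel is a stand-in written for this example, not part of the commit:

from typing import Any, List, Optional

from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, ChatResult


class EchoChatModel(BaseChatModel):
    """Stand-in chat model that echoes the last message; not from this commit."""

    @property
    def _llm_type(self) -> str:
        return "echo-chat"

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Any = None,
        **kwargs: Any,
    ) -> ChatResult:
        reply = AIMessage(content=messages[-1].content)
        return ChatResult(generations=[ChatGeneration(message=reply)])


model = EchoChatModel()
# One inner list per model call; the outer list is the batch.
result = model.generate([[HumanMessage(content="a")], [HumanMessage(content="b")]])
assert len(result.generations) == 2               # one entry per input prompt
print(result.generations[0][0].message.content)   # top candidate for the first prompt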
@@ -407,7 +431,30 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         run_name: Optional[str] = None,
         **kwargs: Any,
     ) -> LLMResult:
-        """Top Level call"""
+        """Asynchronously pass a sequence of prompts to a model and return generations.
+
+        This method should make use of batched calls for models that expose a batched
+        API.
+
+        Use this method when you want to:
+            1. take advantage of batched calls,
+            2. need more output from the model than just the top generated value,
+            3. are building chains that are agnostic to the underlying language model
+                type (e.g., pure text completion models vs chat models).
+
+        Args:
+            messages: List of list of messages.
+            stop: Stop words to use when generating. Model output is cut off at the
+                first occurrence of any of these substrings.
+            callbacks: Callbacks to pass through. Used for executing additional
+                functionality, such as logging or streaming, throughout generation.
+            **kwargs: Arbitrary additional keyword arguments. These are usually passed
+                to the model provider API call.
+
+        Returns:
+            An LLMResult, which contains a list of candidate Generations for each input
+                prompt and additional model provider-specific output.
+        """
         params = self._get_invocation_params(stop=stop, **kwargs)
         options = {"stop": stop}
 
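agenerate gets the same documentation. Its default implementation falls back to running the sync _generate in an executor, so the stand-in from the previous sketch works unchanged:

import asyncio

from langchain_core.messages import HumanMessage


async def main() -> None:
    model = EchoChatModel()  # stand-in defined in the previous sketch
    result = await model.agenerate([[HumanMessage(content="a")], [HumanMessage(content="b")]])
    print([gens[0].message.content for gens in result.generations])


asyncio.run(main())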
@@ -632,6 +679,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
     ) -> AsyncIterator[ChatGenerationChunk]:
         raise NotImplementedError()
 
+    @deprecated("0.1.0", alternative="invoke", removal="0.2.0")
     def __call__(
         self,
         messages: List[BaseMessage],
@@ -663,11 +711,13 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         else:
             raise ValueError("Unexpected generation type")
 
+    @deprecated("0.1.0", alternative="invoke", removal="0.2.0")
     def call_as_llm(
         self, message: str, stop: Optional[List[str]] = None, **kwargs: Any
     ) -> str:
         return self.predict(message, stop=stop, **kwargs)
 
+    @deprecated("0.1.0", alternative="invoke", removal="0.2.0")
     def predict(
         self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
     ) -> str:
@@ -681,6 +731,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         else:
             raise ValueError("Cannot use predict when output is not a string.")
 
+    @deprecated("0.1.0", alternative="invoke", removal="0.2.0")
     def predict_messages(
         self,
         messages: List[BaseMessage],
@@ -694,6 +745,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             _stop = list(stop)
         return self(messages, stop=_stop, **kwargs)
 
+    @deprecated("0.1.0", alternative="ainvoke", removal="0.2.0")
     async def apredict(
         self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
     ) -> str:
@@ -709,6 +761,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         else:
             raise ValueError("Cannot use predict when output is not a string.")
 
+    @deprecated("0.1.0", alternative="ainvoke", removal="0.2.0")
     async def apredict_messages(
         self,
         messages: List[BaseMessage],
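The remaining chat-model hunks deprecate every pre-LCEL entry point (__call__, call_as_llm, predict, predict_messages, and their async twins), all pointing at invoke/ainvoke. Call-site migration, again with the EchoChatModel stand-in from the earlier sketch:

from langchain_core.messages import HumanMessage

chat = EchoChatModel()  # stand-in from the earlier sketch
messages = [HumanMessage(content="hello")]

chat(messages)             # deprecated __call__
chat.call_as_llm("hello")  # deprecated string-in/string-out shim
chat.predict("hello")      # deprecated

chat.invoke(messages)      # replacement; returns a BaseMessage
chat.invoke("hello")       # plain strings are accepted as input too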
@@ -36,6 +36,7 @@ from tenacity import (
     wait_exponential,
 )
 
+from langchain_core._api import deprecated
 from langchain_core.callbacks import (
     AsyncCallbackManager,
     AsyncCallbackManagerForLLMRun,
@@ -157,14 +158,17 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     It should take in a prompt and return a string."""
 
     cache: Optional[bool] = None
+    """Whether to cache the response."""
     verbose: bool = Field(default_factory=_get_verbosity)
     """Whether to print out response text."""
     callbacks: Callbacks = Field(default=None, exclude=True)
-    callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
+    """Callbacks to add to the run trace."""
     tags: Optional[List[str]] = Field(default=None, exclude=True)
     """Tags to add to the run trace."""
     metadata: Optional[Dict[str, Any]] = Field(default=None, exclude=True)
     """Metadata to add to the run trace."""
+    callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
+    """[DEPRECATED]"""
 
     class Config:
         """Configuration for this pydantic object."""
@@ -576,7 +580,30 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         run_name: Optional[Union[str, List[str]]] = None,
         **kwargs: Any,
     ) -> LLMResult:
-        """Run the LLM on the given prompt and input."""
+        """Pass a sequence of prompts to a model and return generations.
+
+        This method should make use of batched calls for models that expose a batched
+        API.
+
+        Use this method when you want to:
+            1. take advantage of batched calls,
+            2. need more output from the model than just the top generated value,
+            3. are building chains that are agnostic to the underlying language model
+                type (e.g., pure text completion models vs chat models).
+
+        Args:
+            prompts: List of string prompts.
+            stop: Stop words to use when generating. Model output is cut off at the
+                first occurrence of any of these substrings.
+            callbacks: Callbacks to pass through. Used for executing additional
+                functionality, such as logging or streaming, throughout generation.
+            **kwargs: Arbitrary additional keyword arguments. These are usually passed
+                to the model provider API call.
+
+        Returns:
+            An LLMResult, which contains a list of candidate Generations for each input
+                prompt and additional model provider-specific output.
+        """
         if not isinstance(prompts, list):
             raise ValueError(
                 "Argument 'prompts' is expected to be of type List[str], received"
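BaseLLM.generate likewise trades """Run the LLM on the given prompt and input.""" for the documented batched contract, with prompts as a plain list of strings. Reusing the EchoLLM toy from the first sketch:

llm = EchoLLM()  # toy model from the first sketch

result = llm.generate(["first prompt", "second prompt"])
for gens in result.generations:   # one list of candidates per input prompt
    print(gens[0].text)           # top candidate for that prompt
print(result.llm_output)          # provider-specific extras; may be empty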
@@ -754,7 +781,30 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         run_name: Optional[Union[str, List[str]]] = None,
         **kwargs: Any,
     ) -> LLMResult:
-        """Run the LLM on the given prompt and input."""
+        """Asynchronously pass a sequence of prompts to a model and return generations.
+
+        This method should make use of batched calls for models that expose a batched
+        API.
+
+        Use this method when you want to:
+            1. take advantage of batched calls,
+            2. need more output from the model than just the top generated value,
+            3. are building chains that are agnostic to the underlying language model
+                type (e.g., pure text completion models vs chat models).
+
+        Args:
+            prompts: List of string prompts.
+            stop: Stop words to use when generating. Model output is cut off at the
+                first occurrence of any of these substrings.
+            callbacks: Callbacks to pass through. Used for executing additional
+                functionality, such as logging or streaming, throughout generation.
+            **kwargs: Arbitrary additional keyword arguments. These are usually passed
+                to the model provider API call.
+
+        Returns:
+            An LLMResult, which contains a list of candidate Generations for each input
+                prompt and additional model provider-specific output.
+        """
         # Create callback managers
         if isinstance(callbacks, list) and (
             isinstance(callbacks[0], (list, BaseCallbackManager))
@@ -927,6 +977,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             )
         return result.generations[0][0].text
 
+    @deprecated("0.1.0", alternative="invoke", removal="0.2.0")
     def predict(
         self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
     ) -> str:
@@ -936,6 +987,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             _stop = list(stop)
         return self(text, stop=_stop, **kwargs)
 
+    @deprecated("0.1.0", alternative="invoke", removal="0.2.0")
     def predict_messages(
         self,
         messages: List[BaseMessage],
@@ -951,6 +1003,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         content = self(text, stop=_stop, **kwargs)
         return AIMessage(content=content)
 
+    @deprecated("0.1.0", alternative="ainvoke", removal="0.2.0")
     async def apredict(
         self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
     ) -> str:
@@ -960,6 +1013,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             _stop = list(stop)
         return await self._call_async(text, stop=_stop, **kwargs)
 
+    @deprecated("0.1.0", alternative="ainvoke", removal="0.2.0")
     async def apredict_messages(
         self,
         messages: List[BaseMessage],
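The string-model predict family is deprecated with the same decorator. Migration sketch with the EchoLLM toy:

from langchain_core.messages import HumanMessage

llm = EchoLLM()  # toy model from the first sketch

llm.predict("hello")                                # deprecated; use invoke
llm.predict_messages([HumanMessage(content="hi")])  # deprecated; returns an AIMessage

llm.invoke("hello")  # replacement for the string-style calls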
@@ -5,9 +5,10 @@ import logging
 import warnings
 from abc import ABC, abstractmethod
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Type, Union
+from typing import Any, Dict, List, Optional, Type, Union, cast
 
 import yaml
+from langchain_core._api import deprecated
 from langchain_core.load.dump import dumpd
 from langchain_core.memory import BaseMemory
 from langchain_core.outputs import RunInfo
@@ -67,6 +68,43 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
     chains and cannot return as rich of an output as `__call__`.
     """
 
+    memory: Optional[BaseMemory] = None
+    """Optional memory object. Defaults to None.
+    Memory is a class that gets called at the start
+    and at the end of every chain. At the start, memory loads variables and passes
+    them along in the chain. At the end, it saves any returned variables.
+    There are many different types of memory - please see memory docs
+    for the full catalog."""
+    callbacks: Callbacks = Field(default=None, exclude=True)
+    """Optional list of callback handlers (or callback manager). Defaults to None.
+    Callback handlers are called throughout the lifecycle of a call to a chain,
+    starting with on_chain_start, ending with on_chain_end or on_chain_error.
+    Each custom chain can optionally call additional callback methods, see Callback docs
+    for full details."""
+    verbose: bool = Field(default_factory=_get_verbosity)
+    """Whether or not run in verbose mode. In verbose mode, some intermediate logs
+    will be printed to the console. Defaults to the global `verbose` value,
+    accessible via `langchain.globals.get_verbose()`."""
+    tags: Optional[List[str]] = None
+    """Optional list of tags associated with the chain. Defaults to None.
+    These tags will be associated with each call to this chain,
+    and passed as arguments to the handlers defined in `callbacks`.
+    You can use these to eg identify a specific instance of a chain with its use case.
+    """
+    metadata: Optional[Dict[str, Any]] = None
+    """Optional metadata associated with the chain. Defaults to None.
+    This metadata will be associated with each call to this chain,
+    and passed as arguments to the handlers defined in `callbacks`.
+    You can use these to eg identify a specific instance of a chain with its use case.
+    """
+    callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
+    """[DEPRECATED] Use `callbacks` instead."""
+
+    class Config:
+        """Configuration for this pydantic object."""
+
+        arbitrary_types_allowed = True
+
     def get_input_schema(
         self, config: Optional[RunnableConfig] = None
     ) -> Type[BaseModel]:
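In langchain's Chain class, the field block (memory, callbacks, verbose, tags, metadata, plus the deprecated callback_manager and the pydantic Config) moves up to the top of the class, ahead of the method definitions; the matching deletion appears in a later hunk. A sketch of a minimal chain exercising those fields; UpperChain is invented for this example:

from typing import Any, Dict, List

from langchain.chains.base import Chain


class UpperChain(Chain):
    """Toy chain for illustration; upper-cases its input."""

    @property
    def input_keys(self) -> List[str]:
        return ["text"]

    @property
    def output_keys(self) -> List[str]:
        return ["upper"]

    def _call(self, inputs: Dict[str, Any], run_manager: Any = None) -> Dict[str, str]:
        return {"upper": inputs["text"].upper()}


chain = UpperChain(verbose=True, tags=["demo"], metadata={"owner": "docs"})
print(chain.invoke({"text": "hi"}))  # {'text': 'hi', 'upper': 'HI'}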
@@ -90,14 +128,45 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
         **kwargs: Any,
     ) -> Dict[str, Any]:
         config = ensure_config(config)
-        return self(
-            input,
-            callbacks=config.get("callbacks"),
-            tags=config.get("tags"),
-            metadata=config.get("metadata"),
-            run_name=config.get("run_name"),
-            **kwargs,
+        callbacks = config.get("callbacks")
+        tags = config.get("tags")
+        metadata = config.get("metadata")
+        run_name = config.get("run_name")
+        include_run_info = kwargs.get("include_run_info", False)
+        return_only_outputs = kwargs.get("return_only_outputs", False)
+
+        inputs = self.prep_inputs(input)
+        callback_manager = CallbackManager.configure(
+            callbacks,
+            self.callbacks,
+            self.verbose,
+            tags,
+            self.tags,
+            metadata,
+            self.metadata,
         )
+        new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
+        run_manager = callback_manager.on_chain_start(
+            dumpd(self),
+            inputs,
+            name=run_name,
+        )
+        try:
+            outputs = (
+                self._call(inputs, run_manager=run_manager)
+                if new_arg_supported
+                else self._call(inputs)
+            )
+        except BaseException as e:
+            run_manager.on_chain_error(e)
+            raise e
+        run_manager.on_chain_end(outputs)
+        final_outputs: Dict[str, Any] = self.prep_outputs(
+            inputs, outputs, return_only_outputs
+        )
+        if include_run_info:
+            final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
+        return final_outputs
 
     async def ainvoke(
         self,
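invoke stops delegating to __call__ and now owns the callback lifecycle itself: it configures a CallbackManager, fires on_chain_start/on_chain_end/on_chain_error around _call, and honors the return_only_outputs and include_run_info kwargs directly. Observable behavior, using the UpperChain toy from the previous sketch (the "__run" key is the RUN_KEY constant's value):

chain = UpperChain()

result = chain.invoke({"text": "hi"}, include_run_info=True)
print(result["__run"].run_id)  # RunInfo recorded by the callback machinery

outputs_only = chain.invoke({"text": "hi"}, return_only_outputs=True)
print(outputs_only)            # {'upper': 'HI'}; inputs are stripped from the result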
@@ -106,51 +175,45 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
         **kwargs: Any,
     ) -> Dict[str, Any]:
         config = ensure_config(config)
-        return await self.acall(
-            input,
-            callbacks=config.get("callbacks"),
-            tags=config.get("tags"),
-            metadata=config.get("metadata"),
-            run_name=config.get("run_name"),
-            **kwargs,
+        callbacks = config.get("callbacks")
+        tags = config.get("tags")
+        metadata = config.get("metadata")
+        run_name = config.get("run_name")
+        include_run_info = kwargs.get("include_run_info", False)
+        return_only_outputs = kwargs.get("return_only_outputs", False)
+
+        inputs = self.prep_inputs(input)
+        callback_manager = AsyncCallbackManager.configure(
+            callbacks,
+            self.callbacks,
+            self.verbose,
+            tags,
+            self.tags,
+            metadata,
+            self.metadata,
         )
-
-    memory: Optional[BaseMemory] = None
-    """Optional memory object. Defaults to None.
-    Memory is a class that gets called at the start
-    and at the end of every chain. At the start, memory loads variables and passes
-    them along in the chain. At the end, it saves any returned variables.
-    There are many different types of memory - please see memory docs
-    for the full catalog."""
-    callbacks: Callbacks = Field(default=None, exclude=True)
-    """Optional list of callback handlers (or callback manager). Defaults to None.
-    Callback handlers are called throughout the lifecycle of a call to a chain,
-    starting with on_chain_start, ending with on_chain_end or on_chain_error.
-    Each custom chain can optionally call additional callback methods, see Callback docs
-    for full details."""
-    callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
-    """Deprecated, use `callbacks` instead."""
-    verbose: bool = Field(default_factory=_get_verbosity)
-    """Whether or not run in verbose mode. In verbose mode, some intermediate logs
-    will be printed to the console. Defaults to the global `verbose` value,
-    accessible via `langchain.globals.get_verbose()`."""
-    tags: Optional[List[str]] = None
-    """Optional list of tags associated with the chain. Defaults to None.
-    These tags will be associated with each call to this chain,
-    and passed as arguments to the handlers defined in `callbacks`.
-    You can use these to eg identify a specific instance of a chain with its use case.
-    """
-    metadata: Optional[Dict[str, Any]] = None
-    """Optional metadata associated with the chain. Defaults to None.
-    This metadata will be associated with each call to this chain,
-    and passed as arguments to the handlers defined in `callbacks`.
-    You can use these to eg identify a specific instance of a chain with its use case.
-    """
-
-    class Config:
-        """Configuration for this pydantic object."""
-
-        arbitrary_types_allowed = True
+        new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
+        run_manager = await callback_manager.on_chain_start(
+            dumpd(self),
+            inputs,
+            name=run_name,
+        )
+        try:
+            outputs = (
+                await self._acall(inputs, run_manager=run_manager)
+                if new_arg_supported
+                else await self._acall(inputs)
+            )
+        except BaseException as e:
+            await run_manager.on_chain_error(e)
+            raise e
+        await run_manager.on_chain_end(outputs)
+        final_outputs: Dict[str, Any] = self.prep_outputs(
+            inputs, outputs, return_only_outputs
+        )
+        if include_run_info:
+            final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
+        return final_outputs
 
     @property
     def _chain_type(self) -> str:
@@ -253,6 +316,7 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
             None, self._call, inputs, run_manager.get_sync() if run_manager else None
         )
 
+    @deprecated("0.1.0", alternative="invoke", removal="0.2.0")
     def __call__(
         self,
         inputs: Union[Dict[str, Any], Any],
@@ -289,39 +353,21 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
             A dict of named outputs. Should contain all outputs specified in
                 `Chain.output_keys`.
         """
-        inputs = self.prep_inputs(inputs)
-        callback_manager = CallbackManager.configure(
-            callbacks,
-            self.callbacks,
-            self.verbose,
-            tags,
-            self.tags,
-            metadata,
-            self.metadata,
-        )
-        new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
-        run_manager = callback_manager.on_chain_start(
-            dumpd(self),
-            inputs,
-            name=run_name,
-        )
-        try:
-            outputs = (
-                self._call(inputs, run_manager=run_manager)
-                if new_arg_supported
-                else self._call(inputs)
-            )
-        except BaseException as e:
-            run_manager.on_chain_error(e)
-            raise e
-        run_manager.on_chain_end(outputs)
-        final_outputs: Dict[str, Any] = self.prep_outputs(
-            inputs, outputs, return_only_outputs
-        )
-        if include_run_info:
-            final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
-        return final_outputs
+        config = {
+            "callbacks": callbacks,
+            "tags": tags,
+            "metadata": metadata,
+            "run_name": run_name,
+        }
 
+        return self.invoke(
+            inputs,
+            cast(RunnableConfig, {k: v for k, v in config.items() if v is not None}),
+            return_only_outputs=return_only_outputs,
+            include_run_info=include_run_info,
+        )
+
+    @deprecated("0.1.0", alternative="ainvoke", removal="0.2.0")
     async def acall(
         self,
         inputs: Union[Dict[str, Any], Any],
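__call__ keeps its old signature but is reduced to an adapter: it packs callbacks, tags, metadata, and run_name into a RunnableConfig, dropping unset keys, and defers to invoke. The call-site translation, with the UpperChain toy:

from langchain_core.callbacks import StdOutCallbackHandler

chain = UpperChain()
handler = StdOutCallbackHandler()

# Before (deprecated):
out = chain({"text": "hi"}, callbacks=[handler], tags=["demo"])

# After: the same options travel inside `config`
out = chain.invoke(
    {"text": "hi"},
    config={"callbacks": [handler], "tags": ["demo"]},
)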
@@ -358,38 +404,18 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
             A dict of named outputs. Should contain all outputs specified in
                 `Chain.output_keys`.
         """
-        inputs = self.prep_inputs(inputs)
-        callback_manager = AsyncCallbackManager.configure(
-            callbacks,
-            self.callbacks,
-            self.verbose,
-            tags,
-            self.tags,
-            metadata,
-            self.metadata,
-        )
-        new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
-        run_manager = await callback_manager.on_chain_start(
-            dumpd(self),
+        config = {
+            "callbacks": callbacks,
+            "tags": tags,
+            "metadata": metadata,
+            "run_name": run_name,
+        }
+        return await self.ainvoke(
             inputs,
-            name=run_name,
+            cast(RunnableConfig, {k: v for k, v in config.items() if v is not None}),
+            return_only_outputs=return_only_outputs,
+            include_run_info=include_run_info,
         )
-        try:
-            outputs = (
-                await self._acall(inputs, run_manager=run_manager)
-                if new_arg_supported
-                else await self._acall(inputs)
-            )
-        except BaseException as e:
-            await run_manager.on_chain_error(e)
-            raise e
-        await run_manager.on_chain_end(outputs)
-        final_outputs: Dict[str, Any] = self.prep_outputs(
-            inputs, outputs, return_only_outputs
-        )
-        if include_run_info:
-            final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
-        return final_outputs
 
     def prep_outputs(
         self,
@@ -458,6 +484,7 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
             )
         return self.output_keys[0]
 
+    @deprecated("0.1.0", alternative="invoke", removal="0.2.0")
     def run(
         self,
         *args: Any,
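run returned a bare value (the single output) rather than a dict, so migrations to invoke index into the result. With the UpperChain toy:

chain = UpperChain()

value = chain.run(text="hi")                   # deprecated; returns 'HI' directly
value = chain.invoke({"text": "hi"})["upper"]  # replacement; index by output key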
@@ -528,6 +555,7 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
                 f" but not both. Got args: {args} and kwargs: {kwargs}."
             )
 
+    @deprecated("0.1.0", alternative="ainvoke", removal="0.2.0")
     async def arun(
         self,
         *args: Any,
@@ -665,6 +693,7 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
         else:
             raise ValueError(f"{save_path} must be json or yaml")
 
+    @deprecated("0.1.0", alternative="batch", removal="0.2.0")
     def apply(
         self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
     ) -> List[Dict[str, str]]:
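apply was the list-in/list-out helper; its replacement, batch, comes from the Runnable interface and returns one output dict per input. With the UpperChain toy:

chain = UpperChain()
rows = [{"text": "a"}, {"text": "b"}]

results = chain.apply(rows)  # deprecated
results = chain.batch(rows)  # replacement; may run inputs concurrently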