core: Add ruff rules for error messages (EM) (#26965)

All auto-fixes
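
For context: ruff's EM rules come from the flake8-errmsg plugin. EM101 flags a raw string literal passed directly to an exception constructor, EM102 flags an f-string, and EM103 flags a str.format() call. The auto-fix binds the message to a local variable named msg and raises with that variable instead. A minimal sketch of the before/after pattern (illustrative only; the function and values are hypothetical, not taken from this diff):

    def check_limit(limit: int) -> None:
        if limit <= 0:
            # Before: EM102 flags the f-string passed inline to ValueError.
            raise ValueError(f"limit must be positive, got {limit}")


    def check_limit_fixed(limit: int) -> None:
        if limit <= 0:
            # After: the auto-fix binds the message to a variable first,
            # then raises with the variable.
            msg = f"limit must be positive, got {limit}"
            raise ValueError(msg)

The rewrite preserves exception chaining, so `raise ImportError(msg) from e` keeps its `from e` clause, as in the hunks below.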

Co-authored-by: Erick Friis <erick@langchain.dev>
Christophe Bornet authored on 2024-10-08 00:12:28 +02:00, committed by GitHub
parent 37ca468d03
commit d31ec8810a
94 changed files with 777 additions and 523 deletions
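
Why the rule exists: Python prints the offending source line in a traceback, so a message passed inline appears twice, once in the quoted source line and once in the exception text. Binding it to a variable keeps the quoted line short. A hedged illustration (not part of this commit; it reuses the InMemoryCache message from a hunk below):

    def make_cache(maxsize: int) -> dict:
        # With the EM fix applied, the traceback for make_cache(0) ends with:
        #     raise ValueError(msg)
        # ValueError: maxsize must be greater than 0
        # Without the fix, the quoted source line would repeat the whole message.
        if maxsize <= 0:
            msg = "maxsize must be greater than 0"
            raise ValueError(msg)
        return {}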


@@ -51,15 +51,18 @@ def _validate_deprecation_params(
) -> None:
"""Validate the deprecation parameters."""
if pending and removal:
raise ValueError("A pending deprecation cannot have a scheduled removal")
msg = "A pending deprecation cannot have a scheduled removal"
raise ValueError(msg)
if alternative and alternative_import:
raise ValueError("Cannot specify both alternative and alternative_import")
msg = "Cannot specify both alternative and alternative_import"
raise ValueError(msg)
if alternative_import and "." not in alternative_import:
raise ValueError(
msg = (
"alternative_import must be a fully qualified module path. Got "
f" {alternative_import}"
)
raise ValueError(msg)
def deprecated(
@@ -222,7 +225,8 @@ def deprecated(
if not _obj_type:
_obj_type = "attribute"
if not _name:
raise ValueError(f"Field {obj} must have a name to be deprecated.")
msg = f"Field {obj} must have a name to be deprecated."
raise ValueError(msg)
old_doc = obj.description
def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
@@ -241,7 +245,8 @@ def deprecated(
if not _obj_type:
_obj_type = "attribute"
if not _name:
raise ValueError(f"Field {obj} must have a name to be deprecated.")
msg = f"Field {obj} must have a name to be deprecated."
raise ValueError(msg)
old_doc = obj.description
def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
@@ -428,10 +433,11 @@ def warn_deprecated(
if not pending:
if not removal:
removal = f"in {removal}" if removal else "within ?? minor releases"
raise NotImplementedError(
msg = (
f"Need to determine which default deprecation schedule to use. "
f"{removal}"
)
raise NotImplementedError(msg)
else:
removal = f"in {removal}"
@@ -523,9 +529,8 @@ def rename_parameter(
@functools.wraps(f)
def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _R:
if new in kwargs and old in kwargs:
raise TypeError(
f"{f.__name__}() got multiple values for argument {new!r}"
)
msg = f"{f.__name__}() got multiple values for argument {new!r}"
raise TypeError(msg)
if old in kwargs:
warn_deprecated(
since,


@@ -59,7 +59,8 @@ def _key_from_id(id_: str) -> str:
elif wout_prefix.endswith(CONTEXT_CONFIG_SUFFIX_SET):
return wout_prefix[: -len(CONTEXT_CONFIG_SUFFIX_SET)]
else:
raise ValueError(f"Invalid context config id {id_}")
msg = f"Invalid context config id {id_}"
raise ValueError(msg)
def _config_with_context(
@@ -103,16 +104,15 @@ def _config_with_context(
for dep in deps_by_key[key]:
if key in deps_by_key[dep]:
raise ValueError(
f"Deadlock detected between context keys {key} and {dep}"
)
msg = f"Deadlock detected between context keys {key} and {dep}"
raise ValueError(msg)
if len(setters) != 1:
raise ValueError(f"Expected exactly one setter for context key {key}")
msg = f"Expected exactly one setter for context key {key}"
raise ValueError(msg)
setter_idx = setters[0][1]
if any(getter_idx < setter_idx for _, getter_idx in getters):
raise ValueError(
f"Context setter for key {key} must be defined after all getters."
)
msg = f"Context setter for key {key} must be defined after all getters."
raise ValueError(msg)
if getters:
context_funcs[getters[0][0].id] = partial(getter, events[key], values)
@@ -271,9 +271,8 @@ class ContextSet(RunnableSerializable):
if spec.id.endswith(CONTEXT_CONFIG_SUFFIX_GET):
getter_key = spec.id.split("/")[1]
if getter_key in self.keys:
raise ValueError(
f"Circular reference in context setter for key {getter_key}"
)
msg = f"Circular reference in context setter for key {getter_key}"
raise ValueError(msg)
return super().config_specs + [
ConfigurableFieldSpec(
id=id_,


@@ -160,7 +160,8 @@ class InMemoryCache(BaseCache):
"""
self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
if maxsize is not None and maxsize <= 0:
raise ValueError("maxsize must be greater than 0")
msg = "maxsize must be greater than 0"
raise ValueError(msg)
self._maxsize = maxsize
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:


@@ -275,9 +275,8 @@ class CallbackManagerMixin:
"""
# NotImplementedError is thrown intentionally
# Callback handler will fall back to on_llm_start if this exception is thrown
raise NotImplementedError(
f"{self.__class__.__name__} does not implement `on_chat_model_start`"
)
msg = f"{self.__class__.__name__} does not implement `on_chat_model_start`"
raise NotImplementedError(msg)
def on_retriever_start(
self,
@@ -523,9 +522,8 @@ class AsyncCallbackHandler(BaseCallbackHandler):
"""
# NotImplementedError is thrown intentionally
# Callback handler will fall back to on_llm_start if this exception is thrown
raise NotImplementedError(
f"{self.__class__.__name__} does not implement `on_chat_model_start`"
)
msg = f"{self.__class__.__name__} does not implement `on_chat_model_start`"
raise NotImplementedError(msg)
async def on_llm_new_token(
self,


@@ -1510,11 +1510,12 @@ class CallbackManager(BaseCallbackManager):
.. versionadded:: 0.2.14
"""
if kwargs:
raise ValueError(
msg = (
"The dispatcher API does not accept additional keyword arguments."
"Please do not pass any additional keyword arguments, instead "
"include them in the data field."
)
raise ValueError(msg)
if run_id is None:
run_id = uuid.uuid4()
@@ -1989,11 +1990,12 @@ class AsyncCallbackManager(BaseCallbackManager):
run_id = uuid.uuid4()
if kwargs:
raise ValueError(
msg = (
"The dispatcher API does not accept additional keyword arguments."
"Please do not pass any additional keyword arguments, instead "
"include them in the data field."
)
raise ValueError(msg)
await ahandle_event(
self.handlers,
"on_custom_event",
@@ -2336,11 +2338,12 @@ def _configure(
if v1_tracing_enabled_ and not tracing_v2_enabled_:
# if both are enabled, can silently ignore the v1 tracer
raise RuntimeError(
msg = (
"Tracing using LangChainTracerV1 is no longer supported. "
"Please set the LANGCHAIN_TRACING_V2 environment variable to enable "
"tracing instead."
)
raise RuntimeError(msg)
tracer_project = _get_tracer_project()
debug = _get_debug()
@@ -2519,13 +2522,14 @@ async def adispatch_custom_event(
# within a tool or a lambda and have the metadata events associated
# with the parent run rather than have a new run id generated for each.
if callback_manager.parent_run_id is None:
raise RuntimeError(
msg = (
"Unable to dispatch an adhoc event without a parent run id."
"This function can only be called from within an existing run (e.g.,"
"inside a tool or a RunnableLambda or a RunnableGenerator.)"
"If you are doing that and still seeing this error, try explicitly"
"passing the config parameter to this function."
)
raise RuntimeError(msg)
await callback_manager.on_custom_event(
name,
@@ -2588,13 +2592,14 @@ def dispatch_custom_event(
# within a tool or a lambda and have the metadata events associated
# with the parent run rather than have a new run id generated for each.
if callback_manager.parent_run_id is None:
raise RuntimeError(
msg = (
"Unable to dispatch an adhoc event without a parent run id."
"This function can only be called from within an existing run (e.g.,"
"inside a tool or a RunnableLambda or a RunnableGenerator.)"
"If you are doing that and still seeing this error, try explicitly"
"passing the config parameter to this function."
)
raise RuntimeError(msg)
callback_manager.on_custom_event(
name,
data,


@@ -157,10 +157,11 @@ class BaseChatMessageHistory(ABC):
# method, so we should use it.
self.add_messages([message])
else:
raise NotImplementedError(
msg = (
"add_message is not implemented for this class. "
"Please implement add_message or add_messages."
)
raise NotImplementedError(msg)
def add_messages(self, messages: Sequence[BaseMessage]) -> None:
"""Add a list of messages.


@@ -53,11 +53,12 @@ class BaseLoader(ABC): # noqa: B024
try:
from langchain_text_splitters import RecursiveCharacterTextSplitter
except ImportError as e:
raise ImportError(
msg = (
"Unable to import from langchain_text_splitters. Please specify "
"text_splitter or install langchain_text_splitters with "
"`pip install -U langchain-text-splitters`."
) from e
)
raise ImportError(msg) from e
_text_splitter: TextSplitter = RecursiveCharacterTextSplitter()
else:
@@ -71,9 +72,8 @@ class BaseLoader(ABC): # noqa: B024
"""A lazy loader for Documents."""
if type(self).load != BaseLoader.load:
return iter(self.load())
raise NotImplementedError(
f"{self.__class__.__name__} does not implement lazy_load()"
)
msg = f"{self.__class__.__name__} does not implement lazy_load()"
raise NotImplementedError(msg)
async def alazy_load(self) -> AsyncIterator[Document]:
"""A lazy loader for Documents."""


@@ -142,7 +142,8 @@ class Blob(BaseMedia):
def check_blob_is_valid(cls, values: dict[str, Any]) -> Any:
"""Verify that either data or path is provided."""
if "data" not in values and "path" not in values:
raise ValueError("Either data or path must be provided")
msg = "Either data or path must be provided"
raise ValueError(msg)
return values
def as_string(self) -> str:
@@ -155,7 +156,8 @@ class Blob(BaseMedia):
elif isinstance(self.data, str):
return self.data
else:
raise ValueError(f"Unable to get string for blob {self}")
msg = f"Unable to get string for blob {self}"
raise ValueError(msg)
def as_bytes(self) -> bytes:
"""Read data as bytes."""
@@ -167,7 +169,8 @@ class Blob(BaseMedia):
with open(str(self.path), "rb") as f:
return f.read()
else:
raise ValueError(f"Unable to get bytes for blob {self}")
msg = f"Unable to get bytes for blob {self}"
raise ValueError(msg)
@contextlib.contextmanager
def as_bytes_io(self) -> Generator[Union[BytesIO, BufferedReader], None, None]:
@@ -178,7 +181,8 @@ class Blob(BaseMedia):
with open(str(self.path), "rb") as f:
yield f
else:
raise NotImplementedError(f"Unable to convert blob {self}")
msg = f"Unable to convert blob {self}"
raise NotImplementedError(msg)
@classmethod
def from_path(


@@ -41,10 +41,11 @@ class OutputParserException(ValueError, LangChainException): # noqa: N818
):
super().__init__(error)
if send_to_llm and (observation is None or llm_output is None):
raise ValueError(
msg = (
"Arguments 'observation' & 'llm_output'"
" are required if 'send_to_llm' is True"
)
raise ValueError(msg)
self.observation = observation
self.llm_output = llm_output
self.send_to_llm = send_to_llm


@@ -73,20 +73,22 @@ class _HashedDocument(Document):
for key in forbidden_keys:
if key in metadata:
raise ValueError(
msg = (
f"Metadata cannot contain key {key} as it "
f"is reserved for internal use."
)
raise ValueError(msg)
content_hash = str(_hash_string_to_uuid(content))
try:
metadata_hash = str(_hash_nested_dict_to_uuid(metadata))
except Exception as e:
raise ValueError(
msg = (
f"Failed to hash metadata: {e}. "
f"Please use a dict that can be serialized using json."
) from e
)
raise ValueError(msg) from e
values["content_hash"] = content_hash
values["metadata_hash"] = metadata_hash
@@ -154,10 +156,11 @@ def _get_source_id_assigner(
elif callable(source_id_key):
return source_id_key
else:
raise ValueError(
msg = (
f"source_id_key should be either None, a string or a callable. "
f"Got {source_id_key} of type {type(source_id_key)}."
)
raise ValueError(msg)
def _deduplicate_in_order(
@@ -269,13 +272,15 @@ def index(
ValueError: If source_id_key is not None, but is not a string or callable.
"""
if cleanup not in {"incremental", "full", None}:
raise ValueError(
msg = (
f"cleanup should be one of 'incremental', 'full' or None. "
f"Got {cleanup}."
)
raise ValueError(msg)
if cleanup == "incremental" and source_id_key is None:
raise ValueError("Source id key is required when cleanup mode is incremental.")
msg = "Source id key is required when cleanup mode is incremental."
raise ValueError(msg)
destination = vector_store # Renaming internally for clarity
@@ -286,21 +291,24 @@ def index(
for method in methods:
if not hasattr(destination, method):
raise ValueError(
msg = (
f"Vectorstore {destination} does not have required method {method}"
)
raise ValueError(msg)
if type(destination).delete == VectorStore.delete:
# Checking if the vectorstore has overridden the default delete method
# implementation which just raises a NotImplementedError
raise ValueError("Vectorstore has not implemented the delete method")
msg = "Vectorstore has not implemented the delete method"
raise ValueError(msg)
elif isinstance(destination, DocumentIndex):
pass
else:
raise TypeError(
msg = (
f"Vectorstore should be either a VectorStore or a DocumentIndex. "
f"Got {type(destination)}."
)
raise TypeError(msg)
if isinstance(docs_source, BaseLoader):
try:
@@ -334,12 +342,13 @@ def index(
# If the cleanup mode is incremental, source ids are required.
for source_id, hashed_doc in zip(source_ids, hashed_docs):
if source_id is None:
raise ValueError(
msg = (
"Source ids are required when cleanup mode is incremental. "
f"Document that starts with "
f"content: {hashed_doc.page_content[:100]} was not assigned "
f"as source id."
)
raise ValueError(msg)
# source ids cannot be None after for loop above.
source_ids = cast(Sequence[str], source_ids) # type: ignore[assignment]
@@ -400,7 +409,8 @@ def index(
# mypy isn't good enough to determine that source ids cannot be None
# here due to a check that's happening above, so we check again.
if any(source_id is None for source_id in source_ids):
raise AssertionError("Source ids cannot be if cleanup=='incremental'.")
msg = "Source ids cannot be if cleanup=='incremental'."
raise AssertionError(msg)
indexed_source_ids = cast(
Sequence[str], [source_id_assigner(doc) for doc in docs_to_index]
@@ -514,13 +524,15 @@ async def aindex(
"""
if cleanup not in {"incremental", "full", None}:
raise ValueError(
msg = (
f"cleanup should be one of 'incremental', 'full' or None. "
f"Got {cleanup}."
)
raise ValueError(msg)
if cleanup == "incremental" and source_id_key is None:
raise ValueError("Source id key is required when cleanup mode is incremental.")
msg = "Source id key is required when cleanup mode is incremental."
raise ValueError(msg)
destination = vector_store # Renaming internally for clarity
@@ -532,21 +544,24 @@ async def aindex(
for method in methods:
if not hasattr(destination, method):
raise ValueError(
msg = (
f"Vectorstore {destination} does not have required method {method}"
)
raise ValueError(msg)
if type(destination).adelete == VectorStore.adelete:
# Checking if the vectorstore has overridden the default delete method
# implementation which just raises a NotImplementedError
raise ValueError("Vectorstore has not implemented the delete method")
msg = "Vectorstore has not implemented the delete method"
raise ValueError(msg)
elif isinstance(destination, DocumentIndex):
pass
else:
raise TypeError(
msg = (
f"Vectorstore should be either a VectorStore or a DocumentIndex. "
f"Got {type(destination)}."
)
raise TypeError(msg)
async_doc_iterator: AsyncIterator[Document]
if isinstance(docs_source, BaseLoader):
try:
@@ -588,12 +603,13 @@ async def aindex(
# If the cleanup mode is incremental, source ids are required.
for source_id, hashed_doc in zip(source_ids, hashed_docs):
if source_id is None:
raise ValueError(
msg = (
"Source ids are required when cleanup mode is incremental. "
f"Document that starts with "
f"content: {hashed_doc.page_content[:100]} was not assigned "
f"as source id."
)
raise ValueError(msg)
# source ids cannot be None after for loop above.
source_ids = cast(Sequence[str], source_ids)
@@ -654,7 +670,8 @@ async def aindex(
# mypy isn't good enough to determine that source ids cannot be None
# here due to a check that's happening above, so we check again.
if any(source_id is None for source_id in source_ids):
raise AssertionError("Source ids cannot be if cleanup=='incremental'.")
msg = "Source ids cannot be if cleanup=='incremental'."
raise AssertionError(msg)
indexed_source_ids = cast(
Sequence[str], [source_id_assigner(doc) for doc in docs_to_index]


@@ -290,11 +290,13 @@ class InMemoryRecordManager(RecordManager):
"""
if group_ids and len(keys) != len(group_ids):
raise ValueError("Length of keys must match length of group_ids")
msg = "Length of keys must match length of group_ids"
raise ValueError(msg)
for index, key in enumerate(keys):
group_id = group_ids[index] if group_ids else None
if time_at_least and time_at_least > self.get_time():
raise ValueError("time_at_least must be in the past")
msg = "time_at_least must be in the past"
raise ValueError(msg)
self.records[key] = {"group_id": group_id, "updated_at": self.get_time()}
async def aupdate(


@@ -47,7 +47,8 @@ class InMemoryDocumentIndex(DocumentIndex):
def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse:
"""Delete by ID."""
if ids is None:
raise ValueError("IDs must be provided for deletion")
msg = "IDs must be provided for deletion"
raise ValueError(msg)
ok_ids = []


@@ -60,11 +60,12 @@ def get_tokenizer() -> Any:
try:
from transformers import GPT2TokenizerFast # type: ignore[import]
except ImportError as e:
raise ImportError(
msg = (
"Could not import transformers python package. "
"This is needed in order to calculate get_token_ids. "
"Please install it with `pip install transformers`."
) from e
)
raise ImportError(msg) from e
# create a GPT-2 tokenizer instance
return GPT2TokenizerFast.from_pretrained("gpt2")


@@ -89,7 +89,8 @@ def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
if generation:
generation += list(stream)
if generation is None:
raise ValueError("No generations found in stream.")
msg = "No generations found in stream."
raise ValueError(msg)
return ChatResult(
generations=[
ChatGeneration(
@@ -265,10 +266,11 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
elif isinstance(input, Sequence):
return ChatPromptValue(messages=convert_to_messages(input))
else:
raise ValueError(
msg = (
f"Invalid input type {type(input)}. "
"Must be a PromptValue, str, or list of BaseMessages."
)
raise ValueError(msg)
def invoke(
self,
@@ -817,9 +819,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
elif self.cache is None:
pass
else:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
msg = "Asked to cache, but no cache found at `langchain.cache`."
raise ValueError(msg)
# Apply the rate limiter after checking the cache, since
# we usually don't want to rate limit cache lookups, but
@@ -891,9 +892,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
elif self.cache is None:
pass
else:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
msg = "Asked to cache, but no cache found at `langchain.cache`."
raise ValueError(msg)
# Apply the rate limiter after checking the cache, since
# we usually don't want to rate limit cache lookups, but
@@ -1020,7 +1020,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
if isinstance(generation, ChatGeneration):
return generation.message
else:
raise ValueError("Unexpected generation type")
msg = "Unexpected generation type"
raise ValueError(msg)
async def _call_async(
self,
@@ -1036,7 +1037,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
if isinstance(generation, ChatGeneration):
return generation.message
else:
raise ValueError("Unexpected generation type")
msg = "Unexpected generation type"
raise ValueError(msg)
@deprecated("0.1.7", alternative="invoke", removal="1.0")
def call_as_llm(
@@ -1053,7 +1055,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
if isinstance(result.content, str):
return result.content
else:
raise ValueError("Cannot use predict when output is not a string.")
msg = "Cannot use predict when output is not a string."
raise ValueError(msg)
@deprecated("0.1.7", alternative="invoke", removal="1.0")
def predict_messages(
@@ -1077,7 +1080,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
if isinstance(result.content, str):
return result.content
else:
raise ValueError("Cannot use predict when output is not a string.")
msg = "Cannot use predict when output is not a string."
raise ValueError(msg)
@deprecated("0.1.7", alternative="ainvoke", removal="1.0")
async def apredict_messages(
@@ -1220,7 +1224,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
# }
""" # noqa: E501
if kwargs:
raise ValueError(f"Received unsupported arguments {kwargs}")
msg = f"Received unsupported arguments {kwargs}"
raise ValueError(msg)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
@@ -1228,9 +1233,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
)
if self.bind_tools is BaseChatModel.bind_tools:
raise NotImplementedError(
"with_structured_output is not implemented for this model."
)
msg = "with_structured_output is not implemented for this model."
raise NotImplementedError(msg)
llm = self.bind_tools([schema], tool_choice="any")
if isinstance(schema, type) and is_basemodel_subclass(schema):
output_parser: OutputParserLike = PydanticToolsParser(


@@ -238,18 +238,20 @@ class GenericFakeChatModel(BaseChatModel):
messages, stop=stop, run_manager=run_manager, **kwargs
)
if not isinstance(chat_result, ChatResult):
raise ValueError(
msg = (
f"Expected generate to return a ChatResult, "
f"but got {type(chat_result)} instead."
)
raise ValueError(msg)
message = chat_result.generations[0].message
if not isinstance(message, AIMessage):
raise ValueError(
msg = (
f"Expected invoke to return an AIMessage, "
f"but got {type(message)} instead."
)
raise ValueError(msg)
content = message.content


@@ -135,15 +135,17 @@ def _resolve_cache(cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
elif cache is True:
llm_cache = get_llm_cache()
if llm_cache is None:
raise ValueError(
msg = (
"No global cache was configured. Use `set_llm_cache`."
"to set a global cache if you want to use a global cache."
"Otherwise either pass a cache object or set cache to False/None"
)
raise ValueError(msg)
elif cache is False:
llm_cache = None
else:
raise ValueError(f"Unsupported cache value {cache}")
msg = f"Unsupported cache value {cache}"
raise ValueError(msg)
return llm_cache
@@ -332,10 +334,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
elif isinstance(input, Sequence):
return ChatPromptValue(messages=convert_to_messages(input))
else:
raise ValueError(
msg = (
f"Invalid input type {type(input)}. "
"Must be a PromptValue, str, or list of BaseMessages."
)
raise ValueError(msg)
def _get_ls_params(
self,
@@ -842,10 +845,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
prompt and additional model provider-specific output.
"""
if not isinstance(prompts, list):
raise ValueError(
msg = (
"Argument 'prompts' is expected to be of type List[str], received"
f" argument of type {type(prompts)}."
)
raise ValueError(msg)
# Create callback managers
if isinstance(metadata, list):
metadata = [
@@ -989,10 +993,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
return [None] * len(prompts)
if isinstance(run_id, list):
if len(run_id) != len(prompts):
raise ValueError(
msg = (
"Number of manually provided run_id's does not match batch length."
f" {len(run_id)} != {len(prompts)}"
)
raise ValueError(msg)
return run_id
return [run_id] + [None] * (len(prompts) - 1)
@@ -1262,11 +1267,12 @@ class BaseLLM(BaseLanguageModel[str], ABC):
ValueError: If the prompt is not a string.
"""
if not isinstance(prompt, str):
raise ValueError(
msg = (
"Argument `prompt` is expected to be a string. Instead found "
f"{type(prompt)}. If you want to run the LLM on multiple prompts, use "
"`generate` instead."
)
raise ValueError(msg)
return (
self.generate(
[prompt],
@@ -1387,7 +1393,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
with open(file_path, "w") as f:
yaml.dump(prompt_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
msg = f"{save_path} must be json or yaml"
raise ValueError(msg)
class LLM(BaseLLM):


@@ -37,7 +37,8 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
ValueError: If `default` is passed as a kwarg.
"""
if "default" in kwargs:
raise ValueError("`default` should not be passed to dumps")
msg = "`default` should not be passed to dumps"
raise ValueError(msg)
try:
if pretty:
indent = kwargs.pop("indent", 2)


@@ -96,17 +96,19 @@ class Reviver:
else:
if self.secrets_from_env and key in os.environ and os.environ[key]:
return os.environ[key]
raise KeyError(f'Missing key "{key}" in load(secrets_map)')
msg = f'Missing key "{key}" in load(secrets_map)'
raise KeyError(msg)
if (
value.get("lc") == 1
and value.get("type") == "not_implemented"
and value.get("id") is not None
):
raise NotImplementedError(
msg = (
"Trying to load an object that doesn't implement "
f"serialization: {value}"
)
raise NotImplementedError(msg)
if (
value.get("lc") == 1
@@ -121,7 +123,8 @@ class Reviver:
# The root namespace ["langchain"] is not a valid identifier.
or namespace == ["langchain"]
):
raise ValueError(f"Invalid namespace: {value}")
msg = f"Invalid namespace: {value}"
raise ValueError(msg)
# Has explicit import path.
elif mapping_key in self.import_mappings:
import_path = self.import_mappings[mapping_key]
@@ -130,11 +133,12 @@ class Reviver:
# Import module
mod = importlib.import_module(".".join(import_dir))
elif namespace[0] in DISALLOW_LOAD_FROM_PATH:
raise ValueError(
msg = (
"Trying to deserialize something that cannot "
"be deserialized in current version of langchain-core: "
f"{mapping_key}."
)
raise ValueError(msg)
# Otherwise, treat namespace as path.
else:
mod = importlib.import_module(".".join(namespace))
@@ -143,7 +147,8 @@ class Reviver:
# The class must be a subclass of Serializable.
if not issubclass(cls, Serializable):
raise ValueError(f"Invalid namespace: {value}")
msg = f"Invalid namespace: {value}"
raise ValueError(msg)
# We don't need to recurse on kwargs
# as json.loads will do that for us.


@@ -215,11 +215,12 @@ class Serializable(BaseModel, ABC):
for attr in deprecated_attributes:
if hasattr(cls, attr):
raise ValueError(
msg = (
f"Class {self.__class__} has a deprecated "
f"attribute {attr}. Please use the corresponding "
f"classmethod instead."
)
raise ValueError(msg)
# Get a reference to self bound to each class in the MRO
this = cast(Serializable, self if cls is None else super(cls, self))


@@ -373,7 +373,8 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
)
)
else:
raise ValueError("Malformed args.")
msg = "Malformed args."
raise ValueError(msg)
except Exception:
invalid_tool_calls.append(
create_invalid_tool_call(
@@ -402,9 +403,8 @@ def add_ai_message_chunks(
) -> AIMessageChunk:
"""Add multiple AIMessageChunks together."""
if any(left.example != o.example for o in others):
raise ValueError(
"Cannot concatenate AIMessageChunks with different example values."
)
msg = "Cannot concatenate AIMessageChunks with different example values."
raise ValueError(msg)
content = merge_content(left.content, *(o.content for o in others))
additional_kwargs = merge_dicts(


@@ -223,11 +223,12 @@ class BaseMessageChunk(BaseMessage):
response_metadata=response_metadata,
)
else:
raise TypeError(
msg = (
'unsupported operand type(s) for +: "'
f"{self.__class__.__name__}"
f'" and "{other.__class__.__name__}"'
)
raise TypeError(msg)
def message_to_dict(message: BaseMessage) -> dict:


@@ -48,9 +48,8 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk):
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
raise ValueError(
"Cannot concatenate ChatMessageChunks with different roles."
)
msg = "Cannot concatenate ChatMessageChunks with different roles."
raise ValueError(msg)
return self.__class__(
role=self.role,


@@ -54,9 +54,8 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, FunctionMessageChunk):
if self.name != other.name:
raise ValueError(
"Cannot concatenate FunctionMessageChunks with different names."
)
msg = "Cannot concatenate FunctionMessageChunks with different names."
raise ValueError(msg)
return self.__class__(
name=self.name,


@@ -20,7 +20,8 @@ class RemoveMessage(BaseMessage):
ValueError: If the 'content' field is passed in kwargs.
"""
if kwargs.pop("content", None):
raise ValueError("RemoveMessage does not support 'content' field.")
msg = "RemoveMessage does not support 'content' field."
raise ValueError(msg)
return super().__init__("", id=id, **kwargs)


@@ -94,11 +94,12 @@ class ToolMessage(BaseMessage):
try:
values["content"] = str(content)
except ValueError as e:
raise ValueError(
msg = (
"ToolMessage content should be a string or a list of string/dicts. "
f"Received:\n\n{content=}\n\n which could not be coerced into a "
"string."
) from e
)
raise ValueError(msg) from e
elif isinstance(content, list):
values["content"] = []
for i, x in enumerate(content):
@@ -106,12 +107,13 @@
try:
values["content"].append(str(x))
except ValueError as e:
raise ValueError(
msg = (
"ToolMessage content should be a string or a list of "
"string/dicts. Received a list but "
f"element ToolMessage.content[{i}] is not a dict and could "
f"not be coerced to a string.:\n\n{x}"
) from e
)
raise ValueError(msg) from e
else:
values["content"].append(x)
else:
@@ -147,9 +149,8 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, ToolMessageChunk):
if self.tool_call_id != other.tool_call_id:
raise ValueError(
"Cannot concatenate ToolMessageChunks with different names."
)
msg = "Cannot concatenate ToolMessageChunks with different names."
raise ValueError(msg)
return self.__class__(
tool_call_id=self.tool_call_id,


@@ -51,10 +51,11 @@ def _get_type(v: Any) -> str:
elif hasattr(v, "type"):
return v.type
else:
raise TypeError(
msg = (
f"Expected either a dictionary with a 'type' key or an object "
f"with a 'type' attribute. Instead got type {type(v)}."
)
raise TypeError(msg)
AnyMessage = Annotated[
@@ -120,7 +121,8 @@ def get_buffer_string(
elif isinstance(m, ChatMessage):
role = m.role
else:
raise ValueError(f"Got unsupported message type: {m}")
msg = f"Got unsupported message type: {m}"
raise ValueError(msg)
message = f"{role}: {m.content}"
if isinstance(m, AIMessage) and "function_call" in m.additional_kwargs:
message += f"{m.additional_kwargs['function_call']}"
@@ -158,7 +160,8 @@ def _message_from_dict(message: dict) -> BaseMessage:
elif _type == "ChatMessageChunk":
return ChatMessageChunk(**message["data"])
else:
raise ValueError(f"Got unexpected message type: {_type}")
msg = f"Got unexpected message type: {_type}"
raise ValueError(msg)
def messages_from_dict(messages: Sequence[dict]) -> list[BaseMessage]:
@@ -266,10 +269,11 @@ def _create_message_from_message_type(
elif message_type == "remove":
message = RemoveMessage(**kwargs)
else:
raise ValueError(
msg = (
f"Unexpected message type: '{message_type}'. Use one of 'human',"
f" 'user', 'ai', 'assistant', 'function', 'tool', or 'system'."
)
raise ValueError(msg)
return message
@@ -312,14 +316,14 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
# None msg content is not allowed
msg_content = msg_kwargs.pop("content") or ""
except KeyError as e:
raise ValueError(
f"Message dict must contain 'role' and 'content' keys, got {message}"
) from e
msg = f"Message dict must contain 'role' and 'content' keys, got {message}"
raise ValueError(msg) from e
_message = _create_message_from_message_type(
msg_type, msg_content, **msg_kwargs
)
else:
raise NotImplementedError(f"Unsupported message type: {type(message)}")
msg = f"Unsupported message type: {type(message)}"
raise NotImplementedError(msg)
return _message
@@ -820,11 +824,12 @@ def trim_messages(
else:
list_token_counter = token_counter # type: ignore[assignment]
else:
raise ValueError(
msg = (
f"'token_counter' expected to be a model that implements "
f"'get_num_tokens_from_messages()' or a function. Received object of type "
f"{type(token_counter)}."
)
raise ValueError(msg)
try:
from langchain_text_splitters import TextSplitter
@@ -859,9 +864,8 @@
text_splitter=text_splitter_fn,
)
else:
raise ValueError(
f"Unrecognized {strategy=}. Supported strategies are 'last' and 'first'."
)
msg = f"Unrecognized {strategy=}. Supported strategies are 'last' and 'first'."
raise ValueError(msg)
def _first_max_tokens(
@@ -995,10 +999,11 @@ def _msg_to_chunk(message: BaseMessage) -> BaseMessageChunk:
if isinstance(message, msg_cls):
return chunk_cls(**message.model_dump(exclude={"type"}))
raise ValueError(
msg = (
f"Unrecognized message class {message.__class__}. Supported classes are "
f"{list(_MSG_CHUNK_MAP.keys())}"
)
raise ValueError(msg)
def _chunk_to_msg(chunk: BaseMessageChunk) -> BaseMessage:
@@ -1010,10 +1015,11 @@ def _chunk_to_msg(chunk: BaseMessageChunk) -> BaseMessage:
if isinstance(chunk, chunk_cls):
return msg_cls(**chunk.model_dump(exclude={"type", "tool_call_chunks"}))
raise ValueError(
msg = (
f"Unrecognized message chunk class {chunk.__class__}. Supported classes are "
f"{list(_CHUNK_MSG_MAP.keys())}"
)
raise ValueError(msg)
def _default_text_splitter(text: str) -> list[str]:


@@ -177,10 +177,11 @@ class BaseOutputParser(
if "args" in metadata and len(metadata["args"]) > 0:
return metadata["args"][0]
raise TypeError(
msg = (
f"Runnable {self.__class__.__name__} doesn't have an inferable OutputType. "
"Override the OutputType property to specify the output type."
)
raise TypeError(msg)
def invoke(
self,
@@ -310,10 +311,11 @@ class BaseOutputParser(
@property
def _type(self) -> str:
"""Return the output parser type for serialization."""
raise NotImplementedError(
msg = (
f"_type property is not implemented in class {self.__class__.__name__}."
" This is required for serialization."
)
raise NotImplementedError(msg)
def dict(self, **kwargs: Any) -> dict:
"""Return dictionary representation of output parser."""


@@ -36,16 +36,14 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):
"""
generation = result[0]
if not isinstance(generation, ChatGeneration):
raise OutputParserException(
"This output parser can only be used with a chat generation."
)
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
message = generation.message
try:
func_call = copy.deepcopy(message.additional_kwargs["function_call"])
except KeyError as exc:
raise OutputParserException(
f"Could not parse function call: {exc}"
) from exc
msg = f"Could not parse function call: {exc}"
raise OutputParserException(msg) from exc
if self.args_only:
return func_call["arguments"]
@@ -88,14 +86,12 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
"""
if len(result) != 1:
raise OutputParserException(
f"Expected exactly one result, but got {len(result)}"
)
msg = f"Expected exactly one result, but got {len(result)}"
raise OutputParserException(msg)
generation = result[0]
if not isinstance(generation, ChatGeneration):
raise OutputParserException(
"This output parser can only be used with a chat generation."
)
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
message = generation.message
try:
function_call = message.additional_kwargs["function_call"]
@@ -103,9 +99,8 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
if partial:
return None
else:
raise OutputParserException(
f"Could not parse function call: {exc}"
) from exc
msg = f"Could not parse function call: {exc}"
raise OutputParserException(msg) from exc
try:
if partial:
try:
@@ -129,9 +124,8 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
function_call["arguments"], strict=self.strict
)
except (json.JSONDecodeError, TypeError) as exc:
raise OutputParserException(
f"Could not parse function call data: {exc}"
) from exc
msg = f"Could not parse function call data: {exc}"
raise OutputParserException(msg) from exc
else:
try:
return {
@@ -141,9 +135,8 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
),
}
except (json.JSONDecodeError, TypeError) as exc:
raise OutputParserException(
f"Could not parse function call data: {exc}"
) from exc
msg = f"Could not parse function call data: {exc}"
raise OutputParserException(msg) from exc
except KeyError:
return None
@@ -253,10 +246,11 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
and issubclass(schema, BaseModel)
)
elif values["args_only"] and isinstance(schema, dict):
raise ValueError(
msg = (
"If multiple pydantic schemas are provided then args_only should be"
" False."
)
raise ValueError(msg)
return values
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:


@@ -52,11 +52,12 @@ def parse_tool_call(
raw_tool_call["function"]["arguments"], strict=strict
)
except JSONDecodeError as e:
raise OutputParserException(
msg = (
f"Function {raw_tool_call['function']['name']} arguments:\n\n"
f"{raw_tool_call['function']['arguments']}\n\nare not valid JSON. "
f"Received JSONDecodeError {e}"
) from e
)
raise OutputParserException(msg) from e
parsed = {
"name": raw_tool_call["function"]["name"] or "",
"args": function_args or {},
@@ -170,9 +171,8 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
generation = result[0]
if not isinstance(generation, ChatGeneration):
raise OutputParserException(
"This output parser can only be used with a chat generation."
)
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
message = generation.message
if isinstance(message, AIMessage) and message.tool_calls:
tool_calls = [dict(tc) for tc in message.tool_calls]
@@ -285,10 +285,11 @@ class PydanticToolsParser(JsonOutputToolsParser):
for res in json_results:
try:
if not isinstance(res["args"], dict):
raise ValueError(
msg = (
f"Tool arguments must be specified as a dict, received: "
f"{res['args']}"
)
raise ValueError(msg)
pydantic_objects.append(name_dict[res["type"]](**res["args"]))
except (ValidationError, ValueError) as e:
if partial:


@@ -29,10 +29,9 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
elif issubclass(self.pydantic_object, pydantic.v1.BaseModel):
return self.pydantic_object.parse_obj(obj)
else:
raise OutputParserException(
f"Unsupported model version for PydanticOutputParser: \
msg = f"Unsupported model version for PydanticOutputParser: \
{self.pydantic_object.__class__}"
)
raise OutputParserException(msg)
except (pydantic.ValidationError, pydantic.v1.ValidationError) as e:
raise self._parser_exception(e, obj) from e
else: # pydantic v1


@@ -49,11 +49,12 @@ class _StreamingParser:
try:
import defusedxml # type: ignore
except ImportError as e:
raise ImportError(
msg = (
"defusedxml is not installed. "
"Please install it to use the defusedxml parser."
"You can install it with `pip install defusedxml` "
) from e
)
raise ImportError(msg) from e
_parser = defusedxml.ElementTree.DefusedXMLParser(target=TreeBuilder())
else:
_parser = None
@@ -190,12 +191,13 @@ class XMLOutputParser(BaseTransformOutputParser):
try:
from defusedxml import ElementTree # type: ignore
except ImportError as e:
raise ImportError(
msg = (
"defusedxml is not installed. "
"Please install it to use the defusedxml parser."
"You can install it with `pip install defusedxml`"
"See https://github.com/tiran/defusedxml for more details"
) from e
)
raise ImportError(msg) from e
_et = ElementTree # Use the defusedxml parser
else:
_et = ET # Use the standard library parser


@@ -65,7 +65,8 @@ class ChatGeneration(Generation):
pass
self.text = text
except (KeyError, AttributeError) as e:
raise ValueError("Error while initializing ChatGeneration") from e
msg = "Error while initializing ChatGeneration"
raise ValueError(msg) from e
return self
@classmethod
@@ -114,6 +115,7 @@ class ChatGenerationChunk(ChatGeneration):
generation_info=generation_info or None,
)
else:
raise TypeError(
msg = (
f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
)
raise TypeError(msg)


@@ -64,6 +64,7 @@ class GenerationChunk(Generation):
generation_info=generation_info or None,
)
else:
raise TypeError(
msg = (
f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
)
raise TypeError(msg)


@@ -70,21 +70,22 @@ class BasePromptTemplate(
def validate_variable_names(self) -> Self:
"""Validate variable names do not include restricted names."""
if "stop" in self.input_variables:
raise ValueError(
msg = (
"Cannot have an input variable named 'stop', as it is used internally,"
" please rename."
)
raise ValueError(msg)
if "stop" in self.partial_variables:
raise ValueError(
msg = (
"Cannot have an partial variable named 'stop', as it is used "
"internally, please rename."
)
raise ValueError(msg)
overall = set(self.input_variables).intersection(self.partial_variables)
if overall:
raise ValueError(
f"Found overlapping input and partial variables: {overall}"
)
msg = f"Found overlapping input and partial variables: {overall}"
raise ValueError(msg)
return self
@classmethod
@@ -143,10 +144,11 @@ class BasePromptTemplate(
inner_input = {var_name: inner_input}
else:
raise TypeError(
msg = (
f"Expected mapping type as input to {self.__class__.__name__}. "
f"Received {type(inner_input)}."
)
raise TypeError(msg)
missing = set(self.input_variables).difference(inner_input)
if missing:
msg = (
@@ -341,12 +343,14 @@ class BasePromptTemplate(
prompt.save(file_path="path/prompt.yaml")
"""
if self.partial_variables:
raise ValueError("Cannot save prompt with partial variables.")
msg = "Cannot save prompt with partial variables."
raise ValueError(msg)
# Fetch dictionary to save
prompt_dict = self.dict()
if "_type" not in prompt_dict:
raise NotImplementedError(f"Prompt {self} does not support saving.")
msg = f"Prompt {self} does not support saving."
raise NotImplementedError(msg)
# Convert file to Path object.
save_path = Path(file_path) if isinstance(file_path, str) else file_path
@@ -361,7 +365,8 @@ class BasePromptTemplate(
with open(file_path, "w") as f:
yaml.dump(prompt_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
msg = f"{save_path} must be json or yaml"
raise ValueError(msg)
def _get_document_info(doc: Document, prompt: BasePromptTemplate[str]) -> dict:
@@ -371,11 +376,12 @@ def _get_document_info(doc: Document, prompt: BasePromptTemplate[str]) -> dict:
required_metadata = [
iv for iv in prompt.input_variables if iv != "page_content"
]
raise ValueError(
msg = (
f"Document prompt requires documents to have metadata variables: "
f"{required_metadata}. Received document with missing metadata: "
f"{list(missing_metadata)}."
)
raise ValueError(msg)
return {k: base_info[k] for k in prompt.input_variables}


@@ -236,10 +236,11 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
else kwargs[self.variable_name]
)
if not isinstance(value, list):
raise ValueError(
msg = (
f"variable {self.variable_name} should be a list of base messages, "
f"got {value} of type {type(value)}"
)
raise ValueError(msg)
value = convert_to_messages(value)
if self.n_messages:
value = value[-self.n_messages :]
@@ -514,9 +515,8 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
return cls(prompt=prompt, **kwargs)
elif isinstance(template, list):
if (partial_variables is not None) and len(partial_variables) > 0:
raise ValueError(
"Partial variables are not supported for list of templates."
)
msg = "Partial variables are not supported for list of templates."
raise ValueError(msg)
prompt = []
for tmpl in template:
if isinstance(tmpl, str) or isinstance(tmpl, dict) and "text" in tmpl:
@@ -536,11 +536,12 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
vars = get_template_variables(img_template, "f-string")
if vars:
if len(vars) > 1:
raise ValueError(
msg = (
"Only one format variable allowed per image"
f" template.\nGot: {vars}"
f"\nFrom: {tmpl}"
)
raise ValueError(msg)
input_variables = [vars[0]]
img_template = {"url": img_template}
img_template_obj = ImagePromptTemplate(
@@ -559,13 +560,16 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
input_variables=input_variables, template=img_template
)
else:
raise ValueError(f"Invalid image template: {tmpl}")
msg = f"Invalid image template: {tmpl}"
raise ValueError(msg)
prompt.append(img_template_obj)
else:
raise ValueError(f"Invalid template: {tmpl}")
msg = f"Invalid template: {tmpl}"
raise ValueError(msg)
return cls(prompt=prompt, **kwargs)
else:
raise ValueError(f"Invalid template: {template}")
msg = f"Invalid template: {template}"
raise ValueError(msg)
@classmethod
def from_template_file(
@@ -1042,7 +1046,8 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
prompt = HumanMessagePromptTemplate.from_template(other)
return ChatPromptTemplate(messages=self.messages + [prompt]) # type: ignore[call-arg]
else:
raise NotImplementedError(f"Unsupported operand type for +: {type(other)}")
msg = f"Unsupported operand type for +: {type(other)}"
raise NotImplementedError(msg)
@model_validator(mode="before")
@classmethod
@@ -1085,11 +1090,12 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
input_vars = input_vars - optional_variables
if "input_variables" in values and values.get("validate_template"):
if input_vars != set(values["input_variables"]):
raise ValueError(
msg = (
"Got mismatched input_variables. "
f"Expected: {input_vars}. "
f"Got: {values['input_variables']}"
)
raise ValueError(msg)
else:
values["input_variables"] = sorted(input_vars)
if optional_variables:
@@ -1214,7 +1220,8 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
message = message_template.format_messages(**kwargs)
result.extend(message)
else:
raise ValueError(f"Unexpected input: {message_template}")
msg = f"Unexpected input: {message_template}"
raise ValueError(msg)
return result
async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]:
@@ -1241,7 +1248,8 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
message = await message_template.aformat_messages(**kwargs)
result.extend(message)
else:
raise ValueError(f"Unexpected input: {message_template}")
msg = f"Unexpected input: {message_template}"
raise ValueError(msg)
return result
def partial(self, **kwargs: Any) -> ChatPromptTemplate:
@@ -1376,38 +1384,43 @@ def _create_template_from_message_type(
elif message_type == "placeholder":
if isinstance(template, str):
if template[0] != "{" or template[-1] != "}":
raise ValueError(
msg = (
f"Invalid placeholder template: {template}."
" Expected a variable name surrounded by curly braces."
)
raise ValueError(msg)
var_name = template[1:-1]
message = MessagesPlaceholder(variable_name=var_name, optional=True)
elif len(template) == 2 and isinstance(template[1], bool):
var_name_wrapped, is_optional = template
if not isinstance(var_name_wrapped, str):
raise ValueError(
msg = (
"Expected variable name to be a string." f" Got: {var_name_wrapped}"
)
raise ValueError(msg)
if var_name_wrapped[0] != "{" or var_name_wrapped[-1] != "}":
raise ValueError(
msg = (
f"Invalid placeholder template: {var_name_wrapped}."
" Expected a variable name surrounded by curly braces."
)
raise ValueError(msg)
var_name = var_name_wrapped[1:-1]
message = MessagesPlaceholder(variable_name=var_name, optional=is_optional)
else:
raise ValueError(
msg = (
"Unexpected arguments for placeholder message type."
" Expected either a single string variable name"
" or a list of [variable_name: str, is_optional: bool]."
f" Got: {template}"
)
raise ValueError(msg)
else:
raise ValueError(
msg = (
f"Unexpected message type: {message_type}. Use one of 'human',"
f" 'user', 'ai', 'assistant', or 'system'."
)
raise ValueError(msg)
return message
@@ -1448,7 +1461,8 @@ def _convert_to_message(
)
elif isinstance(message, tuple):
if len(message) != 2:
raise ValueError(f"Expected 2-tuple of (role, template), got {message}")
msg = f"Expected 2-tuple of (role, template), got {message}"
raise ValueError(msg)
message_type_str, template = message
if isinstance(message_type_str, str):
_message = _create_template_from_message_type(
@@ -1461,6 +1475,7 @@ def _convert_to_message(
)
)
else:
raise NotImplementedError(f"Unsupported message type: {type(message)}")
msg = f"Unsupported message type: {type(message)}"
raise NotImplementedError(msg)
return _message


@@ -62,14 +62,12 @@ class _FewShotPromptTemplateMixin(BaseModel):
examples = values.get("examples")
example_selector = values.get("example_selector")
if examples and example_selector:
raise ValueError(
"Only one of 'examples' and 'example_selector' should be provided"
)
msg = "Only one of 'examples' and 'example_selector' should be provided"
raise ValueError(msg)
if examples is None and example_selector is None:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
msg = "One of 'examples' and 'example_selector' should be provided"
raise ValueError(msg)
return values
@@ -90,9 +88,8 @@ class _FewShotPromptTemplateMixin(BaseModel):
elif self.example_selector is not None:
return self.example_selector.select_examples(kwargs)
else:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
msg = "One of 'examples' and 'example_selector' should be provided"
raise ValueError(msg)
async def _aget_examples(self, **kwargs: Any) -> list[dict]:
"""Async get the examples to use for formatting the prompt.
@@ -111,9 +108,8 @@ class _FewShotPromptTemplateMixin(BaseModel):
elif self.example_selector is not None:
return await self.example_selector.aselect_examples(kwargs)
else:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
msg = "One of 'examples' and 'example_selector' should be provided"
raise ValueError(msg)
class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate):
@@ -243,7 +239,8 @@ class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate):
ValueError: If example_selector is provided.
"""
if self.example_selector:
raise ValueError("Saving an example selector is not currently supported")
msg = "Saving an example selector is not currently supported"
raise ValueError(msg)
return super().save(file_path)


@@ -54,14 +54,12 @@ class FewShotPromptWithTemplates(StringPromptTemplate):
examples = values.get("examples")
example_selector = values.get("example_selector")
if examples and example_selector:
raise ValueError(
"Only one of 'examples' and 'example_selector' should be provided"
)
msg = "Only one of 'examples' and 'example_selector' should be provided"
raise ValueError(msg)
if examples is None and example_selector is None:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
msg = "One of 'examples' and 'example_selector' should be provided"
raise ValueError(msg)
return values
@@ -76,10 +74,11 @@ class FewShotPromptWithTemplates(StringPromptTemplate):
expected_input_variables |= set(self.prefix.input_variables)
missing_vars = expected_input_variables.difference(input_variables)
if missing_vars:
raise ValueError(
msg = (
f"Got input_variables={input_variables}, but based on "
f"prefix/suffix expected {expected_input_variables}"
)
raise ValueError(msg)
else:
self.input_variables = sorted(
set(self.suffix.input_variables)
@@ -216,5 +215,6 @@ class FewShotPromptWithTemplates(StringPromptTemplate):
ValueError: If example_selector is provided.
"""
if self.example_selector:
raise ValueError("Saving an example selector is not currently supported")
msg = "Saving an example selector is not currently supported"
raise ValueError(msg)
return super().save(file_path)


@@ -20,11 +20,12 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
overlap = set(kwargs["input_variables"]) & {"url", "path", "detail"}
if overlap:
raise ValueError(
msg = (
"input_variables for the image template cannot contain"
" any of 'url', 'path', or 'detail'."
f" Found: {overlap}"
)
raise ValueError(msg)
super().__init__(**kwargs)
@property
@@ -91,13 +92,16 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
path = kwargs.get("path") or formatted.get("path")
detail = kwargs.get("detail") or formatted.get("detail")
if not url and not path:
raise ValueError("Must provide either url or path.")
msg = "Must provide either url or path."
raise ValueError(msg)
if not url:
if not isinstance(path, str):
raise ValueError("path must be a string.")
msg = "path must be a string."
raise ValueError(msg)
url = image_utils.image_to_data_url(path)
if not isinstance(url, str):
raise ValueError("url must be a string.")
msg = "url must be a string."
raise ValueError(msg)
output: ImageURL = {"url": url}
if detail:
# Don't check literal values here: let the API check them


@@ -34,7 +34,8 @@ def load_prompt_from_config(config: dict) -> BasePromptTemplate:
config_type = config.pop("_type", "prompt")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} prompt not supported")
msg = f"Loading {config_type} prompt not supported"
raise ValueError(msg)
prompt_loader = type_to_loader_dict[config_type]
return prompt_loader(config)
@@ -46,9 +47,8 @@ def _load_template(var_name: str, config: dict) -> dict:
if f"{var_name}_path" in config:
# If it does, make sure template variable doesn't also exist.
if var_name in config:
raise ValueError(
f"Both `{var_name}_path` and `{var_name}` cannot be provided."
)
msg = f"Both `{var_name}_path` and `{var_name}` cannot be provided."
raise ValueError(msg)
# Pop the template path from the config.
template_path = Path(config.pop(f"{var_name}_path"))
# Load the template.
@@ -73,12 +73,12 @@ def _load_examples(config: dict) -> dict:
elif config["examples"].endswith((".yaml", ".yml")):
examples = yaml.safe_load(f)
else:
raise ValueError(
"Invalid file format. Only json or yaml formats are supported."
)
msg = "Invalid file format. Only json or yaml formats are supported."
raise ValueError(msg)
config["examples"] = examples
else:
raise ValueError("Invalid examples format. Only list or string are supported.")
msg = "Invalid examples format. Only list or string are supported."
raise ValueError(msg)
return config
@@ -90,7 +90,8 @@ def _load_output_parser(config: dict) -> dict:
if output_parser_type == "default":
output_parser = StrOutputParser(**_config)
else:
raise ValueError(f"Unsupported output parser {output_parser_type}")
msg = f"Unsupported output parser {output_parser_type}"
raise ValueError(msg)
config["output_parser"] = output_parser
return config
@@ -103,10 +104,11 @@ def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:
# Load the example prompt.
if "example_prompt_path" in config:
if "example_prompt" in config:
raise ValueError(
msg = (
"Only one of example_prompt and example_prompt_path should "
"be specified."
)
raise ValueError(msg)
config["example_prompt"] = load_prompt(config.pop("example_prompt_path"))
else:
config["example_prompt"] = load_prompt_from_config(config["example_prompt"])
@@ -126,11 +128,12 @@ def _load_prompt(config: dict) -> PromptTemplate:
if template_format == "jinja2":
# Disabled due to:
# https://github.com/langchain-ai/langchain/issues/4394
raise ValueError(
msg = (
f"Loading templates with '{template_format}' format is no longer supported "
f"since it can lead to arbitrary code execution. Please migrate to using "
f"the 'f-string' template format, which does not suffer from this issue."
)
raise ValueError(msg)
return PromptTemplate(**config)
@@ -151,11 +154,12 @@ def load_prompt(
RuntimeError: If the path is a LangChain Hub path.
"""
if isinstance(path, str) and path.startswith("lc://"):
raise RuntimeError(
msg = (
"Loading from the deprecated github-based Hub is no longer supported. "
"Please use the new LangChain Hub at https://smith.langchain.com/hub "
"instead."
)
raise RuntimeError(msg)
return _load_prompt_from_file(path, encoding)
@@ -173,7 +177,8 @@ def _load_prompt_from_file(
with open(file_path, encoding=encoding) as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Got unsupported file type {file_path.suffix}")
msg = f"Got unsupported file type {file_path.suffix}"
raise ValueError(msg)
# Load the prompt from the config now.
return load_prompt_from_config(config)
@@ -186,7 +191,8 @@ def _load_chat_prompt(config: dict) -> ChatPromptTemplate:
config.pop("input_variables")
if not template:
raise ValueError("Can't load chat prompt without template")
msg = "Can't load chat prompt without template"
raise ValueError(msg)
return ChatPromptTemplate.from_template(template=template, **config)


@@ -89,12 +89,12 @@ class PromptTemplate(StringPromptTemplate):
if values.get("validate_template"):
if values["template_format"] == "mustache":
raise ValueError("Mustache templates cannot be validated.")
msg = "Mustache templates cannot be validated."
raise ValueError(msg)
if "input_variables" not in values:
raise ValueError(
"Input variables must be provided to validate the template."
)
msg = "Input variables must be provided to validate the template."
raise ValueError(msg)
all_inputs = values["input_variables"] + list(values["partial_variables"])
check_valid_template(
@ -131,13 +131,11 @@ class PromptTemplate(StringPromptTemplate):
# Allow for easy combining
if isinstance(other, PromptTemplate):
if self.template_format != "f-string":
raise ValueError(
"Adding prompt templates only supported for f-strings."
)
msg = "Adding prompt templates only supported for f-strings."
raise ValueError(msg)
if other.template_format != "f-string":
raise ValueError(
"Adding prompt templates only supported for f-strings."
)
msg = "Adding prompt templates only supported for f-strings."
raise ValueError(msg)
input_variables = list(
set(self.input_variables) | set(other.input_variables)
)
@ -147,7 +145,8 @@ class PromptTemplate(StringPromptTemplate):
partial_variables = dict(self.partial_variables.items())
for k, v in other.partial_variables.items():
if k in partial_variables:
raise ValueError("Cannot have same variable partialed twice.")
msg = "Cannot have same variable partialed twice."
raise ValueError(msg)
else:
partial_variables[k] = v
return PromptTemplate(
@ -161,7 +160,8 @@ class PromptTemplate(StringPromptTemplate):
prompt = PromptTemplate.from_template(other)
return self + prompt
else:
raise NotImplementedError(f"Unsupported operand type for +: {type(other)}")
msg = f"Unsupported operand type for +: {type(other)}"
raise NotImplementedError(msg)
@property
def _prompt_type(self) -> str:

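The `__add__` hunks above restrict combination to f-string templates; plain strings are promoted through `from_template`. A short sketch of the supported path:

from langchain_core.prompts import PromptTemplate

first = PromptTemplate.from_template("Tell me a {adjective} joke")
combined = first + " about {topic}."  # the string is promoted via from_template
print(sorted(combined.input_variables))  # ['adjective', 'topic']
print(combined.format(adjective="dry", topic="compilers"))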

@ -42,13 +42,14 @@ def jinja2_formatter(template: str, /, **kwargs: Any) -> str:
try:
from jinja2.sandbox import SandboxedEnvironment
except ImportError as e:
raise ImportError(
msg = (
"jinja2 not installed, which is needed to use the jinja2_formatter. "
"Please install it with `pip install jinja2`."
"Please be cautious when using jinja2 templates. "
"Do not expand jinja2 templates using unverified or user-controlled "
"inputs as that can result in arbitrary Python code execution."
) from e
)
raise ImportError(msg) from e
# This uses a sandboxed environment to prevent arbitrary code execution.
# Jinja2 uses an opt-out rather than opt-in approach for sandboxing.
@ -89,10 +90,11 @@ def _get_jinja2_variables_from_template(template: str) -> set[str]:
try:
from jinja2 import Environment, meta
except ImportError as e:
raise ImportError(
msg = (
"jinja2 not installed, which is needed to use the jinja2_formatter. "
"Please install it with `pip install jinja2`."
) from e
)
raise ImportError(msg) from e
env = Environment()
ast = env.parse(template)
variables = meta.find_undeclared_variables(ast)
@ -217,17 +219,19 @@ def check_valid_template(
try:
validator_func = DEFAULT_VALIDATOR_MAPPING[template_format]
except KeyError as exc:
raise ValueError(
msg = (
f"Invalid template format {template_format!r}, should be one of"
f" {list(DEFAULT_FORMATTER_MAPPING)}."
) from exc
)
raise ValueError(msg) from exc
try:
validator_func(template, input_variables)
except (KeyError, IndexError) as exc:
raise ValueError(
msg = (
"Invalid prompt schema; check for mismatched or missing input parameters"
f" from {input_variables}."
) from exc
)
raise ValueError(msg) from exc
def get_template_variables(template: str, template_format: str) -> list[str]:
@ -253,7 +257,8 @@ def get_template_variables(template: str, template_format: str) -> list[str]:
elif template_format == "mustache":
input_variables = mustache_template_vars(template)
else:
raise ValueError(f"Unsupported template format: {template_format}")
msg = f"Unsupported template format: {template_format}"
raise ValueError(msg)
return sorted(input_variables)
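
The dispatcher above is easy to exercise directly; a minimal sketch, assuming the import path of the file under edit (langchain_core/prompts/string.py):

from langchain_core.prompts.string import get_template_variables

print(get_template_variables("Tell me a {adjective} joke", "f-string"))    # ['adjective']
print(get_template_variables("Tell me a {{adjective}} joke", "mustache"))  # ['adjective']
print(get_template_variables("no placeholders here", "f-string"))          # []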


@ -156,6 +156,5 @@ class StructuredPrompt(ChatPromptTemplate):
name=name,
)
else:
raise NotImplementedError(
"Structured prompts need to be piped to a language model."
)
msg = "Structured prompts need to be piped to a language model."
raise NotImplementedError(msg)


@ -292,10 +292,11 @@ class Runnable(Generic[Input, Output], ABC):
if type_args and len(type_args) == 2:
return type_args[0]
raise TypeError(
msg = (
f"Runnable {self.get_name()} doesn't have an inferable InputType. "
"Override the InputType property to specify the input type."
)
raise TypeError(msg)
@property
def OutputType(self) -> type[Output]: # noqa: N802
@ -313,10 +314,11 @@ class Runnable(Generic[Input, Output], ABC):
if type_args and len(type_args) == 2:
return type_args[1]
raise TypeError(
msg = (
f"Runnable {self.get_name()} doesn't have an inferable OutputType. "
"Override the OutputType property to specify the output type."
)
raise TypeError(msg)
@property
def input_schema(self) -> type[BaseModel]:
@ -1379,9 +1381,8 @@ class Runnable(Generic[Input, Output], ABC):
**kwargs,
)
else:
raise NotImplementedError(
'Only versions "v1" and "v2" of the schema is currently supported.'
)
msg = 'Only versions "v1" and "v2" of the schema is currently supported.'
raise NotImplementedError(msg)
async with aclosing(event_stream):
async for event in event_stream:
@ -2513,10 +2514,11 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
for key in kwargs:
if key not in self.model_fields:
raise ValueError(
msg = (
f"Configuration key {key} not found in {self}: "
f"available keys are {self.model_fields.keys()}"
)
raise ValueError(msg)
return RunnableConfigurableFields(default=self, fields=kwargs)
@ -2772,9 +2774,8 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
else:
steps_flat.append(coerce_to_runnable(step))
if len(steps_flat) < 2:
raise ValueError(
f"RunnableSequence must have at least 2 steps, got {len(steps_flat)}"
)
msg = f"RunnableSequence must have at least 2 steps, got {len(steps_flat)}"
raise ValueError(msg)
super().__init__( # type: ignore[call-arg]
first=steps_flat[0],
middle=list(steps_flat[1:-1]),
@ -2923,7 +2924,8 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
step_graph.trim_last_node()
step_first_node, _ = graph.extend(step_graph)
if not step_first_node:
raise ValueError(f"Runnable {step} has no first node")
msg = f"Runnable {step} has no first node"
raise ValueError(msg)
if current_last_node:
graph.add_edge(current_last_node, step_first_node)
@ -3655,9 +3657,11 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
else:
step_first_node, step_last_node = graph.extend(step_graph)
if not step_first_node:
raise ValueError(f"Runnable {step} has no first node")
msg = f"Runnable {step} has no first node"
raise ValueError(msg)
if not step_last_node:
raise ValueError(f"Runnable {step} has no last node")
msg = f"Runnable {step} has no last node"
raise ValueError(msg)
graph.add_edge(input_node, step_first_node)
graph.add_edge(step_last_node, output_node)
@ -4049,10 +4053,11 @@ class RunnableGenerator(Runnable[Input, Output]):
self._transform = transform
func_for_name = transform
else:
raise TypeError(
msg = (
"Expected a generator function type for `transform`."
f"Instead got an unsupported type: {type(transform)}"
)
raise TypeError(msg)
try:
self.name = name or func_for_name.__name__
@ -4161,7 +4166,8 @@ class RunnableGenerator(Runnable[Input, Output]):
**kwargs: Any,
) -> Iterator[Output]:
if not hasattr(self, "_transform"):
raise NotImplementedError(f"{repr(self)} only supports async methods.")
msg = f"{repr(self)} only supports async methods."
raise NotImplementedError(msg)
return self._transform_stream_with_config(
input,
self._transform, # type: ignore[arg-type]
@ -4192,7 +4198,8 @@ class RunnableGenerator(Runnable[Input, Output]):
**kwargs: Any,
) -> AsyncIterator[Output]:
if not hasattr(self, "_atransform"):
raise NotImplementedError(f"{repr(self)} only supports sync methods.")
msg = f"{repr(self)} only supports sync methods."
raise NotImplementedError(msg)
return self._atransform_stream_with_config(
input, self._atransform, config, **kwargs
@ -4320,21 +4327,23 @@ class RunnableLambda(Runnable[Input, Output]):
if is_async_callable(func) or is_async_generator(func):
if afunc is not None:
raise TypeError(
msg = (
"Func was provided as a coroutine function, but afunc was "
"also provided. If providing both, func should be a regular "
"function to avoid ambiguity."
)
raise TypeError(msg)
self.afunc = func
func_for_name = func
elif callable(func):
self.func = cast(Callable[[Input], Output], func)
func_for_name = func
else:
raise TypeError(
msg = (
"Expected a callable type for `func`."
f"Instead got an unsupported type: {type(func)}"
)
raise TypeError(msg)
try:
if name is not None:
@ -4497,9 +4506,11 @@ class RunnableLambda(Runnable[Input, Output]):
else:
dep_first_node, dep_last_node = graph.extend(dep_graph)
if not dep_first_node:
raise ValueError(f"Runnable {dep} has no first node")
msg = f"Runnable {dep} has no first node"
raise ValueError(msg)
if not dep_last_node:
raise ValueError(f"Runnable {dep} has no last node")
msg = f"Runnable {dep} has no last node"
raise ValueError(msg)
graph.add_edge(input_node, dep_first_node)
graph.add_edge(dep_last_node, output_node)
else:
@ -4560,9 +4571,10 @@ class RunnableLambda(Runnable[Input, Output]):
if isinstance(output, Runnable):
recursion_limit = config["recursion_limit"]
if recursion_limit <= 0:
raise RecursionError(
msg = (
f"Recursion limit reached when invoking {self} with input {input}."
)
raise RecursionError(msg)
output = output.invoke(
input,
patch_config(
@ -4659,9 +4671,10 @@ class RunnableLambda(Runnable[Input, Output]):
if isinstance(output, Runnable):
recursion_limit = config["recursion_limit"]
if recursion_limit <= 0:
raise RecursionError(
msg = (
f"Recursion limit reached when invoking {self} with input {input}."
)
raise RecursionError(msg)
output = await output.ainvoke(
input,
patch_config(
@ -4704,10 +4717,11 @@ class RunnableLambda(Runnable[Input, Output]):
**kwargs,
)
else:
raise TypeError(
msg = (
"Cannot invoke a coroutine function synchronously."
"Use `ainvoke` instead."
)
raise TypeError(msg)
async def ainvoke(
self,
@ -4778,10 +4792,11 @@ class RunnableLambda(Runnable[Input, Output]):
if isinstance(output, Runnable):
recursion_limit = config["recursion_limit"]
if recursion_limit <= 0:
raise RecursionError(
msg = (
f"Recursion limit reached when invoking "
f"{self} with input {final}."
)
raise RecursionError(msg)
for chunk in output.stream(
final,
patch_config(
@ -4809,10 +4824,11 @@ class RunnableLambda(Runnable[Input, Output]):
**kwargs,
)
else:
raise TypeError(
msg = (
"Cannot stream a coroutine function synchronously."
"Use `astream` instead."
)
raise TypeError(msg)
def stream(
self,
@ -4849,10 +4865,11 @@ class RunnableLambda(Runnable[Input, Output]):
afunc = self.afunc
else:
if inspect.isgeneratorfunction(self.func):
raise TypeError(
msg = (
"Cannot stream from a generator function asynchronously."
"Use .stream() instead."
)
raise TypeError(msg)
def func(
input: Input,
@ -4899,10 +4916,11 @@ class RunnableLambda(Runnable[Input, Output]):
if isinstance(output, Runnable):
recursion_limit = config["recursion_limit"]
if recursion_limit <= 0:
raise RecursionError(
msg = (
f"Recursion limit reached when invoking "
f"{self} with input {final}."
)
raise RecursionError(msg)
async for chunk in output.astream(
final,
patch_config(
@ -5061,9 +5079,8 @@ class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]):
**kwargs: Optional[Any],
) -> AsyncIterator[StreamEvent]:
for _ in range(1):
raise NotImplementedError(
"RunnableEach does not support astream_events yet."
)
msg = "RunnableEach does not support astream_events yet."
raise NotImplementedError(msg)
yield
@ -5819,10 +5836,11 @@ def coerce_to_runnable(thing: RunnableLike) -> Runnable[Input, Output]:
elif isinstance(thing, dict):
return cast(Runnable[Input, Output], RunnableParallel(thing))
else:
raise TypeError(
msg = (
f"Expected a Runnable, callable or dict."
f"Instead got an unsupported type: {type(thing)}"
)
raise TypeError(msg)
@overload

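Several of these hunks guard `coerce_to_runnable` and the two-step minimum of `RunnableSequence`; both are visible from the composition operator. A sketch, assuming the public `langchain_core.runnables` exports:

from langchain_core.runnables import RunnableLambda

# `|` coerces the bare lambda into a RunnableLambda and builds a
# RunnableSequence, which must have at least two steps.
chain = RunnableLambda(lambda x: x + 1) | (lambda x: x * 2)
assert chain.invoke(3) == 8

# A dict is coerced into a RunnableParallel that fans the input out.
fan_out = RunnableLambda(lambda x: x) | {
    "double": lambda x: x * 2,
    "square": lambda x: x * x,
}
assert fan_out.invoke(3) == {"double": 6, "square": 9}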

@ -92,7 +92,8 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
ValueError: If a branch is not of length 2.
"""
if len(branches) < 2:
raise ValueError("RunnableBranch requires at least two branches")
msg = "RunnableBranch requires at least two branches"
raise ValueError(msg)
default = branches[-1]
@ -100,9 +101,8 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
default,
(Runnable, Callable, Mapping), # type: ignore[arg-type]
):
raise TypeError(
"RunnableBranch default must be Runnable, callable or mapping."
)
msg = "RunnableBranch default must be Runnable, callable or mapping."
raise TypeError(msg)
default_ = cast(
Runnable[Input, Output], coerce_to_runnable(cast(RunnableLike, default))
@ -112,16 +112,18 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
for branch in branches[:-1]:
if not isinstance(branch, (tuple, list)): # type: ignore[arg-type]
raise TypeError(
msg = (
f"RunnableBranch branches must be "
f"tuples or lists, not {type(branch)}"
)
raise TypeError(msg)
if not len(branch) == 2:
raise ValueError(
if len(branch) != 2:
msg = (
f"RunnableBranch branches must be "
f"tuples or lists of length 2, not {len(branch)}"
)
raise ValueError(msg)
condition, runnable = branch
condition = cast(Runnable[Input, bool], coerce_to_runnable(condition))
runnable = coerce_to_runnable(runnable)
@ -185,7 +187,8 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
and s.id.endswith(CONTEXT_CONFIG_SUFFIX_SET)
for s in specs
):
raise ValueError("RunnableBranch cannot contain context setters.")
msg = "RunnableBranch cannot contain context setters."
raise ValueError(msg)
return specs
def invoke(

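At the call site, the constructor checks above mean: every branch but the last is a (condition, runnable) two-tuple, and the final positional argument is the default. A minimal sketch:

from langchain_core.runnables import RunnableBranch

branch = RunnableBranch(
    (lambda x: isinstance(x, str), lambda x: x.upper()),  # (condition, runnable)
    (lambda x: isinstance(x, int), lambda x: x + 1),
    lambda x: "unhandled",  # default, coerced like the others
)
assert branch.invoke("hi") == "HI"
assert branch.invoke(41) == 42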

@ -219,12 +219,14 @@ def get_config_list(
"""
if length < 0:
raise ValueError(f"length must be >= 0, but got {length}")
msg = f"length must be >= 0, but got {length}"
raise ValueError(msg)
if isinstance(config, Sequence) and len(config) != length:
raise ValueError(
msg = (
f"config must be a list of the same length as inputs, "
f"but got {len(config)} configs for {length} inputs"
)
raise ValueError(msg)
if isinstance(config, Sequence):
return list(map(ensure_config, config))


@ -632,7 +632,8 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
else:
return (alt(), config)
else:
raise ValueError(f"Unknown alternative: {which}")
msg = f"Unknown alternative: {which}"
raise ValueError(msg)
def _strremoveprefix(s: str, prefix: str) -> str:


@ -152,10 +152,11 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
if self.exception_key is not None and not isinstance(input, dict):
raise ValueError(
msg = (
"If 'exception_key' is specified then input must be a dictionary."
f"However found a type of {type(input)} for input"
)
raise ValueError(msg)
# setup callbacks
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
@ -192,7 +193,8 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
run_manager.on_chain_end(output)
return output
if first_error is None:
raise ValueError("No error stored at end of fallbacks.")
msg = "No error stored at end of fallbacks."
raise ValueError(msg)
run_manager.on_chain_error(first_error)
raise first_error
@ -203,10 +205,11 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
**kwargs: Optional[Any],
) -> Output:
if self.exception_key is not None and not isinstance(input, dict):
raise ValueError(
msg = (
"If 'exception_key' is specified then input must be a dictionary."
f"However found a type of {type(input)} for input"
)
raise ValueError(msg)
# setup callbacks
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
@ -243,7 +246,8 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
await run_manager.on_chain_end(output)
return output
if first_error is None:
raise ValueError("No error stored at end of fallbacks.")
msg = "No error stored at end of fallbacks."
raise ValueError(msg)
await run_manager.on_chain_error(first_error)
raise first_error
@ -260,10 +264,11 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
if self.exception_key is not None and not all(
isinstance(input, dict) for input in inputs
):
raise ValueError(
msg = (
"If 'exception_key' is specified then inputs must be dictionaries."
f"However found a type of {type(inputs[0])} for input"
)
raise ValueError(msg)
if not inputs:
return []
@ -352,10 +357,11 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
if self.exception_key is not None and not all(
isinstance(input, dict) for input in inputs
):
raise ValueError(
msg = (
"If 'exception_key' is specified then inputs must be dictionaries."
f"However found a type of {type(inputs[0])} for input"
)
raise ValueError(msg)
if not inputs:
return []
@ -447,10 +453,11 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
) -> Iterator[Output]:
""""""
if self.exception_key is not None and not isinstance(input, dict):
raise ValueError(
msg = (
"If 'exception_key' is specified then input must be a dictionary."
f"However found a type of {type(input)} for input"
)
raise ValueError(msg)
# setup callbacks
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
@ -510,10 +517,11 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
**kwargs: Optional[Any],
) -> AsyncIterator[Output]:
if self.exception_key is not None and not isinstance(input, dict):
raise ValueError(
msg = (
"If 'exception_key' is specified then input must be a dictionary."
f"However found a type of {type(input)} for input"
)
raise ValueError(msg)
# setup callbacks
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)

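The `exception_key` checks above come into play like this: when the key is set, inputs must be dicts so the failed runnable's exception can be passed along to the next fallback. A sketch, assuming the documented `with_fallbacks` signature:

from langchain_core.runnables import RunnableLambda

def flaky(inputs: dict) -> str:
    raise TimeoutError("primary failed")

def backup(inputs: dict) -> str:
    # the previous runnable's exception arrives under the configured key
    return f"fallback saw: {inputs['error']!r}"

chain = RunnableLambda(flaky).with_fallbacks(
    [RunnableLambda(backup)], exception_key="error"
)
print(chain.invoke({"question": "hi"}))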

@ -330,7 +330,8 @@ class Graph:
ValueError: If a node with the same id already exists.
"""
if id is not None and id in self.nodes:
raise ValueError(f"Node with id {id} already exists")
msg = f"Node with id {id} already exists"
raise ValueError(msg)
id = id or self.next_id()
node = Node(id=id, data=data, metadata=metadata, name=node_data_str(id, data))
self.nodes[node.id] = node
@ -371,9 +372,11 @@ class Graph:
ValueError: If the source or target node is not in the graph.
"""
if source.id not in self.nodes:
raise ValueError(f"Source node {source.id} not in graph")
msg = f"Source node {source.id} not in graph"
raise ValueError(msg)
if target.id not in self.nodes:
raise ValueError(f"Target node {target.id} not in graph")
msg = f"Target node {target.id} not in graph"
raise ValueError(msg)
edge = Edge(
source=source.id, target=target.id, data=data, conditional=conditional
)


@ -161,9 +161,8 @@ def _build_sugiyama_layout(
route_with_lines,
)
except ImportError as exc:
raise ImportError(
"Install grandalf to draw graphs: `pip install grandalf`."
) from exc
msg = "Install grandalf to draw graphs: `pip install grandalf`."
raise ImportError(msg) from exc
#
# Just a reminder about naming conventions:


@ -104,11 +104,12 @@ def draw_mermaid(
if prefix and not self_loop:
subgraph = prefix.split(":")[-1]
if subgraph in seen_subgraphs:
raise ValueError(
msg = (
f"Found duplicate subgraph '{subgraph}' -- this likely means that "
"you're reusing a subgraph node with the same name. "
"Please adjust your graph to have subgraph nodes with unique names."
)
raise ValueError(msg)
seen_subgraphs.add(subgraph)
mermaid_graph += f"\tsubgraph {subgraph}\n"
@ -214,10 +215,11 @@ def draw_mermaid_png(
)
else:
supported_methods = ", ".join([m.value for m in MermaidDrawMethod])
raise ValueError(
msg = (
f"Invalid draw method: {draw_method}. "
f"Supported draw methods are: {supported_methods}"
)
raise ValueError(msg)
return img_bytes
@ -233,9 +235,8 @@ async def _render_mermaid_using_pyppeteer(
try:
from pyppeteer import launch # type: ignore[import]
except ImportError as e:
raise ImportError(
"Install Pyppeteer to use the Pyppeteer method: `pip install pyppeteer`."
) from e
msg = "Install Pyppeteer to use the Pyppeteer method: `pip install pyppeteer`."
raise ImportError(msg) from e
browser = await launch()
page = await browser.newPage()
@ -304,10 +305,11 @@ def _render_mermaid_using_api(
try:
import requests # type: ignore[import]
except ImportError as e:
raise ImportError(
msg = (
"Install the `requests` module to use the Mermaid.INK API: "
"`pip install requests`."
) from e
)
raise ImportError(msg) from e
# Use Mermaid API to render the image
mermaid_syntax_encoded = base64.b64encode(mermaid_syntax.encode("utf8")).decode(
@ -332,7 +334,8 @@ def _render_mermaid_using_api(
return img_bytes
else:
raise ValueError(
msg = (
f"Failed to render the graph using the Mermaid.INK API. "
f"Status code: {response.status_code}."
)
raise ValueError(msg)

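Both rendering paths hang off a runnable's graph. A sketch (the PNG call is commented out because it needs pyppeteer or network access to the Mermaid.INK API):

from langchain_core.runnables import RunnableLambda

chain = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)
print(chain.get_graph().draw_mermaid())  # Mermaid source text
# chain.get_graph().draw_mermaid_png()   # rendered via pyppeteer or Mermaid.INK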

@ -136,9 +136,8 @@ class PngDrawer:
try:
import pygraphviz as pgv # type: ignore[import]
except ImportError as exc:
raise ImportError(
"Install pygraphviz to draw graphs: `pip install pygraphviz`."
) from exc
msg = "Install pygraphviz to draw graphs: `pip install pygraphviz`."
raise ImportError(msg) from exc
# Create a directed graph
viz = pgv.AGraph(directed=True, nodesep=0.9, ranksep=1.0)


@ -472,16 +472,16 @@ class RunnableWithMessageHistory(RunnableBindingBase):
# This occurs for chat models - since we batch inputs
if isinstance(input_val[0], list):
if len(input_val) != 1:
raise ValueError(
f"Expected a single list of messages. Got {input_val}."
)
msg = f"Expected a single list of messages. Got {input_val}."
raise ValueError(msg)
return input_val[0]
return list(input_val)
else:
raise ValueError(
msg = (
f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
f"Got {input_val}."
)
raise ValueError(msg)
def _get_output_messages(
self, output_val: Union[str, BaseMessage, Sequence[BaseMessage], dict]
@ -513,10 +513,11 @@ class RunnableWithMessageHistory(RunnableBindingBase):
elif isinstance(output_val, (list, tuple)):
return list(output_val)
else:
raise ValueError(
msg = (
f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
f"Got {output_val}."
)
raise ValueError(msg)
def _enter_history(self, input: Any, config: RunnableConfig) -> list[BaseMessage]:
hist: BaseChatMessageHistory = config["configurable"]["message_history"]
@ -593,12 +594,13 @@ class RunnableWithMessageHistory(RunnableBindingBase):
missing_key: "[your-value-here]" for missing_key in missing_keys
}
example_config = {"configurable": example_configurable}
raise ValueError(
msg = (
f"Missing keys {sorted(missing_keys)} in config['configurable'] "
f"Expected keys are {sorted(expected_keys)}."
f"When using via .invoke() or .stream(), pass in a config; "
f"e.g., chain.invoke({example_input}, {example_config})"
)
raise ValueError(msg)
if len(expected_keys) == 1:
if parameter_names:
@ -613,10 +615,11 @@ class RunnableWithMessageHistory(RunnableBindingBase):
else:
# otherwise verify that the expected key names match the parameter names and invoke by named arguments
if set(expected_keys) != set(parameter_names):
raise ValueError(
msg = (
f"Expected keys {sorted(expected_keys)} do not match parameter "
f"names {sorted(parameter_names)} of get_session_history."
)
raise ValueError(msg)
message_history = self.get_session_history(
**{key: configurable[key] for key in expected_keys}

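The configurable-key validation above corresponds to the `session_id` callers supply through `config`. A minimal sketch, assuming `InMemoryChatMessageHistory` from langchain_core.chat_history:

from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables import RunnableLambda
from langchain_core.runnables.history import RunnableWithMessageHistory

store: dict = {}

def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
    # the parameter name is checked against the expected configurable keys
    return store.setdefault(session_id, InMemoryChatMessageHistory())

chain = RunnableWithMessageHistory(
    RunnableLambda(lambda messages: f"seen {len(messages)} message(s)"),
    get_session_history,
)
# omitting {"configurable": {"session_id": ...}} raises the ValueError above
print(chain.invoke("hi", config={"configurable": {"session_id": "abc"}}))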

@ -101,7 +101,8 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
key = input["key"]
actual_input = input["input"]
if key not in self.runnables:
raise ValueError(f"No runnable associated with key '{key}'")
msg = f"No runnable associated with key '{key}'"
raise ValueError(msg)
runnable = self.runnables[key]
return runnable.invoke(actual_input, config)
@ -115,7 +116,8 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
key = input["key"]
actual_input = input["input"]
if key not in self.runnables:
raise ValueError(f"No runnable associated with key '{key}'")
msg = f"No runnable associated with key '{key}'"
raise ValueError(msg)
runnable = self.runnables[key]
return await runnable.ainvoke(actual_input, config)
@ -134,7 +136,8 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
keys = [input["key"] for input in inputs]
actual_inputs = [input["input"] for input in inputs]
if any(key not in self.runnables for key in keys):
raise ValueError("One or more keys do not have a corresponding runnable")
msg = "One or more keys do not have a corresponding runnable"
raise ValueError(msg)
def invoke(
runnable: Runnable, input: Input, config: RunnableConfig
@ -169,7 +172,8 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
keys = [input["key"] for input in inputs]
actual_inputs = [input["input"] for input in inputs]
if any(key not in self.runnables for key in keys):
raise ValueError("One or more keys do not have a corresponding runnable")
msg = "One or more keys do not have a corresponding runnable"
raise ValueError(msg)
async def ainvoke(
runnable: Runnable, input: Input, config: RunnableConfig
@ -201,7 +205,8 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
key = input["key"]
actual_input = input["input"]
if key not in self.runnables:
raise ValueError(f"No runnable associated with key '{key}'")
msg = f"No runnable associated with key '{key}'"
raise ValueError(msg)
runnable = self.runnables[key]
yield from runnable.stream(actual_input, config)
@ -215,7 +220,8 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
key = input["key"]
actual_input = input["input"]
if key not in self.runnables:
raise ValueError(f"No runnable associated with key '{key}'")
msg = f"No runnable associated with key '{key}'"
raise ValueError(msg)
runnable = self.runnables[key]
async for output in runnable.astream(actual_input, config):

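The repeated key check implies the routing envelope shape. A sketch, assuming `RouterRunnable` is exported from langchain_core.runnables:

from langchain_core.runnables import RouterRunnable, RunnableLambda

router = RouterRunnable(
    {
        "upper": RunnableLambda(lambda s: s.upper()),
        "reverse": RunnableLambda(lambda s: s[::-1]),
    }
)
# the input carries the routing key plus the payload for the chosen runnable
assert router.invoke({"key": "upper", "input": "hi"}) == "HI"
# router.invoke({"key": "missing", "input": "hi"})  # raises the ValueError above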

@ -639,10 +639,11 @@ def get_unique_config_specs(
if len(others) == 0 or all(o == first for o in others):
unique.append(first)
else:
raise ValueError(
msg = (
"RunnableSequence contains conflicting config specs"
f"for {id}: {[first] + others}"
)
raise ValueError(msg)
return unique


@ -24,19 +24,21 @@ class Visitor(ABC):
and self.allowed_operators is not None
and func not in self.allowed_operators
):
raise ValueError(
msg = (
f"Received disallowed operator {func}. Allowed "
f"comparators are {self.allowed_operators}"
)
raise ValueError(msg)
if (
isinstance(func, Comparator)
and self.allowed_comparators is not None
and func not in self.allowed_comparators
):
raise ValueError(
msg = (
f"Received disallowed comparator {func}. Allowed "
f"comparators are {self.allowed_comparators}"
)
raise ValueError(msg)
@abstractmethod
def visit_operation(self, operation: Operation) -> Any:


@ -127,9 +127,8 @@ def _validate_docstring_args_against_annotations(
"""Raise error if docstring arg is not in type annotations."""
for docstring_arg in arg_descriptions:
if docstring_arg not in annotations:
raise ValueError(
f"Arg {docstring_arg} in docstring not found in function signature."
)
msg = f"Arg {docstring_arg} in docstring not found in function signature."
raise ValueError(msg)
def _infer_arg_descriptions(
@ -183,10 +182,11 @@ def _function_annotations_are_pydantic_v1(
for parameter in signature.parameters.values()
)
if any_v1_annotations and any_v2_annotations:
raise NotImplementedError(
msg = (
f"Function {func} contains a mix of Pydantic v1 and v2 annotations. "
"Only one version of Pydantic annotations per function is supported."
)
raise NotImplementedError(msg)
return any_v1_annotations and not any_v2_annotations
@ -335,7 +335,7 @@ class ChildTool(BaseTool):
args_schema: Type[BaseModel] = SchemaClass
..."""
name = cls.__name__
raise SchemaAnnotationError(
msg = (
f"Tool definition for {name} must include valid type annotations"
f" for argument 'args_schema' to behave as expected.\n"
f"Expected annotation of 'Type[BaseModel]'"
@ -343,6 +343,7 @@ class ChildTool(BaseTool):
f"Expected class looks like:\n"
f"{typehint_mandate}"
)
raise SchemaAnnotationError(msg)
name: str
"""The unique name of the tool that clearly communicates its purpose."""
@ -422,10 +423,11 @@ class ChildTool(BaseTool):
and kwargs["args_schema"] is not None
and not is_basemodel_subclass(kwargs["args_schema"])
):
raise TypeError(
msg = (
f"args_schema must be a subclass of pydantic BaseModel. "
f"Got: {kwargs['args_schema']}."
)
raise TypeError(msg)
super().__init__(**kwargs)
model_config = ConfigDict(
@ -515,10 +517,11 @@ class ChildTool(BaseTool):
result = input_args.parse_obj(tool_input)
result_dict = result.dict()
else:
raise NotImplementedError(
msg = (
"args_schema must be a Pydantic BaseModel, "
f"got {self.args_schema}"
)
raise NotImplementedError(msg)
return {
k: getattr(result, k)
for k, v in result_dict.items()
@ -653,12 +656,13 @@ class ChildTool(BaseTool):
response = context.run(self._run, *tool_args, **tool_kwargs)
if self.response_format == "content_and_artifact":
if not isinstance(response, tuple) or len(response) != 2:
raise ValueError(
msg = (
"Since response_format='content_and_artifact' "
"a two-tuple of the message content and raw tool output is "
f"expected. Instead generated response of type: "
f"{type(response)}."
)
raise ValueError(msg)
content, artifact = response
else:
content = response
@ -769,12 +773,13 @@ class ChildTool(BaseTool):
response = await coro
if self.response_format == "content_and_artifact":
if not isinstance(response, tuple) or len(response) != 2:
raise ValueError(
msg = (
"Since response_format='content_and_artifact' "
"a two-tuple of the message content and raw tool output is "
f"expected. Instead generated response of type: "
f"{type(response)}."
)
raise ValueError(msg)
content, artifact = response
else:
content = response
@ -825,10 +830,11 @@ def _handle_validation_error(
elif callable(flag):
content = flag(e)
else:
raise ValueError(
msg = (
f"Got unexpected type of `handle_validation_error`. Expected bool, "
f"str or callable. Received: {flag}"
)
raise ValueError(msg)
return content
@ -844,10 +850,11 @@ def _handle_tool_error(
elif callable(flag):
content = flag(e)
else:
raise ValueError(
msg = (
f"Got unexpected type of `handle_tool_error`. Expected bool, str "
f"or callable. Received: {flag}"
)
raise ValueError(msg)
return content

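The two-tuple check is the contract a `response_format="content_and_artifact"` tool must satisfy. A sketch using the `@tool` decorator:

from langchain_core.tools import tool

@tool(response_format="content_and_artifact")
def sample(n: int) -> tuple[str, list[int]]:
    """Return a summary for the model plus the raw data as an artifact."""
    data = list(range(n))
    return f"generated {len(data)} items", data  # must be a 2-tuple

print(sample.invoke({"n": 3}))  # a plain invocation returns only the content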

@ -146,7 +146,8 @@ def tool(
runnable = dec_func
if runnable.input_schema.model_json_schema().get("type") != "object":
raise ValueError("Runnable must have an object schema.")
msg = "Runnable must have an object schema."
raise ValueError(msg)
async def ainvoke_wrapper(
callbacks: Optional[Callbacks] = None, **kwargs: Any
@ -189,10 +190,11 @@ def tool(
# If someone doesn't want a schema applied, we must treat it as
# a simple string->string function
if dec_func.__doc__ is None:
raise ValueError(
msg = (
"Function must have a docstring if "
"description not provided and infer_schema is False."
)
raise ValueError(msg)
return Tool(
name=tool_name,
func=func,
@ -222,7 +224,8 @@ def tool(
return _partial
else:
raise ValueError("Too many arguments for tool decorator")
msg = "Too many arguments for tool decorator"
raise ValueError(msg)
def _get_description_from_runnable(runnable: Runnable) -> str:
@ -241,11 +244,12 @@ def _get_schema_from_runnable_and_arg_types(
try:
arg_types = get_type_hints(runnable.InputType)
except TypeError as e:
raise TypeError(
msg = (
"Tool input must be str or dict. If dict, dict arguments must be "
"typed. Either annotate types (e.g., with TypedDict) or pass "
f"arg_types into `.as_tool` to specify. {str(e)}"
) from e
)
raise TypeError(msg) from e
fields = {key: (key_type, Field(...)) for key, key_type in arg_types.items()}
return create_model(name, **fields) # type: ignore

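`_get_schema_from_runnable_and_arg_types` backs `Runnable.as_tool` for dict-input runnables; `arg_types` is the escape hatch its error message mentions. A sketch (the names here are illustrative):

from langchain_core.runnables import RunnableLambda

runnable = RunnableLambda(lambda kwargs: kwargs["a"] * kwargs["b"])
multiply = runnable.as_tool(
    name="multiply",
    description="Multiply a by b.",
    arg_types={"a": int, "b": int},  # needed because the dict input is untyped
)
print(multiply.invoke({"a": 3, "b": 4}))  # 12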

@ -68,11 +68,12 @@ class Tool(BaseTool):
# For backwards compatibility. The tool must be run with a single input
all_args = list(args) + list(kwargs.values())
if len(all_args) != 1:
raise ToolException(
msg = (
f"""Too many arguments to single-input tool {self.name}.
Consider using StructuredTool instead."""
f" Args: {all_args}"
)
raise ToolException(msg)
return tuple(all_args), {}
def _run(
@ -89,7 +90,8 @@ class Tool(BaseTool):
if config_param := _get_runnable_config_param(self.func):
kwargs[config_param] = config
return self.func(*args, **kwargs)
raise NotImplementedError("Tool does not support sync invocation.")
msg = "Tool does not support sync invocation."
raise NotImplementedError(msg)
async def _arun(
self,
@ -152,7 +154,8 @@ class Tool(BaseTool):
ValueError: If the function is not provided.
"""
if func is None and coroutine is None:
raise ValueError("Function and/or coroutine must be provided")
msg = "Function and/or coroutine must be provided"
raise ValueError(msg)
return cls(
name=name,
func=func,


@ -78,7 +78,8 @@ class StructuredTool(BaseTool):
if config_param := _get_runnable_config_param(self.func):
kwargs[config_param] = config
return self.func(*args, **kwargs)
raise NotImplementedError("StructuredTool does not support sync invocation.")
msg = "StructuredTool does not support sync invocation."
raise NotImplementedError(msg)
async def _arun(
self,
@ -167,7 +168,8 @@ class StructuredTool(BaseTool):
elif coroutine is not None:
source_function = coroutine
else:
raise ValueError("Function and/or coroutine must be provided")
msg = "Function and/or coroutine must be provided"
raise ValueError(msg)
name = name or source_function.__name__
if args_schema is None and infer_schema:
# schema name is appended within function
@ -184,9 +186,8 @@ class StructuredTool(BaseTool):
if description_ is None and args_schema:
description_ = args_schema.__doc__ or None
if description_ is None:
raise ValueError(
"Function must have a docstring if description not provided."
)
msg = "Function must have a docstring if description not provided."
raise ValueError(msg)
if description is None:
# Only apply if using the function's docstring
description_ = textwrap.dedent(description_).strip()

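The docstring requirement above, in practice (a minimal sketch):

from langchain_core.tools import StructuredTool

def multiply(a: int, b: int) -> int:
    """Multiply two integers."""  # becomes the tool description
    return a * b

tool = StructuredTool.from_function(func=multiply)
print(tool.invoke({"a": 2, "b": 5}))  # 10

def undocumented(a: int) -> int:
    return a

# StructuredTool.from_function(func=undocumented)  # raises the ValueError above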

@ -40,9 +40,10 @@ def tracing_enabled(
session_name: str = "default",
) -> Generator[TracerSessionV1, None, None]:
"""Throw an error because this has been replaced by tracing_v2_enabled."""
raise RuntimeError(
msg = (
"tracing_enabled is no longer supported. Please use tracing_v2_enabled instead."
)
raise RuntimeError(msg)
@contextmanager
@ -196,9 +197,8 @@ def register_configure_hook(
to a non-None value.
"""
if env_var is not None and handle_class is None:
raise ValueError(
"If env_var is set, handle_class must also be set to a non-None value."
)
msg = "If env_var is set, handle_class must also be set to a non-None value."
raise ValueError(msg)
from langchain_core.callbacks.base import BaseCallbackHandler
_configure_hooks.append(


@ -137,17 +137,19 @@ class _TracerCore(ABC):
try:
run = self.run_map[str(run_id)]
except KeyError as exc:
raise TracerException(f"No indexed run ID {run_id}.") from exc
msg = f"No indexed run ID {run_id}."
raise TracerException(msg) from exc
if isinstance(run_type, str):
run_types: Union[set[str], None] = {run_type}
else:
run_types = run_type
if run_types is not None and run.run_type not in run_types:
raise TracerException(
msg = (
f"Found {run.run_type} run at ID {run_id}, "
f"but expected {run_types} run."
)
raise TracerException(msg)
return run
def _create_chat_model_run(
@ -170,10 +172,11 @@ class _TracerCore(ABC):
# This can eventually be cleaned up by writing a "modern" tracer
# that has all the updated schema changes corresponding to
# the "streaming_events" format.
raise NotImplementedError(
msg = (
f"Chat model tracing is not supported in "
f"for {self._schema_format} format."
)
raise NotImplementedError(msg)
start_time = datetime.now(timezone.utc)
if metadata:
kwargs.update({"metadata": metadata})
@ -338,7 +341,8 @@ class _TracerCore(ABC):
"input": inputs,
}
else:
raise ValueError(f"Invalid format: {self._schema_format}")
msg = f"Invalid format: {self._schema_format}"
raise ValueError(msg)
def _get_chain_outputs(self, outputs: Any) -> Any:
"""Get the outputs for a chain run."""
@ -349,7 +353,8 @@ class _TracerCore(ABC):
"output": outputs,
}
else:
raise ValueError(f"Invalid format: {self._schema_format}")
msg = f"Invalid format: {self._schema_format}"
raise ValueError(msg)
def _complete_chain_run(
self,
@ -404,7 +409,8 @@ class _TracerCore(ABC):
elif self._schema_format == "streaming_events":
inputs = {"input": inputs}
else:
raise AssertionError(f"Invalid format: {self._schema_format}")
msg = f"Invalid format: {self._schema_format}"
raise AssertionError(msg)
return Run(
id=run_id,


@ -159,10 +159,11 @@ class EvaluatorCallbackHandler(BaseTracer):
elif isinstance(results, dict) and "results" in results:
results_ = cast(list[EvaluationResult], results["results"])
else:
raise TypeError(
msg = (
f"Invalid evaluation result type {type(results)}."
" Expected EvaluationResult or EvaluationResults."
)
raise TypeError(msg)
return results_
def _log_evaluation_feedback(


@ -136,10 +136,11 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
while parent_id := self.parent_map.get(run_id):
str_parent_id = str(parent_id)
if str_parent_id in parent_ids:
raise AssertionError(
msg = (
f"Parent ID {parent_id} is already in the parent_ids list. "
f"This should never happen."
)
raise AssertionError(msg)
parent_ids.append(str_parent_id)
run_id = parent_id
@ -411,7 +412,8 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
chunk_: Union[GenerationChunk, BaseMessageChunk]
if run_info is None:
raise AssertionError(f"Run ID {run_id} not found in run map.")
msg = f"Run ID {run_id} not found in run map."
raise AssertionError(msg)
if self.is_tapped.get(run_id):
return
if run_info["run_type"] == "chat_model":
@ -429,7 +431,8 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
else:
chunk_ = cast(GenerationChunk, chunk)
else:
raise ValueError(f"Unexpected run type: {run_info['run_type']}")
msg = f"Unexpected run type: {run_info['run_type']}"
raise ValueError(msg)
self._send(
{
@ -484,7 +487,8 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
}
event = "on_llm_end"
else:
raise ValueError(f"Unexpected run type: {run_info['run_type']}")
msg = f"Unexpected run type: {run_info['run_type']}"
raise ValueError(msg)
self._send(
{
@ -626,10 +630,11 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
"""End a trace for a tool run."""
run_info = self.run_map.pop(run_id)
if "inputs" not in run_info:
raise AssertionError(
msg = (
f"Run ID {run_id} is a tool call and is expected to have "
f"inputs associated with it."
)
raise AssertionError(msg)
inputs = run_info["inputs"]
self._send(
@ -839,11 +844,12 @@ async def _astream_events_implementation_v1(
if event_type == "stream":
num_chunks = len(log_entry["streamed_output"])
if num_chunks != 1:
raise AssertionError(
msg = (
f"Expected exactly one chunk of streamed output, "
f"got {num_chunks} instead. This is impossible. "
f"Encountered in: {log_entry['name']}"
)
raise AssertionError(msg)
data = {"chunk": log_entry["streamed_output"][0]}
# Clean up the stream, we don't need it anymore.
@ -866,11 +872,12 @@ async def _astream_events_implementation_v1(
if state["streamed_output"]:
num_chunks = len(state["streamed_output"])
if num_chunks != 1:
raise AssertionError(
msg = (
f"Expected exactly one chunk of streamed output, "
f"got {num_chunks} instead. This is impossible. "
f"Encountered in: {state['name']}"
)
raise AssertionError(msg)
data = {"chunk": state["streamed_output"][0]}
# Clean up the stream, we don't need it anymore.
@ -945,10 +952,11 @@ async def _astream_events_implementation_v2(
callbacks.add_handler(event_streamer, inherit=True)
config["callbacks"] = callbacks
else:
raise ValueError(
msg = (
f"Unexpected type for callbacks: {callbacks}."
"Expected None, list or AsyncCallbackManager."
)
raise ValueError(msg)
# Call the runnable in streaming mode,
# add each chunk to the output stream

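This handler is what `astream_events(..., version="v2")` drives. A minimal sketch:

import asyncio

from langchain_core.runnables import RunnableLambda

async def main() -> None:
    chain = RunnableLambda(lambda x: x + 1)
    async for event in chain.astream_events(1, version="v2"):
        print(event["event"], event["name"])

asyncio.run(main())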

@ -191,7 +191,8 @@ class LangChainTracer(BaseTracer):
ValueError: If the run URL cannot be found.
"""
if not self.latest_run:
raise ValueError("No traced run found.")
msg = "No traced run found."
raise ValueError(msg)
# If this is the first run in a project, the project may not yet be created.
# This method is only really useful for debugging flows, so we will assume
# there is some tolerance for latency.
@ -204,7 +205,8 @@ class LangChainTracer(BaseTracer):
return self.client.get_run_url(
run=self.latest_run, project_name=self.project_name
)
raise ValueError("Failed to get run URL.")
msg = "Failed to get run URL."
raise ValueError(msg)
def _get_tags(self, run: Run) -> list[str]:
"""Get combined tags for a run."""


@ -3,14 +3,16 @@ from typing import Any
def get_headers(*args: Any, **kwargs: Any) -> Any:
"""Throw an error because this has been replaced by LangChainTracer."""
raise RuntimeError(
msg = (
"get_headers for LangChainTracerV1 is no longer supported. "
"Please use LangChainTracer instead."
)
raise RuntimeError(msg)
def LangChainTracerV1(*args: Any, **kwargs: Any) -> Any: # noqa: N802
"""Throw an error because this has been replaced by LangChainTracer."""
raise RuntimeError(
msg = (
"LangChainTracerV1 is no longer supported. Please use LangChainTracer instead."
)
raise RuntimeError(msg)


@ -104,9 +104,8 @@ class RunLogPatch:
state = jsonpatch.apply_patch(None, copy.deepcopy(ops))
return RunLog(*ops, state=state)
raise TypeError(
f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
)
msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
raise TypeError(msg)
def __repr__(self) -> str:
from pprint import pformat
@ -134,9 +133,8 @@ class RunLog(RunLogPatch):
state = jsonpatch.apply_patch(self.state, other.ops)
return RunLog(*ops, state=state)
raise TypeError(
f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
)
msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
raise TypeError(msg)
def __repr__(self) -> str:
from pprint import pformat
@ -197,10 +195,11 @@ class LogStreamCallbackHandler(BaseTracer, _StreamingCallbackHandler):
ValueError: If an invalid schema format is provided (internal use only).
"""
if _schema_format not in {"original", "streaming_events"}:
raise ValueError(
msg = (
f"Invalid schema format: {_schema_format}. "
f"Expected one of 'original', 'streaming_events'."
)
raise ValueError(msg)
super().__init__(_schema_format=_schema_format)
self.auto_close = auto_close
@ -496,11 +495,12 @@ def _get_standardized_inputs(
None means that the input is not yet known!
"""
if schema_format == "original":
raise NotImplementedError(
msg = (
"Do not assign inputs with original schema; drop the key for now. "
"When inputs are added to astream_log they should be added with "
"standardized schema for streaming events."
)
raise NotImplementedError(msg)
inputs = load(run.inputs)
@ -613,10 +613,11 @@ async def _astream_log_implementation(
callbacks.add_handler(stream, inherit=True)
config["callbacks"] = callbacks
else:
raise ValueError(
msg = (
f"Unexpected type for callbacks: {callbacks}."
"Expected None, list or AsyncCallbackManager."
)
raise ValueError(msg)
# Call the runnable in streaming mode,
# add each chunk to the output stream


@ -34,10 +34,11 @@ def merge_dicts(left: dict[str, Any], *others: dict[str, Any]) -> dict[str, Any]
elif right_v is None:
continue
elif type(merged[right_k]) is not type(right_v):
raise TypeError(
msg = (
f'additional_kwargs["{right_k}"] already exists in this message,'
" but with a different type."
)
raise TypeError(msg)
elif isinstance(merged[right_k], str):
# TODO: Add below special handling for 'type' key in 0.3 and remove
# merge_lists 'type' logic.
@ -60,10 +61,11 @@ def merge_dicts(left: dict[str, Any], *others: dict[str, Any]) -> dict[str, Any]
elif merged[right_k] == right_v:
continue
else:
raise TypeError(
msg = (
f"Additional kwargs key {right_k} already exists in left dict and "
f"value has unsupported type {type(merged[right_k])}."
)
raise TypeError(msg)
return merged
@ -125,10 +127,11 @@ def merge_obj(left: Any, right: Any) -> Any:
if left is None or right is None:
return left if left is not None else right
elif type(left) is not type(right):
raise TypeError(
msg = (
f"left and right are of different types. Left type: {type(left)}. Right "
f"type: {type(right)}."
)
raise TypeError(msg)
elif isinstance(left, str):
return left + right
elif isinstance(left, dict):
@ -138,7 +141,8 @@ def merge_obj(left: Any, right: Any) -> Any:
elif left == right:
return left
else:
raise ValueError(
msg = (
f"Unable to merge {left=} and {right=}. Both must be of type str, dict, or "
f"list, or else be two equal objects."
)
raise ValueError(msg)

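These merge rules are what chunk concatenation relies on. A sketch (the module is private, so the import path is an assumption):

from langchain_core.utils._merge import merge_dicts

left = {"content": "Hello", "idx": 0}
right = {"content": " world", "idx": 0}
print(merge_dicts(left, right))  # {'content': 'Hello world', 'idx': 0}
# mismatched value types raise the TypeError above:
# merge_dicts({"idx": 0}, {"idx": "0"})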

@ -60,7 +60,8 @@ def py_anext(
Callable[[AsyncIterator[T]], Awaitable[T]], type(iterator).__anext__
)
except AttributeError as e:
raise TypeError(f"{iterator!r} is not an async iterator") from e
msg = f"{iterator!r} is not an async iterator"
raise TypeError(msg) from e
if default is _no_default:
return __anext__(iterator)


@ -73,8 +73,9 @@ def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str:
elif default is not None:
return default
else:
raise ValueError(
msg = (
f"Did not find {key}, please add an environment variable"
f" `{env_key}` which contains it, or pass"
f" `{key}` as a named parameter."
)
raise ValueError(msg)


@ -25,10 +25,11 @@ class StrictFormatter(Formatter):
ValueError: If any arguments are provided.
"""
if len(args) > 0:
raise ValueError(
msg = (
"No arguments should be provided, "
"everything should be passed as keyword arguments."
)
raise ValueError(msg)
return super().vformat(format_string, args, kwargs)
def validate_input_variables(

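The keyword-only contract above, exercised through the module-level `formatter` instance (assumed from the file under edit, langchain_core/utils/formatting.py):

from langchain_core.utils.formatting import formatter

print(formatter.format("Hello {name}", name="world"))  # Hello world
# formatter.format("Hello {0}", "world")  # raises the ValueError above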

@ -105,7 +105,8 @@ def convert_pydantic_to_openai_function(
elif hasattr(model, "schema"):
schema = model.schema() # Pydantic 1
else:
raise TypeError("Model must be a Pydantic model.")
msg = "Model must be a Pydantic model."
raise TypeError(msg)
schema = dereference_refs(schema)
if "definitions" in schema: # pydantic 1
schema.pop("definitions", None)
@ -237,11 +238,12 @@ def _convert_any_typed_dicts_to_pydantic(
if (field_desc := field_kwargs.get("description")) and not isinstance(
field_desc, str
):
raise ValueError(
msg = (
f"Invalid annotation for field {arg}. Third argument to "
f"Annotated must be a string description, received value of "
f"type {type(field_desc)}."
)
raise ValueError(msg)
elif arg_desc := arg_descriptions.get(arg):
field_kwargs["description"] = arg_desc
else:
@ -387,12 +389,13 @@ def convert_to_openai_function(
elif callable(function):
oai_function = cast(dict, convert_python_function_to_openai_function(function))
else:
raise ValueError(
msg = (
f"Unsupported function\n\n{function}\n\nFunctions must be passed in"
" as Dict, pydantic.BaseModel, or Callable. If they're a dict they must"
" either be in OpenAI function format or valid JSON schema with top-level"
" 'title' and 'description' keys."
)
raise ValueError(msg)
if strict is not None:
oai_function["strict"] = strict
@ -553,7 +556,8 @@ def _parse_google_docstring(
if filtered_annotations and (
len(docstring_blocks) < 2 or not docstring_blocks[1].startswith("Args:")
):
raise ValueError("Found invalid Google-Style docstring.")
msg = "Found invalid Google-Style docstring."
raise ValueError(msg)
descriptors = []
args_block = None
past_descriptors = False
@ -571,7 +575,8 @@ def _parse_google_docstring(
description = " ".join(descriptors)
else:
if error_on_invalid_docstring:
raise ValueError("Found invalid Google-Style docstring.")
msg = "Found invalid Google-Style docstring."
raise ValueError(msg)
description = ""
args_block = None
arg_descriptions = {}

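A sketch of the Pydantic path through `convert_to_openai_function` (schema details elided):

from pydantic import BaseModel, Field

from langchain_core.utils.function_calling import convert_to_openai_function

class GetWeather(BaseModel):
    """Get the current weather for a city."""

    city: str = Field(description="City name")

fn = convert_to_openai_function(GetWeather)
print(fn["name"], "-", fn["description"])
# fn["parameters"] holds the dereferenced JSON schema with a required "city"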

@ -179,11 +179,13 @@ def parse_and_check_json_markdown(text: str, expected_keys: list[str]) -> dict:
try:
json_obj = parse_json_markdown(text)
except json.JSONDecodeError as e:
raise OutputParserException(f"Got invalid JSON object. Error: {e}") from e
msg = f"Got invalid JSON object. Error: {e}"
raise OutputParserException(msg) from e
for key in expected_keys:
if key not in json_obj:
raise OutputParserException(
msg = (
f"Got invalid return object. Expected key `{key}` "
f"to be present, but got {json_obj}"
)
raise OutputParserException(msg)
return json_obj

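The error paths above wrap `parse_json_markdown`, which accepts fenced or bare JSON. A sketch:

from langchain_core.utils.json import parse_json_markdown

print(parse_json_markdown('```json\n{"answer": 42}\n```'))  # {'answer': 42}
print(parse_json_markdown('{"answer": 42}'))                # fences are optional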

@ -8,10 +8,11 @@ from typing import Any, Optional
def _retrieve_ref(path: str, schema: dict) -> dict:
components = path.split("/")
if components[0] != "#":
raise ValueError(
msg = (
"ref paths are expected to be URI fragments, meaning they should start "
"with #."
)
raise ValueError(msg)
out = schema
for component in components[1:]:
if component in out:
@ -19,7 +20,8 @@ def _retrieve_ref(path: str, schema: dict) -> dict:
elif component.isdigit() and int(component) in out:
out = out[int(component)]
else:
raise KeyError(f"Reference '{path}' not found.")
msg = f"Reference '{path}' not found."
raise KeyError(msg)
return deepcopy(out)

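The fragment walking above serves `dereference_refs`. A sketch with a `$defs`-style schema:

from langchain_core.utils.json_schema import dereference_refs

schema = {
    "type": "object",
    "$defs": {"Name": {"type": "string"}},
    "properties": {"name": {"$ref": "#/$defs/Name"}},
}
resolved = dereference_refs(schema)
print(resolved["properties"]["name"])  # {'type': 'string'} (a deep copy)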

@ -144,7 +144,8 @@ def parse_tag(template: str, l_del: str, r_del: str) -> tuple[tuple[str, str], s
try:
tag, template = template.split(r_del, 1)
except ValueError as e:
raise ChevronError("unclosed tag " f"at line {_CURRENT_LINE}") from e
msg = "unclosed tag " f"at line {_CURRENT_LINE}"
raise ChevronError(msg) from e
# Find the type meaning of the first character
tag_type = tag_types.get(tag[0], "variable")
@ -164,9 +165,8 @@ def parse_tag(template: str, l_del: str, r_del: str) -> tuple[tuple[str, str], s
# Otherwise we should complain
else:
raise ChevronError(
"unclosed set delimiter tag\n" f"at line {_CURRENT_LINE}"
)
msg = "unclosed set delimiter tag\n" f"at line {_CURRENT_LINE}"
raise ChevronError(msg)
elif (
# If we might be a no html escape tag
@ -275,18 +275,20 @@ def tokenize(
try:
last_section = open_sections.pop()
except IndexError as e:
raise ChevronError(
msg = (
f'Trying to close tag "{tag_key}"\n'
"Looks like it was not opened.\n"
f"line {_CURRENT_LINE + 1}"
) from e
)
raise ChevronError(msg) from e
if tag_key != last_section:
# Otherwise we need to complain
raise ChevronError(
msg = (
f'Trying to close tag "{tag_key}"\n'
f'last open tag is "{last_section}"\n'
f"line {_CURRENT_LINE + 1}"
)
raise ChevronError(msg)
# Do the second check to see if we're a standalone
is_standalone = r_sa_check(template, tag_type, is_standalone)
@ -313,11 +315,12 @@ def tokenize(
# If there are any open sections when we're done
if open_sections:
# Then we need to complain
raise ChevronError(
msg = (
"Unexpected EOF\n"
f'the tag "{open_sections[-1]}" was never closed\n'
f"was opened at line {_LAST_TAG_LINE}"
)
raise ChevronError(msg)
#


@ -63,7 +63,8 @@ elif PYDANTIC_MAJOR_VERSION == 2:
PydanticBaseModel = Union[BaseModel, pydantic.BaseModel] # type: ignore
TypeBaseModel = Union[type[BaseModel], type[pydantic.BaseModel]] # type: ignore
else:
raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}")
msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
raise ValueError(msg)
TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
@ -116,7 +117,8 @@ def is_basemodel_subclass(cls: type) -> bool:
if issubclass(cls, BaseModelV1):
return True
else:
raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}")
msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
raise ValueError(msg)
return False
@ -144,7 +146,8 @@ def is_basemodel_instance(obj: Any) -> bool:
if isinstance(obj, BaseModelV1):
return True
else:
raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}")
msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
raise ValueError(msg)
return False
@ -233,9 +236,8 @@ def _create_subset_model_v1(
elif PYDANTIC_MAJOR_VERSION == 2:
from pydantic.v1 import create_model # type: ignore
else:
raise NotImplementedError(
f"Unsupported pydantic version: {PYDANTIC_MAJOR_VERSION}"
)
msg = f"Unsupported pydantic version: {PYDANTIC_MAJOR_VERSION}"
raise NotImplementedError(msg)
fields = {}
@ -339,9 +341,8 @@ def _create_subset_model(
fn_description=fn_description,
)
else:
raise NotImplementedError(
f"Unsupported pydantic version: {PYDANTIC_MAJOR_VERSION}"
)
msg = f"Unsupported pydantic version: {PYDANTIC_MAJOR_VERSION}"
raise NotImplementedError(msg)
if PYDANTIC_MAJOR_VERSION == 2:
@ -376,7 +377,8 @@ if PYDANTIC_MAJOR_VERSION == 2:
elif hasattr(model, "__fields__"):
return model.__fields__ # type: ignore
else:
raise TypeError(f"Expected a Pydantic model. Got {type(model)}")
msg = f"Expected a Pydantic model. Got {type(model)}"
raise TypeError(msg)
elif PYDANTIC_MAJOR_VERSION == 1:
from pydantic import BaseModel as BaseModelV1_
@ -386,7 +388,8 @@ elif PYDANTIC_MAJOR_VERSION == 1:
"""Get the field names of a Pydantic model."""
return model.__fields__ # type: ignore
else:
raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}")
msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
raise ValueError(msg)
_SchemaConfig = ConfigDict(
arbitrary_types_allowed=True, frozen=True, protected_namespaces=()
@ -536,11 +539,12 @@ def _remap_field_definitions(field_definitions: dict[str, Any]) -> dict[str, Any
if key.startswith("_") or key in _RESERVED_NAMES:
# Let's add a prefix to avoid colliding with internal pydantic fields
if isinstance(value, FieldInfo):
raise NotImplementedError(
msg = (
f"Remapping for fields starting with '_' or fields with a name "
f"matching a reserved name {_RESERVED_NAMES} is not supported if "
f" the field is a pydantic Field instance. Got {key}."
)
raise NotImplementedError(msg)
type_, default_ = value
remapped[f"private_{key}"] = (
type_,
@ -583,10 +587,11 @@ def create_model_v2(
if root:
if field_definitions:
raise NotImplementedError(
msg = (
"When specifying __root__ no other "
f"fields should be provided. Got {field_definitions}"
)
raise NotImplementedError(msg)
if isinstance(root, tuple):
kwargs = {"type_": root[0], "default_": root[1]}


@ -44,11 +44,12 @@ def xor_args(*arg_groups: tuple[str, ...]) -> Callable:
invalid_groups = [i for i, count in enumerate(counts) if count != 1]
if invalid_groups:
invalid_group_names = [", ".join(arg_groups[i]) for i in invalid_groups]
raise ValueError(
msg = (
"Exactly one argument in each of the following"
" groups must be defined:"
f" {', '.join(invalid_group_names)}"
)
raise ValueError(msg)
return func(*args, **kwargs)
return wrapper
@ -134,10 +135,11 @@ def guard_import(
module = importlib.import_module(module_name, package)
except (ImportError, ModuleNotFoundError) as e:
pip_name = pip_name or module_name.split(".")[0].replace("_", "-")
raise ImportError(
msg = (
f"Could not import {module_name} python package. "
f"Please install it with `pip install {pip_name}`."
) from e
)
raise ImportError(msg) from e
return module
@ -166,25 +168,29 @@ def check_package_version(
"""
imported_version = parse(version(package))
if lt_version is not None and imported_version >= parse(lt_version):
raise ValueError(
msg = (
f"Expected {package} version to be < {lt_version}. Received "
f"{imported_version}."
)
raise ValueError(msg)
if lte_version is not None and imported_version > parse(lte_version):
raise ValueError(
msg = (
f"Expected {package} version to be <= {lte_version}. Received "
f"{imported_version}."
)
raise ValueError(msg)
if gt_version is not None and imported_version <= parse(gt_version):
raise ValueError(
msg = (
f"Expected {package} version to be > {gt_version}. Received "
f"{imported_version}."
)
raise ValueError(msg)
if gte_version is not None and imported_version < parse(gte_version):
raise ValueError(
msg = (
f"Expected {package} version to be >= {gte_version}. Received "
f"{imported_version}."
)
raise ValueError(msg)
def get_pydantic_field_names(pydantic_cls: Any) -> set[str]:
@ -230,7 +236,8 @@ def _build_model_kwargs(
extra_kwargs = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra_kwargs:
raise ValueError(f"Found {field_name} supplied twice.")
msg = f"Found {field_name} supplied twice."
raise ValueError(msg)
if field_name not in all_required_field_names:
warnings.warn(
f"""WARNING! {field_name} is not default parameter.
@ -276,7 +283,8 @@ def build_extra_kwargs(
"""
for field_name in list(values):
if field_name in extra_kwargs:
raise ValueError(f"Found {field_name} supplied twice.")
msg = f"Found {field_name} supplied twice."
raise ValueError(msg)
if field_name not in all_required_field_names:
warnings.warn(
f"""WARNING! {field_name} is not default parameter.
@ -288,10 +296,11 @@ def build_extra_kwargs(
invalid_model_kwargs = all_required_field_names.intersection(extra_kwargs.keys())
if invalid_model_kwargs:
raise ValueError(
msg = (
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
raise ValueError(msg)
return extra_kwargs
@ -386,11 +395,12 @@ def from_env(
if error_message:
raise ValueError(error_message)
else:
raise ValueError(
msg = (
f"Did not find {key}, please add an environment variable"
f" `{key}` which contains it, or pass"
f" `{key}` as a named parameter."
)
raise ValueError(msg)
return get_from_env_fn
@ -449,10 +459,11 @@ def secret_from_env(
if error_message:
raise ValueError(error_message)
else:
raise ValueError(
msg = (
f"Did not find {key}, please add an environment variable"
f" `{key}` which contains it, or pass"
f" `{key}` as a named parameter."
)
raise ValueError(msg)
return get_secret_from_env
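In both env-helper hunks above, the sibling branch `raise ValueError(error_message)` is left untouched while the literal branch is rewritten: the EM rules only fire when the message is a string literal, f-string, or .format() call built inside the raise itself, so a message already bound to a name is compliant as-is. A small sketch of the distinction, with illustrative names that are not from this diff:

from typing import Optional

def require(value: Optional[str], error_message: Optional[str] = None) -> str:
    if value is not None:
        return value
    if error_message:
        # Already a variable: the EM rules have nothing to flag here.
        raise ValueError(error_message)
    # An inline literal on the raise line would be flagged; bind it first.
    msg = "Did not find a value and no error_message was supplied."
    raise ValueError(msg)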

View File

@ -93,10 +93,11 @@ class VectorStore(ABC):
texts if isinstance(texts, (list, tuple)) else list(texts)
)
if metadatas and len(metadatas) != len(texts_):
raise ValueError(
msg = (
"The number of metadatas must match the number of texts."
f"Got {len(metadatas)} metadatas and {len(texts_)} texts."
)
raise ValueError(msg)
metadatas_ = iter(metadatas) if metadatas else cycle([{}])
ids_: Iterator[Optional[str]] = iter(ids) if ids else cycle([None])
docs = [
@ -108,9 +109,8 @@ class VectorStore(ABC):
kwargs["ids"] = ids
return self.add_documents(docs, **kwargs)
raise NotImplementedError(
f"`add_texts` has not been implemented for {self.__class__.__name__} "
)
msg = f"`add_texts` has not been implemented for {self.__class__.__name__} "
raise NotImplementedError(msg)
@property
def embeddings(self) -> Optional[Embeddings]:
@ -133,7 +133,8 @@ class VectorStore(ABC):
False otherwise, None if not implemented.
"""
raise NotImplementedError("delete method must be implemented by subclass.")
msg = "delete method must be implemented by subclass."
raise NotImplementedError(msg)
def get_by_ids(self, ids: Sequence[str], /) -> list[Document]:
"""Get documents by their IDs.
@ -159,9 +160,8 @@ class VectorStore(ABC):
.. versionadded:: 0.2.11
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not yet support get_by_ids."
)
msg = f"{self.__class__.__name__} does not yet support get_by_ids."
raise NotImplementedError(msg)
# Implementations should override this method to provide an async native version.
async def aget_by_ids(self, ids: Sequence[str], /) -> list[Document]:
@ -243,10 +243,11 @@ class VectorStore(ABC):
texts if isinstance(texts, (list, tuple)) else list(texts)
)
if metadatas and len(metadatas) != len(texts_):
raise ValueError(
msg = (
"The number of metadatas must match the number of texts."
f"Got {len(metadatas)} metadatas and {len(texts_)} texts."
)
raise ValueError(msg)
metadatas_ = iter(metadatas) if metadatas else cycle([{}])
ids_: Iterator[Optional[str]] = iter(ids) if ids else cycle([None])
@ -284,10 +285,11 @@ class VectorStore(ABC):
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return self.add_texts(texts, metadatas, **kwargs)
raise NotImplementedError(
msg = (
f"`add_documents` and `add_texts` has not been implemented "
f"for {self.__class__.__name__} "
)
raise NotImplementedError(msg)
async def aadd_documents(
self, documents: list[Document], **kwargs: Any
@ -347,11 +349,12 @@ class VectorStore(ABC):
elif search_type == "mmr":
return self.max_marginal_relevance_search(query, **kwargs)
else:
raise ValueError(
msg = (
f"search_type of {search_type} not allowed. Expected "
"search_type to be 'similarity', 'similarity_score_threshold'"
" or 'mmr'."
)
raise ValueError(msg)
async def asearch(
self, query: str, search_type: str, **kwargs: Any
@ -381,10 +384,11 @@ class VectorStore(ABC):
elif search_type == "mmr":
return await self.amax_marginal_relevance_search(query, **kwargs)
else:
raise ValueError(
msg = (
f"search_type of {search_type} not allowed. Expected "
"search_type to be 'similarity', 'similarity_score_threshold' or 'mmr'."
)
raise ValueError(msg)
@abstractmethod
def similarity_search(
@ -1035,17 +1039,19 @@ class VectorStoreRetriever(BaseRetriever):
"""
search_type = values.get("search_type", "similarity")
if search_type not in cls.allowed_search_types:
raise ValueError(
msg = (
f"search_type of {search_type} not allowed. Valid values are: "
f"{cls.allowed_search_types}"
)
raise ValueError(msg)
if search_type == "similarity_score_threshold":
score_threshold = values.get("search_kwargs", {}).get("score_threshold")
if (score_threshold is None) or (not isinstance(score_threshold, float)):
raise ValueError(
msg = (
"`score_threshold` is not specified with a float value(0~1) "
"in `search_kwargs`."
)
raise ValueError(msg)
return values
def _get_ls_params(self, **kwargs: Any) -> LangSmithRetrieverParams:
@ -1084,7 +1090,8 @@ class VectorStoreRetriever(BaseRetriever):
query, **self.search_kwargs
)
else:
raise ValueError(f"search_type of {self.search_type} not allowed.")
msg = f"search_type of {self.search_type} not allowed."
raise ValueError(msg)
return docs
async def _aget_relevant_documents(
@ -1106,7 +1113,8 @@ class VectorStoreRetriever(BaseRetriever):
query, **self.search_kwargs
)
else:
raise ValueError(f"search_type of {self.search_type} not allowed.")
msg = f"search_type of {self.search_type} not allowed."
raise ValueError(msg)
return docs
def add_documents(self, documents: list[Document], **kwargs: Any) -> list[str]:

View File

@ -175,10 +175,11 @@ class InMemoryVectorStore(VectorStore):
vectors = self.embedding.embed_documents(texts)
if ids and len(ids) != len(texts):
raise ValueError(
msg = (
f"ids must be the same length as texts. "
f"Got {len(ids)} ids and {len(texts)} texts."
)
raise ValueError(msg)
id_iterator: Iterator[Optional[str]] = (
iter(ids) if ids else iter(doc.id for doc in documents)
@ -207,10 +208,11 @@ class InMemoryVectorStore(VectorStore):
vectors = await self.embedding.aembed_documents(texts)
if ids and len(ids) != len(texts):
raise ValueError(
msg = (
f"ids must be the same length as texts. "
f"Got {len(ids)} ids and {len(texts)} texts."
)
raise ValueError(msg)
id_iterator: Iterator[Optional[str]] = (
iter(ids) if ids else iter(doc.id for doc in documents)
@ -432,10 +434,11 @@ class InMemoryVectorStore(VectorStore):
try:
import numpy as np
except ImportError as e:
raise ImportError(
msg = (
"numpy must be installed to use max_marginal_relevance_search "
"pip install numpy"
) from e
)
raise ImportError(msg) from e
mmr_chosen_indices = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32),

View File

@ -35,10 +35,11 @@ def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray:
try:
import numpy as np
except ImportError as e:
raise ImportError(
msg = (
"cosine_similarity requires numpy to be installed. "
"Please install numpy with `pip install numpy`."
) from e
)
raise ImportError(msg) from e
if len(x) == 0 or len(y) == 0:
return np.array([])
@ -46,10 +47,11 @@ def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray:
x = np.array(x)
y = np.array(y)
if x.shape[1] != y.shape[1]:
raise ValueError(
msg = (
f"Number of columns in X and Y must be the same. X has shape {x.shape} "
f"and Y has shape {y.shape}."
)
raise ValueError(msg)
try:
import simsimd as simd # type: ignore[import-not-found]
@ -94,10 +96,11 @@ def maximal_marginal_relevance(
try:
import numpy as np
except ImportError as e:
raise ImportError(
msg = (
"maximal_marginal_relevance requires numpy to be installed. "
"Please install numpy with `pip install numpy`."
) from e
)
raise ImportError(msg) from e
if min(k, len(embedding_list)) <= 0:
return []

View File

@ -44,7 +44,7 @@ python = ">=3.12.4"
[tool.poetry.extras]
[tool.ruff.lint]
select = [ "B", "C4", "E", "F", "I", "N", "PIE", "SIM", "T201", "UP", "W",]
select = [ "B", "C4", "E", "EM", "F", "I", "N", "PIE", "SIM", "T201", "UP", "W",]
ignore = [ "UP007", "W293",]
[tool.coverage.run]
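This one-token pyproject change is what drives every other hunk in the commit: adding "EM" to the select list enables ruff's flake8-errmsg rules (EM101 for raw string literals in exceptions, EM102 for f-strings, EM103 for .format() calls). The motivation is traceback readability: when a long message is built inline, the raise line echoed in the traceback repeats the entire literal, whereas `raise ValueError(msg)` keeps that line short. Note also how the fix preserves implicit string concatenation by turning a parenthesized argument into a parenthesized `msg = (...)` assignment. A minimal before/after sketch (the function is illustrative, not from the diff):

# Before: EM102 flags the f-string passed directly to the constructor.
def check_positive(x: int) -> int:
    if x <= 0:
        raise ValueError(f"expected a positive value, got {x}")
    return x

# After the auto-fix: the message is bound to a variable first.
def check_positive(x: int) -> int:
    if x <= 0:
        msg = f"expected a positive value, got {x}"
        raise ValueError(msg)
    return x

Running something like `ruff check --select EM --fix` (possibly with `--unsafe-fixes`, depending on the ruff version) should reproduce these rewrites locally.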

View File

@ -47,7 +47,8 @@ def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) ->
only_core = config.getoption("--only-core") or False
if only_extended and only_core:
raise ValueError("Cannot specify both `--only-extended` and `--only-core`.")
msg = "Cannot specify both `--only-extended` and `--only-core`."
raise ValueError(msg)
for item in items:
requires_marker = item.get_closest_marker("requires")

View File

@ -66,11 +66,13 @@ class InMemoryCacheBad(BaseCache):
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
raise NotImplementedError("This code should not be triggered")
msg = "This code should not be triggered"
raise NotImplementedError(msg)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
raise NotImplementedError("This code should not be triggered")
msg = "This code should not be triggered"
raise NotImplementedError(msg)
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""

View File

@ -76,17 +76,20 @@ def test_simple_serialization_secret() -> None:
def test__is_field_useful() -> None:
class ArrayObj:
def __bool__(self) -> bool:
raise ValueError("Truthiness can't be determined")
msg = "Truthiness can't be determined"
raise ValueError(msg)
def __eq__(self, other: object) -> bool:
return self # type: ignore[return-value]
class NonBoolObj:
def __bool__(self) -> bool:
raise ValueError("Truthiness can't be determined")
msg = "Truthiness can't be determined"
raise ValueError(msg)
def __eq__(self, other: object) -> bool:
raise ValueError("Equality can't be determined")
msg = "Equality can't be determined"
raise ValueError(msg)
default_x = ArrayObj()
default_y = NonBoolObj()

View File

@ -32,15 +32,13 @@ def test_base_generation_parser() -> None:
that support streaming
"""
if len(result) != 1:
raise NotImplementedError(
"This output parser can only be used with a single generation."
)
msg = "This output parser can only be used with a single generation."
raise NotImplementedError(msg)
generation = result[0]
if not isinstance(generation, ChatGeneration):
# Say that this one only works with chat generations
raise OutputParserException(
"This output parser can only be used with a chat generation."
)
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
content = generation.message.content
assert isinstance(content, str)
@ -77,15 +75,13 @@ def test_base_transform_output_parser() -> None:
that support streaming
"""
if len(result) != 1:
raise NotImplementedError(
"This output parser can only be used with a single generation."
)
msg = "This output parser can only be used with a single generation."
raise NotImplementedError(msg)
generation = result[0]
if not isinstance(generation, ChatGeneration):
# Say that this one only works with chat generations
raise OutputParserException(
"This output parser can only be used with a chat generation."
)
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
content = generation.message.content
assert isinstance(content, str)
return content.swapcase() # type: ignore

View File

@ -618,7 +618,8 @@ def test_prompt_falsy_vars(
elif template_format == "mustache":
template = "{{my_var}}"
else:
raise ValueError(f"Invalid template format: {template_format}")
msg = f"Invalid template format: {template_format}"
raise ValueError(msg)
prompt = PromptTemplate.from_template(template, template_format=template_format)

View File

@ -77,9 +77,8 @@ def _schema(obj: Any) -> dict:
"""Return the schema of the object."""
if not is_basemodel_subclass(obj):
raise TypeError(
f"Object must be a Pydantic BaseModel subclass. Got {type(obj)}"
)
msg = f"Object must be a Pydantic BaseModel subclass. Got {type(obj)}"
raise TypeError(msg)
# Remap to old style schema
if not hasattr(obj, "model_json_schema"): # V1 model
return obj.schema()

View File

@ -23,7 +23,8 @@ class MyRunnable(RunnableSerializable[str, str]):
@classmethod
def my_error(cls, values: dict[str, Any]) -> Any:
if "_my_hidden_property" in values:
raise ValueError("Cannot set _my_hidden_property")
msg = "Cannot set _my_hidden_property"
raise ValueError(msg)
return values
@model_validator(mode="after")

View File

@ -2824,7 +2824,8 @@ async def test_higher_order_lambda_runnable(
elif input["key"] == "english":
return itemgetter("input") | english_chain
else:
raise ValueError(f"Unknown key: {input['key']}")
msg = f"Unknown key: {input['key']}"
raise ValueError(msg)
chain: Runnable = input_map | router
assert dumps(chain, pretty=True) == snapshot
@ -2873,7 +2874,8 @@ async def test_higher_order_lambda_runnable(
elif input["key"] == "english":
return itemgetter("input") | english_chain
else:
raise ValueError(f"Unknown key: {input['key']}")
msg = f"Unknown key: {input['key']}"
raise ValueError(msg)
achain: Runnable = input_map | arouter
math_spy = mocker.spy(math_chain.__class__, "ainvoke")
@ -3065,7 +3067,8 @@ def test_map_stream() -> None:
streamed_chunks[0] == {"llm": "i"}
or {"chat": _any_id_ai_message_chunk(content="i")}
):
raise AssertionError(f"Got an unexpected chunk: {streamed_chunks[0]}")
msg = f"Got an unexpected chunk: {streamed_chunks[0]}"
raise AssertionError(msg)
assert len(streamed_chunks) == len(llm_res) + len(chat_res)
@ -3714,9 +3717,11 @@ def test_recursive_lambda() -> None:
def test_retrying(mocker: MockerFixture) -> None:
def _lambda(x: int) -> Union[int, Runnable]:
if x == 1:
raise ValueError("x is 1")
msg = "x is 1"
raise ValueError(msg)
elif x == 2:
raise RuntimeError("x is 2")
msg = "x is 2"
raise RuntimeError(msg)
else:
return x
@ -3777,9 +3782,11 @@ def test_retrying(mocker: MockerFixture) -> None:
async def test_async_retrying(mocker: MockerFixture) -> None:
def _lambda(x: int) -> Union[int, Runnable]:
if x == 1:
raise ValueError("x is 1")
msg = "x is 1"
raise ValueError(msg)
elif x == 2:
raise RuntimeError("x is 2")
msg = "x is 2"
raise RuntimeError(msg)
else:
return x
@ -3872,7 +3879,8 @@ def test_runnable_lambda_stream_with_callbacks() -> None:
def raise_value_error(x: int) -> int:
"""Raise a value error."""
raise ValueError("x is too large")
msg = "x is too large"
raise ValueError(msg)
# Check that the chain on error is invoked
with pytest.raises(ValueError):
@ -3950,7 +3958,8 @@ async def test_runnable_lambda_astream_with_callbacks() -> None:
def raise_value_error(x: int) -> int:
"""Raise a value error."""
raise ValueError("x is too large")
msg = "x is too large"
raise ValueError(msg)
# Check that the chain on error is invoked
with pytest.raises(ValueError):
@ -4285,7 +4294,8 @@ def test_runnable_branch_invoke() -> None:
# Test with single branch
def raise_value_error(x: int) -> int:
"""Raise a value error."""
raise ValueError("x is too large")
msg = "x is too large"
raise ValueError(msg)
branch = RunnableBranch[int, int](
(lambda x: x > 100, raise_value_error),
@ -4349,7 +4359,8 @@ def test_runnable_branch_invoke_callbacks() -> None:
def raise_value_error(x: int) -> int:
"""Raise a value error."""
raise ValueError("x is too large")
msg = "x is too large"
raise ValueError(msg)
branch = RunnableBranch[int, int](
(lambda x: x > 100, raise_value_error),
@ -4376,7 +4387,8 @@ async def test_runnable_branch_ainvoke_callbacks() -> None:
async def raise_value_error(x: int) -> int:
"""Raise a value error."""
raise ValueError("x is too large")
msg = "x is too large"
raise ValueError(msg)
branch = RunnableBranch[int, int](
(lambda x: x > 100, raise_value_error),
@ -4430,7 +4442,8 @@ def test_runnable_branch_stream_with_callbacks() -> None:
def raise_value_error(x: str) -> Any:
"""Raise a value error."""
raise ValueError(f"x is {x}")
msg = f"x is {x}"
raise ValueError(msg)
llm_res = "i'm a textbot"
# sleep to better simulate a real stream
@ -4507,7 +4520,8 @@ async def test_runnable_branch_astream_with_callbacks() -> None:
def raise_value_error(x: str) -> Any:
"""Raise a value error."""
raise ValueError(f"x is {x}")
msg = f"x is {x}"
raise ValueError(msg)
llm_res = "i'm a textbot"
# sleep to better simulate a real stream

View File

@ -1596,7 +1596,8 @@ async def test_event_stream_with_retry() -> None:
def fail(inputs: str) -> None:
"""Simple func."""
raise Exception("fail")
msg = "fail"
raise Exception(msg)
chain = RunnableLambda(success) | RunnableLambda(fail).with_retry(
stop_after_attempt=1,

View File

@ -1552,7 +1552,8 @@ async def test_event_stream_with_retry() -> None:
def fail(inputs: str) -> None:
"""Simple func."""
raise Exception("fail")
msg = "fail"
raise Exception(msg)
chain = RunnableLambda(success) | RunnableLambda(fail).with_retry(
stop_after_attempt=1,
@ -2057,7 +2058,8 @@ class StreamingRunnable(Runnable[Input, Output]):
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
"""Invoke the runnable."""
raise ValueError("Server side error")
msg = "Server side error"
raise ValueError(msg)
def stream(
self,

View File

@ -293,7 +293,8 @@ async def test_runnable_sequence_parallel_trace_nesting(method: str) -> None:
elif method == "abatch":
res = (await parent.abatch([1], {"callbacks": cb}))[0] # type: ignore
else:
raise ValueError(f"Unknown method {method}")
msg = f"Unknown method {method}"
raise ValueError(msg)
assert res == 3
posts = _get_posts(mock_client_)
name_order = [
@ -345,7 +346,8 @@ async def test_runnable_sequence_parallel_trace_nesting(method: str) -> None:
), f"{name} not after {name_order[i-1]}"
prev_dotted_order = dotted_order
if name in dotted_order_map:
raise ValueError(f"Duplicate name {name}")
msg = f"Duplicate name {name}"
raise ValueError(msg)
dotted_order_map[name] = dotted_order
id_map[name] = posts[i]["id"]
parent_id_map[name] = posts[i].get("parent_run_id")

View File

@ -52,4 +52,5 @@ def test_importable_all_via_subprocess() -> None:
result = future.result() # Will raise an exception if the callable raised
code, module_name = result
if code != 0:
raise ValueError(f"Failed to import {module_name}.")
msg = f"Failed to import {module_name}."
raise ValueError(msg)

View File

@ -106,7 +106,8 @@ def test_is_basemodel_subclass() -> None:
assert is_basemodel_subclass(BaseModelV1)
else:
raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}")
msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
raise ValueError(msg)
def test_is_basemodel_instance() -> None:
@ -132,7 +133,8 @@ def test_is_basemodel_instance() -> None:
assert is_basemodel_instance(Bar(x=5))
else:
raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}")
msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
raise ValueError(msg)
@pytest.mark.skipif(PYDANTIC_MAJOR_VERSION != 2, reason="Only tests Pydantic v2")

View File

@ -177,7 +177,8 @@ def test_guard_import(
elif package is not None and pip_name is not None:
ret = guard_import(module_name, pip_name=pip_name, package=package)
else:
raise ValueError("Invalid test case")
msg = "Invalid test case"
raise ValueError(msg)
assert ret == expected
@ -204,7 +205,8 @@ def test_guard_import_failure(
elif package is not None and pip_name is not None:
guard_import(module_name, pip_name=pip_name, package=package)
else:
raise ValueError("Invalid test case")
msg = "Invalid test case"
raise ValueError(msg)
pip_name = pip_name or module_name.split(".")[0].replace("_", "-")
err_msg = (
f"Could not import {module_name} python package. "