core: Add ruff rules for error messages (EM) (#26965)

All auto-fixes
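For context: ruff's EM rules (flake8-errmsg — EM101 raw string, EM102 f-string, EM103 `str.format()` call passed directly to an exception constructor) exist because the traceback re-prints the `raise` line, duplicating the message; the auto-fix binds the message to a `msg` variable and raises that. A minimal sketch of the pattern applied throughout this diff (`set_maxsize` is a hypothetical example, not code from this commit):

    # Before: EM102 flags the f-string passed straight to the constructor
    def set_maxsize(maxsize: int) -> None:
        if maxsize <= 0:
            raise ValueError(f"maxsize must be greater than 0, got {maxsize}")

    # After the auto-fix: assign the message first, then raise the variable,
    # so the traceback shows `raise ValueError(msg)` instead of the full text twice
    def set_maxsize(maxsize: int) -> None:
        if maxsize <= 0:
            msg = f"maxsize must be greater than 0, got {maxsize}"
            raise ValueError(msg)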

Co-authored-by: Erick Friis <erick@langchain.dev>
Christophe Bornet 2024-10-08 00:12:28 +02:00 committed by GitHub
parent 37ca468d03
commit d31ec8810a
94 changed files with 777 additions and 523 deletions
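Enabling the rules themselves is a one-line lint-config change. A sketch of the corresponding pyproject.toml entry, assuming the package selects rules under the usual [tool.ruff.lint] table (the actual config hunk is not among the excerpts below):

    [tool.ruff.lint]
    # flake8-errmsg: forbid string literals, f-strings, and .format() calls
    # placed directly inside exception constructors; added to the existing selection
    extend-select = ["EM"]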

View File

@@ -51,15 +51,18 @@ def _validate_deprecation_params(
 ) -> None:
     """Validate the deprecation parameters."""
     if pending and removal:
-        raise ValueError("A pending deprecation cannot have a scheduled removal")
+        msg = "A pending deprecation cannot have a scheduled removal"
+        raise ValueError(msg)
     if alternative and alternative_import:
-        raise ValueError("Cannot specify both alternative and alternative_import")
+        msg = "Cannot specify both alternative and alternative_import"
+        raise ValueError(msg)
     if alternative_import and "." not in alternative_import:
-        raise ValueError(
+        msg = (
             "alternative_import must be a fully qualified module path. Got "
             f" {alternative_import}"
         )
+        raise ValueError(msg)
 
 
 def deprecated(
@@ -222,7 +225,8 @@ def deprecated(
             if not _obj_type:
                 _obj_type = "attribute"
             if not _name:
-                raise ValueError(f"Field {obj} must have a name to be deprecated.")
+                msg = f"Field {obj} must have a name to be deprecated."
+                raise ValueError(msg)
             old_doc = obj.description
 
             def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
@@ -241,7 +245,8 @@ def deprecated(
             if not _obj_type:
                 _obj_type = "attribute"
             if not _name:
-                raise ValueError(f"Field {obj} must have a name to be deprecated.")
+                msg = f"Field {obj} must have a name to be deprecated."
+                raise ValueError(msg)
             old_doc = obj.description
 
             def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
@@ -428,10 +433,11 @@ def warn_deprecated(
     if not pending:
         if not removal:
             removal = f"in {removal}" if removal else "within ?? minor releases"
-            raise NotImplementedError(
+            msg = (
                 f"Need to determine which default deprecation schedule to use. "
                 f"{removal}"
             )
+            raise NotImplementedError(msg)
         else:
             removal = f"in {removal}"
@@ -523,9 +529,8 @@ def rename_parameter(
         @functools.wraps(f)
         def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _R:
             if new in kwargs and old in kwargs:
-                raise TypeError(
-                    f"{f.__name__}() got multiple values for argument {new!r}"
-                )
+                msg = f"{f.__name__}() got multiple values for argument {new!r}"
+                raise TypeError(msg)
             if old in kwargs:
                 warn_deprecated(
                     since,

View File

@@ -59,7 +59,8 @@ def _key_from_id(id_: str) -> str:
     elif wout_prefix.endswith(CONTEXT_CONFIG_SUFFIX_SET):
         return wout_prefix[: -len(CONTEXT_CONFIG_SUFFIX_SET)]
     else:
-        raise ValueError(f"Invalid context config id {id_}")
+        msg = f"Invalid context config id {id_}"
+        raise ValueError(msg)
 
 
 def _config_with_context(
@@ -103,16 +104,15 @@ def _config_with_context(
         for dep in deps_by_key[key]:
             if key in deps_by_key[dep]:
-                raise ValueError(
-                    f"Deadlock detected between context keys {key} and {dep}"
-                )
+                msg = f"Deadlock detected between context keys {key} and {dep}"
+                raise ValueError(msg)
         if len(setters) != 1:
-            raise ValueError(f"Expected exactly one setter for context key {key}")
+            msg = f"Expected exactly one setter for context key {key}"
+            raise ValueError(msg)
         setter_idx = setters[0][1]
         if any(getter_idx < setter_idx for _, getter_idx in getters):
-            raise ValueError(
-                f"Context setter for key {key} must be defined after all getters."
-            )
+            msg = f"Context setter for key {key} must be defined after all getters."
+            raise ValueError(msg)
 
         if getters:
             context_funcs[getters[0][0].id] = partial(getter, events[key], values)
@@ -271,9 +271,8 @@ class ContextSet(RunnableSerializable):
             if spec.id.endswith(CONTEXT_CONFIG_SUFFIX_GET):
                 getter_key = spec.id.split("/")[1]
                 if getter_key in self.keys:
-                    raise ValueError(
-                        f"Circular reference in context setter for key {getter_key}"
-                    )
+                    msg = f"Circular reference in context setter for key {getter_key}"
+                    raise ValueError(msg)
         return super().config_specs + [
             ConfigurableFieldSpec(
                 id=id_,

View File

@@ -160,7 +160,8 @@ class InMemoryCache(BaseCache):
         """
         self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
         if maxsize is not None and maxsize <= 0:
-            raise ValueError("maxsize must be greater than 0")
+            msg = "maxsize must be greater than 0"
+            raise ValueError(msg)
         self._maxsize = maxsize
 
     def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:

View File

@@ -275,9 +275,8 @@ class CallbackManagerMixin:
         """
         # NotImplementedError is thrown intentionally
         # Callback handler will fall back to on_llm_start if this is exception is thrown
-        raise NotImplementedError(
-            f"{self.__class__.__name__} does not implement `on_chat_model_start`"
-        )
+        msg = f"{self.__class__.__name__} does not implement `on_chat_model_start`"
+        raise NotImplementedError(msg)
 
     def on_retriever_start(
         self,
@@ -523,9 +522,8 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         """
         # NotImplementedError is thrown intentionally
         # Callback handler will fall back to on_llm_start if this is exception is thrown
-        raise NotImplementedError(
-            f"{self.__class__.__name__} does not implement `on_chat_model_start`"
-        )
+        msg = f"{self.__class__.__name__} does not implement `on_chat_model_start`"
+        raise NotImplementedError(msg)
 
     async def on_llm_new_token(
         self,

View File

@@ -1510,11 +1510,12 @@ class CallbackManager(BaseCallbackManager):
         .. versionadded:: 0.2.14
         """
         if kwargs:
-            raise ValueError(
+            msg = (
                 "The dispatcher API does not accept additional keyword arguments."
                 "Please do not pass any additional keyword arguments, instead "
                 "include them in the data field."
             )
+            raise ValueError(msg)
         if run_id is None:
             run_id = uuid.uuid4()
@@ -1989,11 +1990,12 @@ class AsyncCallbackManager(BaseCallbackManager):
             run_id = uuid.uuid4()
 
         if kwargs:
-            raise ValueError(
+            msg = (
                 "The dispatcher API does not accept additional keyword arguments."
                 "Please do not pass any additional keyword arguments, instead "
                 "include them in the data field."
             )
+            raise ValueError(msg)
         await ahandle_event(
             self.handlers,
             "on_custom_event",
@@ -2336,11 +2338,12 @@ def _configure(
     if v1_tracing_enabled_ and not tracing_v2_enabled_:
         # if both are enabled, can silently ignore the v1 tracer
-        raise RuntimeError(
+        msg = (
             "Tracing using LangChainTracerV1 is no longer supported. "
             "Please set the LANGCHAIN_TRACING_V2 environment variable to enable "
             "tracing instead."
         )
+        raise RuntimeError(msg)
     tracer_project = _get_tracer_project()
     debug = _get_debug()
@@ -2519,13 +2522,14 @@ async def adispatch_custom_event(
     # within a tool or a lambda and have the metadata events associated
     # with the parent run rather than have a new run id generated for each.
     if callback_manager.parent_run_id is None:
-        raise RuntimeError(
+        msg = (
             "Unable to dispatch an adhoc event without a parent run id."
             "This function can only be called from within an existing run (e.g.,"
             "inside a tool or a RunnableLambda or a RunnableGenerator.)"
             "If you are doing that and still seeing this error, try explicitly"
             "passing the config parameter to this function."
         )
+        raise RuntimeError(msg)
 
     await callback_manager.on_custom_event(
         name,
@@ -2588,13 +2592,14 @@ def dispatch_custom_event(
     # within a tool or a lambda and have the metadata events associated
     # with the parent run rather than have a new run id generated for each.
    if callback_manager.parent_run_id is None:
-        raise RuntimeError(
+        msg = (
             "Unable to dispatch an adhoc event without a parent run id."
             "This function can only be called from within an existing run (e.g.,"
             "inside a tool or a RunnableLambda or a RunnableGenerator.)"
             "If you are doing that and still seeing this error, try explicitly"
             "passing the config parameter to this function."
         )
+        raise RuntimeError(msg)
     callback_manager.on_custom_event(
         name,
         data,

View File

@@ -157,10 +157,11 @@ class BaseChatMessageHistory(ABC):
             # method, so we should use it.
             self.add_messages([message])
         else:
-            raise NotImplementedError(
+            msg = (
                 "add_message is not implemented for this class. "
                 "Please implement add_message or add_messages."
             )
+            raise NotImplementedError(msg)
 
     def add_messages(self, messages: Sequence[BaseMessage]) -> None:
         """Add a list of messages.

View File

@@ -53,11 +53,12 @@ class BaseLoader(ABC):  # noqa: B024
             try:
                 from langchain_text_splitters import RecursiveCharacterTextSplitter
             except ImportError as e:
-                raise ImportError(
+                msg = (
                     "Unable to import from langchain_text_splitters. Please specify "
                     "text_splitter or install langchain_text_splitters with "
                     "`pip install -U langchain-text-splitters`."
-                ) from e
+                )
+                raise ImportError(msg) from e
 
             _text_splitter: TextSplitter = RecursiveCharacterTextSplitter()
         else:
@@ -71,9 +72,8 @@ class BaseLoader(ABC):  # noqa: B024
         """A lazy loader for Documents."""
         if type(self).load != BaseLoader.load:
             return iter(self.load())
-        raise NotImplementedError(
-            f"{self.__class__.__name__} does not implement lazy_load()"
-        )
+        msg = f"{self.__class__.__name__} does not implement lazy_load()"
+        raise NotImplementedError(msg)
 
     async def alazy_load(self) -> AsyncIterator[Document]:
         """A lazy loader for Documents."""

View File

@@ -142,7 +142,8 @@ class Blob(BaseMedia):
     def check_blob_is_valid(cls, values: dict[str, Any]) -> Any:
         """Verify that either data or path is provided."""
         if "data" not in values and "path" not in values:
-            raise ValueError("Either data or path must be provided")
+            msg = "Either data or path must be provided"
+            raise ValueError(msg)
         return values
 
     def as_string(self) -> str:
@@ -155,7 +156,8 @@ class Blob(BaseMedia):
         elif isinstance(self.data, str):
             return self.data
         else:
-            raise ValueError(f"Unable to get string for blob {self}")
+            msg = f"Unable to get string for blob {self}"
+            raise ValueError(msg)
 
     def as_bytes(self) -> bytes:
         """Read data as bytes."""
@@ -167,7 +169,8 @@ class Blob(BaseMedia):
             with open(str(self.path), "rb") as f:
                 return f.read()
         else:
-            raise ValueError(f"Unable to get bytes for blob {self}")
+            msg = f"Unable to get bytes for blob {self}"
+            raise ValueError(msg)
 
     @contextlib.contextmanager
     def as_bytes_io(self) -> Generator[Union[BytesIO, BufferedReader], None, None]:
@@ -178,7 +181,8 @@ class Blob(BaseMedia):
             with open(str(self.path), "rb") as f:
                 yield f
         else:
-            raise NotImplementedError(f"Unable to convert blob {self}")
+            msg = f"Unable to convert blob {self}"
+            raise NotImplementedError(msg)
 
     @classmethod
     def from_path(

View File

@@ -41,10 +41,11 @@ class OutputParserException(ValueError, LangChainException):  # noqa: N818
     ):
         super().__init__(error)
         if send_to_llm and (observation is None or llm_output is None):
-            raise ValueError(
+            msg = (
                 "Arguments 'observation' & 'llm_output'"
                 " are required if 'send_to_llm' is True"
             )
+            raise ValueError(msg)
         self.observation = observation
         self.llm_output = llm_output
         self.send_to_llm = send_to_llm

View File

@@ -73,20 +73,22 @@ class _HashedDocument(Document):
         for key in forbidden_keys:
             if key in metadata:
-                raise ValueError(
+                msg = (
                     f"Metadata cannot contain key {key} as it "
                     f"is reserved for internal use."
                 )
+                raise ValueError(msg)
 
         content_hash = str(_hash_string_to_uuid(content))
 
         try:
             metadata_hash = str(_hash_nested_dict_to_uuid(metadata))
         except Exception as e:
-            raise ValueError(
+            msg = (
                 f"Failed to hash metadata: {e}. "
                 f"Please use a dict that can be serialized using json."
-            ) from e
+            )
+            raise ValueError(msg) from e
 
         values["content_hash"] = content_hash
         values["metadata_hash"] = metadata_hash
@@ -154,10 +156,11 @@ def _get_source_id_assigner(
     elif callable(source_id_key):
         return source_id_key
     else:
-        raise ValueError(
+        msg = (
             f"source_id_key should be either None, a string or a callable. "
             f"Got {source_id_key} of type {type(source_id_key)}."
         )
+        raise ValueError(msg)
 
 
 def _deduplicate_in_order(
@@ -269,13 +272,15 @@ def index(
         ValueError: If source_id_key is not None, but is not a string or callable.
     """
     if cleanup not in {"incremental", "full", None}:
-        raise ValueError(
+        msg = (
             f"cleanup should be one of 'incremental', 'full' or None. "
             f"Got {cleanup}."
         )
+        raise ValueError(msg)
 
     if cleanup == "incremental" and source_id_key is None:
-        raise ValueError("Source id key is required when cleanup mode is incremental.")
+        msg = "Source id key is required when cleanup mode is incremental."
+        raise ValueError(msg)
 
     destination = vector_store  # Renaming internally for clarity
@@ -286,21 +291,24 @@ def index(
         for method in methods:
             if not hasattr(destination, method):
-                raise ValueError(
+                msg = (
                     f"Vectorstore {destination} does not have required method {method}"
                 )
+                raise ValueError(msg)
         if type(destination).delete == VectorStore.delete:
             # Checking if the vectorstore has overridden the default delete method
             # implementation which just raises a NotImplementedError
-            raise ValueError("Vectorstore has not implemented the delete method")
+            msg = "Vectorstore has not implemented the delete method"
+            raise ValueError(msg)
     elif isinstance(destination, DocumentIndex):
         pass
     else:
-        raise TypeError(
+        msg = (
             f"Vectorstore should be either a VectorStore or a DocumentIndex. "
             f"Got {type(destination)}."
         )
+        raise TypeError(msg)
 
     if isinstance(docs_source, BaseLoader):
         try:
@@ -334,12 +342,13 @@ def index(
             # If the cleanup mode is incremental, source ids are required.
             for source_id, hashed_doc in zip(source_ids, hashed_docs):
                 if source_id is None:
-                    raise ValueError(
+                    msg = (
                         "Source ids are required when cleanup mode is incremental. "
                         f"Document that starts with "
                         f"content: {hashed_doc.page_content[:100]} was not assigned "
                         f"as source id."
                     )
+                    raise ValueError(msg)
 
             # source ids cannot be None after for loop above.
             source_ids = cast(Sequence[str], source_ids)  # type: ignore[assignment]
@@ -400,7 +409,8 @@ def index(
             # mypy isn't good enough to determine that source ids cannot be None
             # here due to a check that's happening above, so we check again.
             if any(source_id is None for source_id in source_ids):
-                raise AssertionError("Source ids cannot be if cleanup=='incremental'.")
+                msg = "Source ids cannot be if cleanup=='incremental'."
+                raise AssertionError(msg)
 
             indexed_source_ids = cast(
                 Sequence[str], [source_id_assigner(doc) for doc in docs_to_index]
@@ -514,13 +524,15 @@ async def aindex(
     """
     if cleanup not in {"incremental", "full", None}:
-        raise ValueError(
+        msg = (
             f"cleanup should be one of 'incremental', 'full' or None. "
             f"Got {cleanup}."
         )
+        raise ValueError(msg)
 
     if cleanup == "incremental" and source_id_key is None:
-        raise ValueError("Source id key is required when cleanup mode is incremental.")
+        msg = "Source id key is required when cleanup mode is incremental."
+        raise ValueError(msg)
 
     destination = vector_store  # Renaming internally for clarity
@@ -532,21 +544,24 @@ async def aindex(
         for method in methods:
             if not hasattr(destination, method):
-                raise ValueError(
+                msg = (
                     f"Vectorstore {destination} does not have required method {method}"
                 )
+                raise ValueError(msg)
        if type(destination).adelete == VectorStore.adelete:
             # Checking if the vectorstore has overridden the default delete method
             # implementation which just raises a NotImplementedError
-            raise ValueError("Vectorstore has not implemented the delete method")
+            msg = "Vectorstore has not implemented the delete method"
+            raise ValueError(msg)
     elif isinstance(destination, DocumentIndex):
         pass
     else:
-        raise TypeError(
+        msg = (
             f"Vectorstore should be either a VectorStore or a DocumentIndex. "
             f"Got {type(destination)}."
         )
+        raise TypeError(msg)
 
     async_doc_iterator: AsyncIterator[Document]
     if isinstance(docs_source, BaseLoader):
         try:
@@ -588,12 +603,13 @@ async def aindex(
             # If the cleanup mode is incremental, source ids are required.
             for source_id, hashed_doc in zip(source_ids, hashed_docs):
                 if source_id is None:
-                    raise ValueError(
+                    msg = (
                         "Source ids are required when cleanup mode is incremental. "
                         f"Document that starts with "
                         f"content: {hashed_doc.page_content[:100]} was not assigned "
                         f"as source id."
                     )
+                    raise ValueError(msg)
 
             # source ids cannot be None after for loop above.
             source_ids = cast(Sequence[str], source_ids)
@@ -654,7 +670,8 @@ async def aindex(
             # mypy isn't good enough to determine that source ids cannot be None
             # here due to a check that's happening above, so we check again.
             if any(source_id is None for source_id in source_ids):
-                raise AssertionError("Source ids cannot be if cleanup=='incremental'.")
+                msg = "Source ids cannot be if cleanup=='incremental'."
+                raise AssertionError(msg)
 
             indexed_source_ids = cast(
                 Sequence[str], [source_id_assigner(doc) for doc in docs_to_index]

View File

@@ -290,11 +290,13 @@ class InMemoryRecordManager(RecordManager):
         """
         if group_ids and len(keys) != len(group_ids):
-            raise ValueError("Length of keys must match length of group_ids")
+            msg = "Length of keys must match length of group_ids"
+            raise ValueError(msg)
         for index, key in enumerate(keys):
             group_id = group_ids[index] if group_ids else None
             if time_at_least and time_at_least > self.get_time():
-                raise ValueError("time_at_least must be in the past")
+                msg = "time_at_least must be in the past"
+                raise ValueError(msg)
             self.records[key] = {"group_id": group_id, "updated_at": self.get_time()}
 
     async def aupdate(

View File

@@ -47,7 +47,8 @@ class InMemoryDocumentIndex(DocumentIndex):
     def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse:
         """Delete by ID."""
         if ids is None:
-            raise ValueError("IDs must be provided for deletion")
+            msg = "IDs must be provided for deletion"
+            raise ValueError(msg)
 
         ok_ids = []

View File

@@ -60,11 +60,12 @@ def get_tokenizer() -> Any:
     try:
         from transformers import GPT2TokenizerFast  # type: ignore[import]
     except ImportError as e:
-        raise ImportError(
+        msg = (
             "Could not import transformers python package. "
             "This is needed in order to calculate get_token_ids. "
             "Please install it with `pip install transformers`."
-        ) from e
+        )
+        raise ImportError(msg) from e
     # create a GPT-2 tokenizer instance
     return GPT2TokenizerFast.from_pretrained("gpt2")

View File

@@ -89,7 +89,8 @@ def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
     if generation:
         generation += list(stream)
     if generation is None:
-        raise ValueError("No generations found in stream.")
+        msg = "No generations found in stream."
+        raise ValueError(msg)
     return ChatResult(
         generations=[
             ChatGeneration(
@@ -265,10 +266,11 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         elif isinstance(input, Sequence):
             return ChatPromptValue(messages=convert_to_messages(input))
         else:
-            raise ValueError(
+            msg = (
                 f"Invalid input type {type(input)}. "
                 "Must be a PromptValue, str, or list of BaseMessages."
             )
+            raise ValueError(msg)
 
     def invoke(
         self,
@@ -817,9 +819,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         elif self.cache is None:
             pass
         else:
-            raise ValueError(
-                "Asked to cache, but no cache found at `langchain.cache`."
-            )
+            msg = "Asked to cache, but no cache found at `langchain.cache`."
+            raise ValueError(msg)
 
         # Apply the rate limiter after checking the cache, since
         # we usually don't want to rate limit cache lookups, but
@@ -891,9 +892,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         elif self.cache is None:
             pass
         else:
-            raise ValueError(
-                "Asked to cache, but no cache found at `langchain.cache`."
-            )
+            msg = "Asked to cache, but no cache found at `langchain.cache`."
+            raise ValueError(msg)
 
         # Apply the rate limiter after checking the cache, since
         # we usually don't want to rate limit cache lookups, but
@@ -1020,7 +1020,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         if isinstance(generation, ChatGeneration):
             return generation.message
         else:
-            raise ValueError("Unexpected generation type")
+            msg = "Unexpected generation type"
+            raise ValueError(msg)
 
     async def _call_async(
         self,
@@ -1036,7 +1037,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         if isinstance(generation, ChatGeneration):
             return generation.message
         else:
-            raise ValueError("Unexpected generation type")
+            msg = "Unexpected generation type"
+            raise ValueError(msg)
 
     @deprecated("0.1.7", alternative="invoke", removal="1.0")
     def call_as_llm(
@@ -1053,7 +1055,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         if isinstance(result.content, str):
             return result.content
         else:
-            raise ValueError("Cannot use predict when output is not a string.")
+            msg = "Cannot use predict when output is not a string."
+            raise ValueError(msg)
 
     @deprecated("0.1.7", alternative="invoke", removal="1.0")
     def predict_messages(
@@ -1077,7 +1080,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         if isinstance(result.content, str):
             return result.content
         else:
-            raise ValueError("Cannot use predict when output is not a string.")
+            msg = "Cannot use predict when output is not a string."
+            raise ValueError(msg)
 
     @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
     async def apredict_messages(
@@ -1220,7 +1224,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         #     }
         """  # noqa: E501
         if kwargs:
-            raise ValueError(f"Received unsupported arguments {kwargs}")
+            msg = f"Received unsupported arguments {kwargs}"
+            raise ValueError(msg)
 
         from langchain_core.output_parsers.openai_tools import (
             JsonOutputKeyToolsParser,
@@ -1228,9 +1233,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         )
 
         if self.bind_tools is BaseChatModel.bind_tools:
-            raise NotImplementedError(
-                "with_structured_output is not implemented for this model."
-            )
+            msg = "with_structured_output is not implemented for this model."
+            raise NotImplementedError(msg)
         llm = self.bind_tools([schema], tool_choice="any")
         if isinstance(schema, type) and is_basemodel_subclass(schema):
             output_parser: OutputParserLike = PydanticToolsParser(

View File

@@ -238,18 +238,20 @@ class GenericFakeChatModel(BaseChatModel):
             messages, stop=stop, run_manager=run_manager, **kwargs
         )
         if not isinstance(chat_result, ChatResult):
-            raise ValueError(
+            msg = (
                 f"Expected generate to return a ChatResult, "
                 f"but got {type(chat_result)} instead."
             )
+            raise ValueError(msg)
 
         message = chat_result.generations[0].message
 
         if not isinstance(message, AIMessage):
-            raise ValueError(
+            msg = (
                 f"Expected invoke to return an AIMessage, "
                 f"but got {type(message)} instead."
             )
+            raise ValueError(msg)
 
         content = message.content

View File

@@ -135,15 +135,17 @@ def _resolve_cache(cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
     elif cache is True:
         llm_cache = get_llm_cache()
         if llm_cache is None:
-            raise ValueError(
+            msg = (
                 "No global cache was configured. Use `set_llm_cache`."
                 "to set a global cache if you want to use a global cache."
                 "Otherwise either pass a cache object or set cache to False/None"
             )
+            raise ValueError(msg)
     elif cache is False:
         llm_cache = None
     else:
-        raise ValueError(f"Unsupported cache value {cache}")
+        msg = f"Unsupported cache value {cache}"
+        raise ValueError(msg)
     return llm_cache
@@ -332,10 +334,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         elif isinstance(input, Sequence):
             return ChatPromptValue(messages=convert_to_messages(input))
         else:
-            raise ValueError(
+            msg = (
                 f"Invalid input type {type(input)}. "
                 "Must be a PromptValue, str, or list of BaseMessages."
             )
+            raise ValueError(msg)
 
     def _get_ls_params(
         self,
@@ -842,10 +845,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             prompt and additional model provider-specific output.
         """
         if not isinstance(prompts, list):
-            raise ValueError(
+            msg = (
                 "Argument 'prompts' is expected to be of type List[str], received"
                 f" argument of type {type(prompts)}."
             )
+            raise ValueError(msg)
         # Create callback managers
         if isinstance(metadata, list):
             metadata = [
@@ -989,10 +993,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             return [None] * len(prompts)
         if isinstance(run_id, list):
             if len(run_id) != len(prompts):
-                raise ValueError(
+                msg = (
                     "Number of manually provided run_id's does not match batch length."
                     f" {len(run_id)} != {len(prompts)}"
                 )
+                raise ValueError(msg)
             return run_id
         return [run_id] + [None] * (len(prompts) - 1)
@@ -1262,11 +1267,12 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             ValueError: If the prompt is not a string.
         """
         if not isinstance(prompt, str):
-            raise ValueError(
+            msg = (
                 "Argument `prompt` is expected to be a string. Instead found "
                 f"{type(prompt)}. If you want to run the LLM on multiple prompts, use "
                 "`generate` instead."
             )
+            raise ValueError(msg)
         return (
             self.generate(
                 [prompt],
@@ -1387,7 +1393,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             with open(file_path, "w") as f:
                 yaml.dump(prompt_dict, f, default_flow_style=False)
         else:
-            raise ValueError(f"{save_path} must be json or yaml")
+            msg = f"{save_path} must be json or yaml"
+            raise ValueError(msg)
 
 
 class LLM(BaseLLM):

View File

@@ -37,7 +37,8 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
         ValueError: If `default` is passed as a kwarg.
     """
     if "default" in kwargs:
-        raise ValueError("`default` should not be passed to dumps")
+        msg = "`default` should not be passed to dumps"
+        raise ValueError(msg)
     try:
         if pretty:
             indent = kwargs.pop("indent", 2)

View File

@@ -96,17 +96,19 @@ class Reviver:
                 else:
                     if self.secrets_from_env and key in os.environ and os.environ[key]:
                         return os.environ[key]
-                    raise KeyError(f'Missing key "{key}" in load(secrets_map)')
+                    msg = f'Missing key "{key}" in load(secrets_map)'
+                    raise KeyError(msg)
 
         if (
             value.get("lc") == 1
             and value.get("type") == "not_implemented"
             and value.get("id") is not None
         ):
-            raise NotImplementedError(
+            msg = (
                 "Trying to load an object that doesn't implement "
                 f"serialization: {value}"
             )
+            raise NotImplementedError(msg)
 
         if (
             value.get("lc") == 1
@@ -121,7 +123,8 @@ class Reviver:
                 # The root namespace ["langchain"] is not a valid identifier.
                 or namespace == ["langchain"]
             ):
-                raise ValueError(f"Invalid namespace: {value}")
+                msg = f"Invalid namespace: {value}"
+                raise ValueError(msg)
             # Has explicit import path.
             elif mapping_key in self.import_mappings:
                 import_path = self.import_mappings[mapping_key]
@@ -130,11 +133,12 @@ class Reviver:
                 # Import module
                 mod = importlib.import_module(".".join(import_dir))
             elif namespace[0] in DISALLOW_LOAD_FROM_PATH:
-                raise ValueError(
+                msg = (
                     "Trying to deserialize something that cannot "
                     "be deserialized in current version of langchain-core: "
                     f"{mapping_key}."
                 )
+                raise ValueError(msg)
             # Otherwise, treat namespace as path.
             else:
                 mod = importlib.import_module(".".join(namespace))
@@ -143,7 +147,8 @@ class Reviver:
             # The class must be a subclass of Serializable.
             if not issubclass(cls, Serializable):
-                raise ValueError(f"Invalid namespace: {value}")
+                msg = f"Invalid namespace: {value}"
+                raise ValueError(msg)
 
             # We don't need to recurse on kwargs
             # as json.loads will do that for us.

View File

@@ -215,11 +215,12 @@ class Serializable(BaseModel, ABC):
         for attr in deprecated_attributes:
             if hasattr(cls, attr):
-                raise ValueError(
+                msg = (
                     f"Class {self.__class__} has a deprecated "
                     f"attribute {attr}. Please use the corresponding "
                     f"classmethod instead."
                 )
+                raise ValueError(msg)
 
         # Get a reference to self bound to each class in the MRO
         this = cast(Serializable, self if cls is None else super(cls, self))

View File

@@ -373,7 +373,8 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
                         )
                     )
                 else:
-                    raise ValueError("Malformed args.")
+                    msg = "Malformed args."
+                    raise ValueError(msg)
             except Exception:
                 invalid_tool_calls.append(
                     create_invalid_tool_call(
@@ -402,9 +403,8 @@ def add_ai_message_chunks(
 ) -> AIMessageChunk:
     """Add multiple AIMessageChunks together."""
     if any(left.example != o.example for o in others):
-        raise ValueError(
-            "Cannot concatenate AIMessageChunks with different example values."
-        )
+        msg = "Cannot concatenate AIMessageChunks with different example values."
+        raise ValueError(msg)
 
     content = merge_content(left.content, *(o.content for o in others))
     additional_kwargs = merge_dicts(

View File

@@ -223,11 +223,12 @@ class BaseMessageChunk(BaseMessage):
                 response_metadata=response_metadata,
             )
         else:
-            raise TypeError(
+            msg = (
                 'unsupported operand type(s) for +: "'
                 f"{self.__class__.__name__}"
                 f'" and "{other.__class__.__name__}"'
             )
+            raise TypeError(msg)
 
 
 def message_to_dict(message: BaseMessage) -> dict:

View File

@@ -48,9 +48,8 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk):
     def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
         if isinstance(other, ChatMessageChunk):
             if self.role != other.role:
-                raise ValueError(
-                    "Cannot concatenate ChatMessageChunks with different roles."
-                )
+                msg = "Cannot concatenate ChatMessageChunks with different roles."
+                raise ValueError(msg)
 
             return self.__class__(
                 role=self.role,

View File

@@ -54,9 +54,8 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
     def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
         if isinstance(other, FunctionMessageChunk):
             if self.name != other.name:
-                raise ValueError(
-                    "Cannot concatenate FunctionMessageChunks with different names."
-                )
+                msg = "Cannot concatenate FunctionMessageChunks with different names."
+                raise ValueError(msg)
 
             return self.__class__(
                 name=self.name,

View File

@@ -20,7 +20,8 @@ class RemoveMessage(BaseMessage):
             ValueError: If the 'content' field is passed in kwargs.
         """
         if kwargs.pop("content", None):
-            raise ValueError("RemoveMessage does not support 'content' field.")
+            msg = "RemoveMessage does not support 'content' field."
+            raise ValueError(msg)
         return super().__init__("", id=id, **kwargs)

View File

@@ -94,11 +94,12 @@ class ToolMessage(BaseMessage):
             try:
                 values["content"] = str(content)
             except ValueError as e:
-                raise ValueError(
+                msg = (
                     "ToolMessage content should be a string or a list of string/dicts. "
                     f"Received:\n\n{content=}\n\n which could not be coerced into a "
                     "string."
-                ) from e
+                )
+                raise ValueError(msg) from e
         elif isinstance(content, list):
             values["content"] = []
             for i, x in enumerate(content):
@@ -106,12 +107,13 @@ class ToolMessage(BaseMessage):
                     try:
                         values["content"].append(str(x))
                     except ValueError as e:
-                        raise ValueError(
+                        msg = (
                             "ToolMessage content should be a string or a list of "
                             "string/dicts. Received a list but "
                             f"element ToolMessage.content[{i}] is not a dict and could "
                             f"not be coerced to a string.:\n\n{x}"
-                        ) from e
+                        )
+                        raise ValueError(msg) from e
                 else:
                     values["content"].append(x)
         else:
@@ -147,9 +149,8 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):
     def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
         if isinstance(other, ToolMessageChunk):
             if self.tool_call_id != other.tool_call_id:
-                raise ValueError(
-                    "Cannot concatenate ToolMessageChunks with different names."
-                )
+                msg = "Cannot concatenate ToolMessageChunks with different names."
+                raise ValueError(msg)
 
             return self.__class__(
                 tool_call_id=self.tool_call_id,

View File

@@ -51,10 +51,11 @@ def _get_type(v: Any) -> str:
     elif hasattr(v, "type"):
         return v.type
     else:
-        raise TypeError(
+        msg = (
             f"Expected either a dictionary with a 'type' key or an object "
             f"with a 'type' attribute. Instead got type {type(v)}."
         )
+        raise TypeError(msg)
 
 
 AnyMessage = Annotated[
@@ -120,7 +121,8 @@ def get_buffer_string(
         elif isinstance(m, ChatMessage):
             role = m.role
         else:
-            raise ValueError(f"Got unsupported message type: {m}")
+            msg = f"Got unsupported message type: {m}"
+            raise ValueError(msg)
         message = f"{role}: {m.content}"
         if isinstance(m, AIMessage) and "function_call" in m.additional_kwargs:
             message += f"{m.additional_kwargs['function_call']}"
@@ -158,7 +160,8 @@ def _message_from_dict(message: dict) -> BaseMessage:
     elif _type == "ChatMessageChunk":
         return ChatMessageChunk(**message["data"])
     else:
-        raise ValueError(f"Got unexpected message type: {_type}")
+        msg = f"Got unexpected message type: {_type}"
+        raise ValueError(msg)
 
 
 def messages_from_dict(messages: Sequence[dict]) -> list[BaseMessage]:
@@ -266,10 +269,11 @@ def _create_message_from_message_type(
     elif message_type == "remove":
        message = RemoveMessage(**kwargs)
     else:
-        raise ValueError(
+        msg = (
             f"Unexpected message type: '{message_type}'. Use one of 'human',"
             f" 'user', 'ai', 'assistant', 'function', 'tool', or 'system'."
         )
+        raise ValueError(msg)
     return message
@@ -312,14 +316,14 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
             # None msg content is not allowed
             msg_content = msg_kwargs.pop("content") or ""
         except KeyError as e:
-            raise ValueError(
-                f"Message dict must contain 'role' and 'content' keys, got {message}"
-            ) from e
+            msg = f"Message dict must contain 'role' and 'content' keys, got {message}"
+            raise ValueError(msg) from e
         _message = _create_message_from_message_type(
             msg_type, msg_content, **msg_kwargs
         )
     else:
-        raise NotImplementedError(f"Unsupported message type: {type(message)}")
+        msg = f"Unsupported message type: {type(message)}"
+        raise NotImplementedError(msg)
 
     return _message
@@ -820,11 +824,12 @@ def trim_messages(
         else:
            list_token_counter = token_counter  # type: ignore[assignment]
     else:
-        raise ValueError(
+        msg = (
             f"'token_counter' expected to be a model that implements "
             f"'get_num_tokens_from_messages()' or a function. Received object of type "
             f"{type(token_counter)}."
         )
+        raise ValueError(msg)
 
     try:
         from langchain_text_splitters import TextSplitter
@@ -859,9 +864,8 @@ def trim_messages(
             text_splitter=text_splitter_fn,
         )
     else:
-        raise ValueError(
-            f"Unrecognized {strategy=}. Supported strategies are 'last' and 'first'."
-        )
+        msg = f"Unrecognized {strategy=}. Supported strategies are 'last' and 'first'."
+        raise ValueError(msg)
 
 
 def _first_max_tokens(
@@ -995,10 +999,11 @@ def _msg_to_chunk(message: BaseMessage) -> BaseMessageChunk:
         if isinstance(message, msg_cls):
             return chunk_cls(**message.model_dump(exclude={"type"}))
 
-    raise ValueError(
+    msg = (
         f"Unrecognized message class {message.__class__}. Supported classes are "
         f"{list(_MSG_CHUNK_MAP.keys())}"
     )
+    raise ValueError(msg)
 
 
 def _chunk_to_msg(chunk: BaseMessageChunk) -> BaseMessage:
@@ -1010,10 +1015,11 @@ def _chunk_to_msg(chunk: BaseMessageChunk) -> BaseMessage:
         if isinstance(chunk, chunk_cls):
             return msg_cls(**chunk.model_dump(exclude={"type", "tool_call_chunks"}))
 
-    raise ValueError(
+    msg = (
         f"Unrecognized message chunk class {chunk.__class__}. Supported classes are "
         f"{list(_CHUNK_MSG_MAP.keys())}"
    )
+    raise ValueError(msg)
 
 
 def _default_text_splitter(text: str) -> list[str]:

View File

@@ -177,10 +177,11 @@ class BaseOutputParser(
         if "args" in metadata and len(metadata["args"]) > 0:
             return metadata["args"][0]
 
-        raise TypeError(
+        msg = (
             f"Runnable {self.__class__.__name__} doesn't have an inferable OutputType. "
             "Override the OutputType property to specify the output type."
         )
+        raise TypeError(msg)
 
     def invoke(
         self,
@@ -310,10 +311,11 @@ class BaseOutputParser(
     @property
     def _type(self) -> str:
         """Return the output parser type for serialization."""
-        raise NotImplementedError(
+        msg = (
             f"_type property is not implemented in class {self.__class__.__name__}."
             " This is required for serialization."
         )
+        raise NotImplementedError(msg)
 
     def dict(self, **kwargs: Any) -> dict:
         """Return dictionary representation of output parser."""

View File

@@ -36,16 +36,14 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):
         """
         generation = result[0]
         if not isinstance(generation, ChatGeneration):
-            raise OutputParserException(
-                "This output parser can only be used with a chat generation."
-            )
+            msg = "This output parser can only be used with a chat generation."
+            raise OutputParserException(msg)
         message = generation.message
         try:
             func_call = copy.deepcopy(message.additional_kwargs["function_call"])
         except KeyError as exc:
-            raise OutputParserException(
-                f"Could not parse function call: {exc}"
-            ) from exc
+            msg = f"Could not parse function call: {exc}"
+            raise OutputParserException(msg) from exc
 
         if self.args_only:
             return func_call["arguments"]
@@ -88,14 +86,12 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
         """
         if len(result) != 1:
-            raise OutputParserException(
-                f"Expected exactly one result, but got {len(result)}"
-            )
+            msg = f"Expected exactly one result, but got {len(result)}"
+            raise OutputParserException(msg)
         generation = result[0]
         if not isinstance(generation, ChatGeneration):
-            raise OutputParserException(
-                "This output parser can only be used with a chat generation."
-            )
+            msg = "This output parser can only be used with a chat generation."
+            raise OutputParserException(msg)
         message = generation.message
         try:
             function_call = message.additional_kwargs["function_call"]
@@ -103,9 +99,8 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
             if partial:
                 return None
             else:
-                raise OutputParserException(
-                    f"Could not parse function call: {exc}"
-                ) from exc
+                msg = f"Could not parse function call: {exc}"
+                raise OutputParserException(msg) from exc
         try:
             if partial:
                 try:
@@ -129,9 +124,8 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
                         function_call["arguments"], strict=self.strict
                     )
                 except (json.JSONDecodeError, TypeError) as exc:
-                    raise OutputParserException(
-                        f"Could not parse function call data: {exc}"
-                    ) from exc
+                    msg = f"Could not parse function call data: {exc}"
+                    raise OutputParserException(msg) from exc
             else:
                 try:
                     return {
@@ -141,9 +135,8 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
                         ),
                     }
                 except (json.JSONDecodeError, TypeError) as exc:
-                    raise OutputParserException(
-                        f"Could not parse function call data: {exc}"
-                    ) from exc
+                    msg = f"Could not parse function call data: {exc}"
+                    raise OutputParserException(msg) from exc
         except KeyError:
             return None
@@ -253,10 +246,11 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
            and issubclass(schema, BaseModel)
         )
         elif values["args_only"] and isinstance(schema, dict):
-            raise ValueError(
+            msg = (
                 "If multiple pydantic schemas are provided then args_only should be"
                 " False."
             )
+            raise ValueError(msg)
         return values
 
     def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
@@ -52,11 +52,12 @@ def parse_tool_call(
             raw_tool_call["function"]["arguments"], strict=strict
         )
     except JSONDecodeError as e:
-        raise OutputParserException(
+        msg = (
             f"Function {raw_tool_call['function']['name']} arguments:\n\n"
             f"{raw_tool_call['function']['arguments']}\n\nare not valid JSON. "
             f"Received JSONDecodeError {e}"
-        ) from e
+        )
+        raise OutputParserException(msg) from e
     parsed = {
         "name": raw_tool_call["function"]["name"] or "",
         "args": function_args or {},
@@ -170,9 +171,8 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
         generation = result[0]
         if not isinstance(generation, ChatGeneration):
-            raise OutputParserException(
-                "This output parser can only be used with a chat generation."
-            )
+            msg = "This output parser can only be used with a chat generation."
+            raise OutputParserException(msg)
         message = generation.message
         if isinstance(message, AIMessage) and message.tool_calls:
             tool_calls = [dict(tc) for tc in message.tool_calls]
@@ -285,10 +285,11 @@ class PydanticToolsParser(JsonOutputToolsParser):
         for res in json_results:
             try:
                 if not isinstance(res["args"], dict):
-                    raise ValueError(
+                    msg = (
                         f"Tool arguments must be specified as a dict, received: "
                         f"{res['args']}"
                     )
+                    raise ValueError(msg)
                 pydantic_objects.append(name_dict[res["type"]](**res["args"]))
             except (ValidationError, ValueError) as e:
                 if partial:
@@ -29,10 +29,9 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
                 elif issubclass(self.pydantic_object, pydantic.v1.BaseModel):
                     return self.pydantic_object.parse_obj(obj)
                 else:
-                    raise OutputParserException(
-                        f"Unsupported model version for PydanticOutputParser: \
+                    msg = f"Unsupported model version for PydanticOutputParser: \
                         {self.pydantic_object.__class__}"
-                    )
+                    raise OutputParserException(msg)
             except (pydantic.ValidationError, pydantic.v1.ValidationError) as e:
                 raise self._parser_exception(e, obj) from e
         else:  # pydantic v1
@@ -49,11 +49,12 @@ class _StreamingParser:
             try:
                 import defusedxml  # type: ignore
             except ImportError as e:
-                raise ImportError(
+                msg = (
                     "defusedxml is not installed. "
                     "Please install it to use the defusedxml parser."
                     "You can install it with `pip install defusedxml` "
-                ) from e
+                )
+                raise ImportError(msg) from e
             _parser = defusedxml.ElementTree.DefusedXMLParser(target=TreeBuilder())
         else:
             _parser = None
@@ -190,12 +191,13 @@ class XMLOutputParser(BaseTransformOutputParser):
             try:
                 from defusedxml import ElementTree  # type: ignore
             except ImportError as e:
-                raise ImportError(
+                msg = (
                     "defusedxml is not installed. "
                     "Please install it to use the defusedxml parser."
                     "You can install it with `pip install defusedxml`"
                     "See https://github.com/tiran/defusedxml for more details"
-                ) from e
+                )
+                raise ImportError(msg) from e
             _et = ElementTree  # Use the defusedxml parser
         else:
             _et = ET  # Use the standard library parser
@@ -65,7 +65,8 @@ class ChatGeneration(Generation):
                 pass
             self.text = text
         except (KeyError, AttributeError) as e:
-            raise ValueError("Error while initializing ChatGeneration") from e
+            msg = "Error while initializing ChatGeneration"
+            raise ValueError(msg) from e
         return self

     @classmethod
@@ -114,6 +115,7 @@ class ChatGenerationChunk(ChatGeneration):
                 generation_info=generation_info or None,
             )
         else:
-            raise TypeError(
+            msg = (
                 f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
             )
+            raise TypeError(msg)
@@ -64,6 +64,7 @@ class GenerationChunk(Generation):
                 generation_info=generation_info or None,
             )
         else:
-            raise TypeError(
+            msg = (
                 f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
             )
+            raise TypeError(msg)
@@ -70,21 +70,22 @@ class BasePromptTemplate(
     def validate_variable_names(self) -> Self:
         """Validate variable names do not include restricted names."""
         if "stop" in self.input_variables:
-            raise ValueError(
+            msg = (
                 "Cannot have an input variable named 'stop', as it is used internally,"
                 " please rename."
             )
+            raise ValueError(msg)
         if "stop" in self.partial_variables:
-            raise ValueError(
+            msg = (
                 "Cannot have an partial variable named 'stop', as it is used "
                 "internally, please rename."
             )
+            raise ValueError(msg)

         overall = set(self.input_variables).intersection(self.partial_variables)
         if overall:
-            raise ValueError(
-                f"Found overlapping input and partial variables: {overall}"
-            )
+            msg = f"Found overlapping input and partial variables: {overall}"
+            raise ValueError(msg)
         return self

     @classmethod
@@ -143,10 +144,11 @@ class BasePromptTemplate(
                 inner_input = {var_name: inner_input}
             else:
-                raise TypeError(
+                msg = (
                     f"Expected mapping type as input to {self.__class__.__name__}. "
                     f"Received {type(inner_input)}."
                 )
+                raise TypeError(msg)
         missing = set(self.input_variables).difference(inner_input)
         if missing:
             msg = (
@@ -341,12 +343,14 @@ class BasePromptTemplate(
                 prompt.save(file_path="path/prompt.yaml")
         """
         if self.partial_variables:
-            raise ValueError("Cannot save prompt with partial variables.")
+            msg = "Cannot save prompt with partial variables."
+            raise ValueError(msg)

         # Fetch dictionary to save
         prompt_dict = self.dict()
         if "_type" not in prompt_dict:
-            raise NotImplementedError(f"Prompt {self} does not support saving.")
+            msg = f"Prompt {self} does not support saving."
+            raise NotImplementedError(msg)

         # Convert file to Path object.
         save_path = Path(file_path) if isinstance(file_path, str) else file_path
@@ -361,7 +365,8 @@ class BasePromptTemplate(
             with open(file_path, "w") as f:
                 yaml.dump(prompt_dict, f, default_flow_style=False)
         else:
-            raise ValueError(f"{save_path} must be json or yaml")
+            msg = f"{save_path} must be json or yaml"
+            raise ValueError(msg)


 def _get_document_info(doc: Document, prompt: BasePromptTemplate[str]) -> dict:
@@ -371,11 +376,12 @@ def _get_document_info(doc: Document, prompt: BasePromptTemplate[str]) -> dict:
         required_metadata = [
             iv for iv in prompt.input_variables if iv != "page_content"
         ]
-        raise ValueError(
+        msg = (
             f"Document prompt requires documents to have metadata variables: "
             f"{required_metadata}. Received document with missing metadata: "
             f"{list(missing_metadata)}."
         )
+        raise ValueError(msg)
     return {k: base_info[k] for k in prompt.input_variables}
@@ -236,10 +236,11 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
             else kwargs[self.variable_name]
         )
         if not isinstance(value, list):
-            raise ValueError(
+            msg = (
                 f"variable {self.variable_name} should be a list of base messages, "
                 f"got {value} of type {type(value)}"
             )
+            raise ValueError(msg)
         value = convert_to_messages(value)
         if self.n_messages:
             value = value[-self.n_messages :]
@@ -514,9 +515,8 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
             return cls(prompt=prompt, **kwargs)
         elif isinstance(template, list):
             if (partial_variables is not None) and len(partial_variables) > 0:
-                raise ValueError(
-                    "Partial variables are not supported for list of templates."
-                )
+                msg = "Partial variables are not supported for list of templates."
+                raise ValueError(msg)
             prompt = []
             for tmpl in template:
                 if isinstance(tmpl, str) or isinstance(tmpl, dict) and "text" in tmpl:
@@ -536,11 +536,12 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
                         vars = get_template_variables(img_template, "f-string")
                         if vars:
                             if len(vars) > 1:
-                                raise ValueError(
+                                msg = (
                                     "Only one format variable allowed per image"
                                     f" template.\nGot: {vars}"
                                     f"\nFrom: {tmpl}"
                                 )
+                                raise ValueError(msg)
                             input_variables = [vars[0]]
                         img_template = {"url": img_template}
                         img_template_obj = ImagePromptTemplate(
@@ -559,13 +560,16 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
                             input_variables=input_variables, template=img_template
                         )
                     else:
-                        raise ValueError(f"Invalid image template: {tmpl}")
+                        msg = f"Invalid image template: {tmpl}"
+                        raise ValueError(msg)
                     prompt.append(img_template_obj)
                 else:
-                    raise ValueError(f"Invalid template: {tmpl}")
+                    msg = f"Invalid template: {tmpl}"
+                    raise ValueError(msg)
             return cls(prompt=prompt, **kwargs)
         else:
-            raise ValueError(f"Invalid template: {template}")
+            msg = f"Invalid template: {template}"
+            raise ValueError(msg)

     @classmethod
     def from_template_file(
@@ -1042,7 +1046,8 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
             prompt = HumanMessagePromptTemplate.from_template(other)
             return ChatPromptTemplate(messages=self.messages + [prompt])  # type: ignore[call-arg]
         else:
-            raise NotImplementedError(f"Unsupported operand type for +: {type(other)}")
+            msg = f"Unsupported operand type for +: {type(other)}"
+            raise NotImplementedError(msg)

     @model_validator(mode="before")
     @classmethod
@@ -1085,11 +1090,12 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
         input_vars = input_vars - optional_variables
         if "input_variables" in values and values.get("validate_template"):
             if input_vars != set(values["input_variables"]):
-                raise ValueError(
+                msg = (
                     "Got mismatched input_variables. "
                     f"Expected: {input_vars}. "
                     f"Got: {values['input_variables']}"
                 )
+                raise ValueError(msg)
         else:
             values["input_variables"] = sorted(input_vars)
         if optional_variables:
@@ -1214,7 +1220,8 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
                 message = message_template.format_messages(**kwargs)
                 result.extend(message)
             else:
-                raise ValueError(f"Unexpected input: {message_template}")
+                msg = f"Unexpected input: {message_template}"
+                raise ValueError(msg)
         return result

     async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]:
@@ -1241,7 +1248,8 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
                 message = await message_template.aformat_messages(**kwargs)
                 result.extend(message)
             else:
-                raise ValueError(f"Unexpected input: {message_template}")
+                msg = f"Unexpected input: {message_template}"
+                raise ValueError(msg)
         return result

     def partial(self, **kwargs: Any) -> ChatPromptTemplate:
@@ -1376,38 +1384,43 @@ def _create_template_from_message_type(
     elif message_type == "placeholder":
         if isinstance(template, str):
             if template[0] != "{" or template[-1] != "}":
-                raise ValueError(
+                msg = (
                     f"Invalid placeholder template: {template}."
                     " Expected a variable name surrounded by curly braces."
                 )
+                raise ValueError(msg)
             var_name = template[1:-1]
             message = MessagesPlaceholder(variable_name=var_name, optional=True)
         elif len(template) == 2 and isinstance(template[1], bool):
             var_name_wrapped, is_optional = template
             if not isinstance(var_name_wrapped, str):
-                raise ValueError(
+                msg = (
                     "Expected variable name to be a string." f" Got: {var_name_wrapped}"
                 )
+                raise ValueError(msg)
             if var_name_wrapped[0] != "{" or var_name_wrapped[-1] != "}":
-                raise ValueError(
+                msg = (
                     f"Invalid placeholder template: {var_name_wrapped}."
                     " Expected a variable name surrounded by curly braces."
                 )
+                raise ValueError(msg)
             var_name = var_name_wrapped[1:-1]
             message = MessagesPlaceholder(variable_name=var_name, optional=is_optional)
         else:
-            raise ValueError(
+            msg = (
                 "Unexpected arguments for placeholder message type."
                 " Expected either a single string variable name"
                 " or a list of [variable_name: str, is_optional: bool]."
                 f" Got: {template}"
             )
+            raise ValueError(msg)
     else:
-        raise ValueError(
+        msg = (
             f"Unexpected message type: {message_type}. Use one of 'human',"
             f" 'user', 'ai', 'assistant', or 'system'."
         )
+        raise ValueError(msg)
     return message
@@ -1448,7 +1461,8 @@ def _convert_to_message(
         )
     elif isinstance(message, tuple):
         if len(message) != 2:
-            raise ValueError(f"Expected 2-tuple of (role, template), got {message}")
+            msg = f"Expected 2-tuple of (role, template), got {message}"
+            raise ValueError(msg)
         message_type_str, template = message
         if isinstance(message_type_str, str):
             _message = _create_template_from_message_type(
@@ -1461,6 +1475,7 @@ def _convert_to_message(
                 )
             )
     else:
-        raise NotImplementedError(f"Unsupported message type: {type(message)}")
+        msg = f"Unsupported message type: {type(message)}"
+        raise NotImplementedError(msg)
     return _message
@@ -62,14 +62,12 @@ class _FewShotPromptTemplateMixin(BaseModel):
         examples = values.get("examples")
         example_selector = values.get("example_selector")
         if examples and example_selector:
-            raise ValueError(
-                "Only one of 'examples' and 'example_selector' should be provided"
-            )
+            msg = "Only one of 'examples' and 'example_selector' should be provided"
+            raise ValueError(msg)

         if examples is None and example_selector is None:
-            raise ValueError(
-                "One of 'examples' and 'example_selector' should be provided"
-            )
+            msg = "One of 'examples' and 'example_selector' should be provided"
+            raise ValueError(msg)

         return values
@@ -90,9 +88,8 @@ class _FewShotPromptTemplateMixin(BaseModel):
         elif self.example_selector is not None:
             return self.example_selector.select_examples(kwargs)
         else:
-            raise ValueError(
-                "One of 'examples' and 'example_selector' should be provided"
-            )
+            msg = "One of 'examples' and 'example_selector' should be provided"
+            raise ValueError(msg)

     async def _aget_examples(self, **kwargs: Any) -> list[dict]:
         """Async get the examples to use for formatting the prompt.
@@ -111,9 +108,8 @@ class _FewShotPromptTemplateMixin(BaseModel):
         elif self.example_selector is not None:
             return await self.example_selector.aselect_examples(kwargs)
         else:
-            raise ValueError(
-                "One of 'examples' and 'example_selector' should be provided"
-            )
+            msg = "One of 'examples' and 'example_selector' should be provided"
+            raise ValueError(msg)


 class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate):
@@ -243,7 +239,8 @@ class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate):
             ValueError: If example_selector is provided.
         """
         if self.example_selector:
-            raise ValueError("Saving an example selector is not currently supported")
+            msg = "Saving an example selector is not currently supported"
+            raise ValueError(msg)
         return super().save(file_path)
@@ -54,14 +54,12 @@ class FewShotPromptWithTemplates(StringPromptTemplate):
         examples = values.get("examples")
         example_selector = values.get("example_selector")
         if examples and example_selector:
-            raise ValueError(
-                "Only one of 'examples' and 'example_selector' should be provided"
-            )
+            msg = "Only one of 'examples' and 'example_selector' should be provided"
+            raise ValueError(msg)

         if examples is None and example_selector is None:
-            raise ValueError(
-                "One of 'examples' and 'example_selector' should be provided"
-            )
+            msg = "One of 'examples' and 'example_selector' should be provided"
+            raise ValueError(msg)

         return values
@@ -76,10 +74,11 @@ class FewShotPromptWithTemplates(StringPromptTemplate):
                 expected_input_variables |= set(self.prefix.input_variables)
             missing_vars = expected_input_variables.difference(input_variables)
             if missing_vars:
-                raise ValueError(
+                msg = (
                     f"Got input_variables={input_variables}, but based on "
                     f"prefix/suffix expected {expected_input_variables}"
                 )
+                raise ValueError(msg)
         else:
             self.input_variables = sorted(
                 set(self.suffix.input_variables)
@@ -216,5 +215,6 @@ class FewShotPromptWithTemplates(StringPromptTemplate):
             ValueError: If example_selector is provided.
         """
         if self.example_selector:
-            raise ValueError("Saving an example selector is not currently supported")
+            msg = "Saving an example selector is not currently supported"
+            raise ValueError(msg)
         return super().save(file_path)
@@ -20,11 +20,12 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
             overlap = set(kwargs["input_variables"]) & {"url", "path", "detail"}
             if overlap:
-                raise ValueError(
+                msg = (
                     "input_variables for the image template cannot contain"
                     " any of 'url', 'path', or 'detail'."
                     f" Found: {overlap}"
                 )
+                raise ValueError(msg)
         super().__init__(**kwargs)

     @property
@@ -91,13 +92,16 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
         path = kwargs.get("path") or formatted.get("path")
         detail = kwargs.get("detail") or formatted.get("detail")
         if not url and not path:
-            raise ValueError("Must provide either url or path.")
+            msg = "Must provide either url or path."
+            raise ValueError(msg)
         if not url:
             if not isinstance(path, str):
-                raise ValueError("path must be a string.")
+                msg = "path must be a string."
+                raise ValueError(msg)
             url = image_utils.image_to_data_url(path)
         if not isinstance(url, str):
-            raise ValueError("url must be a string.")
+            msg = "url must be a string."
+            raise ValueError(msg)
         output: ImageURL = {"url": url}
         if detail:
             # Don't check literal values here: let the API check them
@@ -34,7 +34,8 @@ def load_prompt_from_config(config: dict) -> BasePromptTemplate:
     config_type = config.pop("_type", "prompt")

     if config_type not in type_to_loader_dict:
-        raise ValueError(f"Loading {config_type} prompt not supported")
+        msg = f"Loading {config_type} prompt not supported"
+        raise ValueError(msg)

     prompt_loader = type_to_loader_dict[config_type]
     return prompt_loader(config)
@@ -46,9 +47,8 @@ def _load_template(var_name: str, config: dict) -> dict:
     if f"{var_name}_path" in config:
         # If it does, make sure template variable doesn't also exist.
         if var_name in config:
-            raise ValueError(
-                f"Both `{var_name}_path` and `{var_name}` cannot be provided."
-            )
+            msg = f"Both `{var_name}_path` and `{var_name}` cannot be provided."
+            raise ValueError(msg)
         # Pop the template path from the config.
         template_path = Path(config.pop(f"{var_name}_path"))
         # Load the template.
@@ -73,12 +73,12 @@ def _load_examples(config: dict) -> dict:
             elif config["examples"].endswith((".yaml", ".yml")):
                 examples = yaml.safe_load(f)
             else:
-                raise ValueError(
-                    "Invalid file format. Only json or yaml formats are supported."
-                )
+                msg = "Invalid file format. Only json or yaml formats are supported."
+                raise ValueError(msg)
         config["examples"] = examples
     else:
-        raise ValueError("Invalid examples format. Only list or string are supported.")
+        msg = "Invalid examples format. Only list or string are supported."
+        raise ValueError(msg)
     return config
@@ -90,7 +90,8 @@ def _load_output_parser(config: dict) -> dict:
         if output_parser_type == "default":
             output_parser = StrOutputParser(**_config)
         else:
-            raise ValueError(f"Unsupported output parser {output_parser_type}")
+            msg = f"Unsupported output parser {output_parser_type}"
+            raise ValueError(msg)
         config["output_parser"] = output_parser
     return config
@@ -103,10 +104,11 @@ def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:
     # Load the example prompt.
     if "example_prompt_path" in config:
         if "example_prompt" in config:
-            raise ValueError(
+            msg = (
                 "Only one of example_prompt and example_prompt_path should "
                 "be specified."
             )
+            raise ValueError(msg)
         config["example_prompt"] = load_prompt(config.pop("example_prompt_path"))
     else:
         config["example_prompt"] = load_prompt_from_config(config["example_prompt"])
@@ -126,11 +128,12 @@ def _load_prompt(config: dict) -> PromptTemplate:
     if template_format == "jinja2":
         # Disabled due to:
         # https://github.com/langchain-ai/langchain/issues/4394
-        raise ValueError(
+        msg = (
             f"Loading templates with '{template_format}' format is no longer supported "
             f"since it can lead to arbitrary code execution. Please migrate to using "
             f"the 'f-string' template format, which does not suffer from this issue."
         )
+        raise ValueError(msg)

     return PromptTemplate(**config)
@@ -151,11 +154,12 @@ def load_prompt(
         RuntimeError: If the path is a Lang Chain Hub path.
     """
     if isinstance(path, str) and path.startswith("lc://"):
-        raise RuntimeError(
+        msg = (
             "Loading from the deprecated github-based Hub is no longer supported. "
             "Please use the new LangChain Hub at https://smith.langchain.com/hub "
             "instead."
         )
+        raise RuntimeError(msg)
     return _load_prompt_from_file(path, encoding)
@@ -173,7 +177,8 @@ def _load_prompt_from_file(
         with open(file_path, encoding=encoding) as f:
             config = yaml.safe_load(f)
     else:
-        raise ValueError(f"Got unsupported file type {file_path.suffix}")
+        msg = f"Got unsupported file type {file_path.suffix}"
+        raise ValueError(msg)
     # Load the prompt from the config now.
     return load_prompt_from_config(config)
@@ -186,7 +191,8 @@ def _load_chat_prompt(config: dict) -> ChatPromptTemplate:
     config.pop("input_variables")

     if not template:
-        raise ValueError("Can't load chat prompt without template")
+        msg = "Can't load chat prompt without template"
+        raise ValueError(msg)

     return ChatPromptTemplate.from_template(template=template, **config)
@@ -89,12 +89,12 @@ class PromptTemplate(StringPromptTemplate):
         if values.get("validate_template"):
             if values["template_format"] == "mustache":
-                raise ValueError("Mustache templates cannot be validated.")
+                msg = "Mustache templates cannot be validated."
+                raise ValueError(msg)

             if "input_variables" not in values:
-                raise ValueError(
-                    "Input variables must be provided to validate the template."
-                )
+                msg = "Input variables must be provided to validate the template."
+                raise ValueError(msg)

             all_inputs = values["input_variables"] + list(values["partial_variables"])
             check_valid_template(
@@ -131,13 +131,11 @@ class PromptTemplate(StringPromptTemplate):
         # Allow for easy combining
         if isinstance(other, PromptTemplate):
             if self.template_format != "f-string":
-                raise ValueError(
-                    "Adding prompt templates only supported for f-strings."
-                )
+                msg = "Adding prompt templates only supported for f-strings."
+                raise ValueError(msg)
             if other.template_format != "f-string":
-                raise ValueError(
-                    "Adding prompt templates only supported for f-strings."
-                )
+                msg = "Adding prompt templates only supported for f-strings."
+                raise ValueError(msg)
             input_variables = list(
                 set(self.input_variables) | set(other.input_variables)
             )
@@ -147,7 +145,8 @@ class PromptTemplate(StringPromptTemplate):
             partial_variables = dict(self.partial_variables.items())
             for k, v in other.partial_variables.items():
                 if k in partial_variables:
-                    raise ValueError("Cannot have same variable partialed twice.")
+                    msg = "Cannot have same variable partialed twice."
+                    raise ValueError(msg)
                 else:
                     partial_variables[k] = v
             return PromptTemplate(
@@ -161,7 +160,8 @@ class PromptTemplate(StringPromptTemplate):
             prompt = PromptTemplate.from_template(other)
             return self + prompt
         else:
-            raise NotImplementedError(f"Unsupported operand type for +: {type(other)}")
+            msg = f"Unsupported operand type for +: {type(other)}"
+            raise NotImplementedError(msg)

     @property
     def _prompt_type(self) -> str:
@@ -42,13 +42,14 @@ def jinja2_formatter(template: str, /, **kwargs: Any) -> str:
     try:
         from jinja2.sandbox import SandboxedEnvironment
     except ImportError as e:
-        raise ImportError(
+        msg = (
             "jinja2 not installed, which is needed to use the jinja2_formatter. "
             "Please install it with `pip install jinja2`."
             "Please be cautious when using jinja2 templates. "
             "Do not expand jinja2 templates using unverified or user-controlled "
             "inputs as that can result in arbitrary Python code execution."
-        ) from e
+        )
+        raise ImportError(msg) from e

     # This uses a sandboxed environment to prevent arbitrary code execution.
     # Jinja2 uses an opt-out rather than opt-in approach for sand-boxing.
@@ -89,10 +90,11 @@ def _get_jinja2_variables_from_template(template: str) -> set[str]:
     try:
         from jinja2 import Environment, meta
     except ImportError as e:
-        raise ImportError(
+        msg = (
             "jinja2 not installed, which is needed to use the jinja2_formatter. "
             "Please install it with `pip install jinja2`."
-        ) from e
+        )
+        raise ImportError(msg) from e
     env = Environment()
     ast = env.parse(template)
     variables = meta.find_undeclared_variables(ast)
@@ -217,17 +219,19 @@ def check_valid_template(
     try:
         validator_func = DEFAULT_VALIDATOR_MAPPING[template_format]
     except KeyError as exc:
-        raise ValueError(
+        msg = (
             f"Invalid template format {template_format!r}, should be one of"
             f" {list(DEFAULT_FORMATTER_MAPPING)}."
-        ) from exc
+        )
+        raise ValueError(msg) from exc
     try:
         validator_func(template, input_variables)
     except (KeyError, IndexError) as exc:
-        raise ValueError(
+        msg = (
             "Invalid prompt schema; check for mismatched or missing input parameters"
             f" from {input_variables}."
-        ) from exc
+        )
+        raise ValueError(msg) from exc


 def get_template_variables(template: str, template_format: str) -> list[str]:
@@ -253,7 +257,8 @@ def get_template_variables(template: str, template_format: str) -> list[str]:
     elif template_format == "mustache":
         input_variables = mustache_template_vars(template)
     else:
-        raise ValueError(f"Unsupported template format: {template_format}")
+        msg = f"Unsupported template format: {template_format}"
+        raise ValueError(msg)

     return sorted(input_variables)
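One detail worth noting: wherever the original code raised from inside an except block, the auto-fix keeps the existing `from exc` / `from e` clause, so explicit exception chaining is unchanged. A minimal sketch of what that clause preserves (hypothetical helper, loosely mirroring the check_valid_template change above):

    def check_template(template: str) -> None:
        try:
            template.format()
        except (KeyError, IndexError) as exc:
            msg = "Invalid prompt schema; check for mismatched or missing input parameters."
            # `from exc` records the original error as __cause__, so the traceback
            # prints both errors, joined by "The above exception was the direct
            # cause of the following exception".
            raise ValueError(msg) from exc

    check_template("{missing}")  # ValueError chained to the underlying KeyError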
@@ -156,6 +156,5 @@ class StructuredPrompt(ChatPromptTemplate):
                 name=name,
             )
         else:
-            raise NotImplementedError(
-                "Structured prompts need to be piped to a language model."
-            )
+            msg = "Structured prompts need to be piped to a language model."
+            raise NotImplementedError(msg)
@@ -292,10 +292,11 @@ class Runnable(Generic[Input, Output], ABC):
         if type_args and len(type_args) == 2:
             return type_args[0]

-        raise TypeError(
+        msg = (
             f"Runnable {self.get_name()} doesn't have an inferable InputType. "
             "Override the InputType property to specify the input type."
         )
+        raise TypeError(msg)

     @property
     def OutputType(self) -> type[Output]:  # noqa: N802
@@ -313,10 +314,11 @@ class Runnable(Generic[Input, Output], ABC):
         if type_args and len(type_args) == 2:
             return type_args[1]

-        raise TypeError(
+        msg = (
             f"Runnable {self.get_name()} doesn't have an inferable OutputType. "
             "Override the OutputType property to specify the output type."
         )
+        raise TypeError(msg)

     @property
     def input_schema(self) -> type[BaseModel]:
@@ -1379,9 +1381,8 @@ class Runnable(Generic[Input, Output], ABC):
                 **kwargs,
             )
         else:
-            raise NotImplementedError(
-                'Only versions "v1" and "v2" of the schema is currently supported.'
-            )
+            msg = 'Only versions "v1" and "v2" of the schema is currently supported.'
+            raise NotImplementedError(msg)

         async with aclosing(event_stream):
             async for event in event_stream:
@@ -2513,10 +2514,11 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
         for key in kwargs:
             if key not in self.model_fields:
-                raise ValueError(
+                msg = (
                     f"Configuration key {key} not found in {self}: "
                     f"available keys are {self.model_fields.keys()}"
                 )
+                raise ValueError(msg)

         return RunnableConfigurableFields(default=self, fields=kwargs)
@@ -2772,9 +2774,8 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
             else:
                 steps_flat.append(coerce_to_runnable(step))
         if len(steps_flat) < 2:
-            raise ValueError(
-                f"RunnableSequence must have at least 2 steps, got {len(steps_flat)}"
-            )
+            msg = f"RunnableSequence must have at least 2 steps, got {len(steps_flat)}"
+            raise ValueError(msg)
         super().__init__(  # type: ignore[call-arg]
             first=steps_flat[0],
             middle=list(steps_flat[1:-1]),
@@ -2923,7 +2924,8 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
                 step_graph.trim_last_node()
             step_first_node, _ = graph.extend(step_graph)
             if not step_first_node:
-                raise ValueError(f"Runnable {step} has no first node")
+                msg = f"Runnable {step} has no first node"
+                raise ValueError(msg)
             if current_last_node:
                 graph.add_edge(current_last_node, step_first_node)
@@ -3655,9 +3657,11 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
             else:
                 step_first_node, step_last_node = graph.extend(step_graph)
                 if not step_first_node:
-                    raise ValueError(f"Runnable {step} has no first node")
+                    msg = f"Runnable {step} has no first node"
+                    raise ValueError(msg)
                 if not step_last_node:
-                    raise ValueError(f"Runnable {step} has no last node")
+                    msg = f"Runnable {step} has no last node"
+                    raise ValueError(msg)
                 graph.add_edge(input_node, step_first_node)
                 graph.add_edge(step_last_node, output_node)
@@ -4049,10 +4053,11 @@ class RunnableGenerator(Runnable[Input, Output]):
             self._transform = transform
             func_for_name = transform
         else:
-            raise TypeError(
+            msg = (
                 "Expected a generator function type for `transform`."
                 f"Instead got an unsupported type: {type(transform)}"
             )
+            raise TypeError(msg)

         try:
             self.name = name or func_for_name.__name__
@@ -4161,7 +4166,8 @@ class RunnableGenerator(Runnable[Input, Output]):
         **kwargs: Any,
     ) -> Iterator[Output]:
         if not hasattr(self, "_transform"):
-            raise NotImplementedError(f"{repr(self)} only supports async methods.")
+            msg = f"{repr(self)} only supports async methods."
+            raise NotImplementedError(msg)
         return self._transform_stream_with_config(
             input,
             self._transform,  # type: ignore[arg-type]
@@ -4192,7 +4198,8 @@ class RunnableGenerator(Runnable[Input, Output]):
         **kwargs: Any,
     ) -> AsyncIterator[Output]:
         if not hasattr(self, "_atransform"):
-            raise NotImplementedError(f"{repr(self)} only supports sync methods.")
+            msg = f"{repr(self)} only supports sync methods."
+            raise NotImplementedError(msg)
         return self._atransform_stream_with_config(
             input, self._atransform, config, **kwargs
@@ -4320,21 +4327,23 @@ class RunnableLambda(Runnable[Input, Output]):
         if is_async_callable(func) or is_async_generator(func):
             if afunc is not None:
-                raise TypeError(
+                msg = (
                     "Func was provided as a coroutine function, but afunc was "
                     "also provided. If providing both, func should be a regular "
                     "function to avoid ambiguity."
                 )
+                raise TypeError(msg)
             self.afunc = func
             func_for_name = func
         elif callable(func):
             self.func = cast(Callable[[Input], Output], func)
             func_for_name = func
         else:
-            raise TypeError(
+            msg = (
                 "Expected a callable type for `func`."
                 f"Instead got an unsupported type: {type(func)}"
             )
+            raise TypeError(msg)

         try:
             if name is not None:
@@ -4497,9 +4506,11 @@ class RunnableLambda(Runnable[Input, Output]):
             else:
                 dep_first_node, dep_last_node = graph.extend(dep_graph)
                 if not dep_first_node:
-                    raise ValueError(f"Runnable {dep} has no first node")
+                    msg = f"Runnable {dep} has no first node"
+                    raise ValueError(msg)
                 if not dep_last_node:
-                    raise ValueError(f"Runnable {dep} has no last node")
+                    msg = f"Runnable {dep} has no last node"
+                    raise ValueError(msg)
                 graph.add_edge(input_node, dep_first_node)
                 graph.add_edge(dep_last_node, output_node)
         else:
@@ -4560,9 +4571,10 @@ class RunnableLambda(Runnable[Input, Output]):
         if isinstance(output, Runnable):
             recursion_limit = config["recursion_limit"]
             if recursion_limit <= 0:
-                raise RecursionError(
+                msg = (
                     f"Recursion limit reached when invoking {self} with input {input}."
                 )
+                raise RecursionError(msg)
             output = output.invoke(
                 input,
                 patch_config(
@@ -4659,9 +4671,10 @@ class RunnableLambda(Runnable[Input, Output]):
         if isinstance(output, Runnable):
             recursion_limit = config["recursion_limit"]
             if recursion_limit <= 0:
-                raise RecursionError(
+                msg = (
                     f"Recursion limit reached when invoking {self} with input {input}."
                 )
+                raise RecursionError(msg)
             output = await output.ainvoke(
                 input,
                 patch_config(
@@ -4704,10 +4717,11 @@ class RunnableLambda(Runnable[Input, Output]):
                 **kwargs,
             )
         else:
-            raise TypeError(
+            msg = (
                 "Cannot invoke a coroutine function synchronously."
                 "Use `ainvoke` instead."
             )
+            raise TypeError(msg)

     async def ainvoke(
         self,
@@ -4778,10 +4792,11 @@ class RunnableLambda(Runnable[Input, Output]):
             if isinstance(output, Runnable):
                 recursion_limit = config["recursion_limit"]
                 if recursion_limit <= 0:
-                    raise RecursionError(
+                    msg = (
                         f"Recursion limit reached when invoking "
                         f"{self} with input {final}."
                     )
+                    raise RecursionError(msg)
                 for chunk in output.stream(
                     final,
                     patch_config(
@@ -4809,10 +4824,11 @@ class RunnableLambda(Runnable[Input, Output]):
                 **kwargs,
             )
         else:
-            raise TypeError(
+            msg = (
                 "Cannot stream a coroutine function synchronously."
                 "Use `astream` instead."
             )
+            raise TypeError(msg)

     def stream(
         self,
@@ -4849,10 +4865,11 @@ class RunnableLambda(Runnable[Input, Output]):
             afunc = self.afunc
         else:
             if inspect.isgeneratorfunction(self.func):
-                raise TypeError(
+                msg = (
                     "Cannot stream from a generator function asynchronously."
                     "Use .stream() instead."
                 )
+                raise TypeError(msg)

             def func(
                 input: Input,
@@ -4899,10 +4916,11 @@ class RunnableLambda(Runnable[Input, Output]):
             if isinstance(output, Runnable):
                 recursion_limit = config["recursion_limit"]
                 if recursion_limit <= 0:
-                    raise RecursionError(
+                    msg = (
                         f"Recursion limit reached when invoking "
                         f"{self} with input {final}."
                     )
+                    raise RecursionError(msg)
                 async for chunk in output.astream(
                     final,
                     patch_config(
@@ -5061,9 +5079,8 @@ class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]):
         **kwargs: Optional[Any],
     ) -> AsyncIterator[StreamEvent]:
         for _ in range(1):
-            raise NotImplementedError(
-                "RunnableEach does not support astream_events yet."
-            )
+            msg = "RunnableEach does not support astream_events yet."
+            raise NotImplementedError(msg)
             yield
@@ -5819,10 +5836,11 @@ def coerce_to_runnable(thing: RunnableLike) -> Runnable[Input, Output]:
     elif isinstance(thing, dict):
         return cast(Runnable[Input, Output], RunnableParallel(thing))
     else:
-        raise TypeError(
+        msg = (
             f"Expected a Runnable, callable or dict."
             f"Instead got an unsupported type: {type(thing)}"
         )
+        raise TypeError(msg)


 @overload
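For context on the recursion-limit hunks above: when a RunnableLambda's function returns another Runnable, that output is invoked again with `recursion_limit` decremented via patch_config, and the RecursionError fires once the limit reaches zero. A rough sketch of how that surfaces to a caller (assuming the standard "recursion_limit" key of RunnableConfig; the example itself is not from this diff):

    from langchain_core.runnables import RunnableLambda

    # A lambda that returns a Runnable is re-invoked on the same input,
    # with recursion_limit reduced by one on each round.
    loop: RunnableLambda = RunnableLambda(lambda _: loop)  # always returns itself

    # After three rounds the limit hits zero and the error in the hunks
    # above is raised:
    #     RecursionError: Recursion limit reached when invoking ...
    loop.invoke("hi", config={"recursion_limit": 3})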
@@ -92,7 +92,8 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
             ValueError: If a branch is not of length 2.
         """
         if len(branches) < 2:
-            raise ValueError("RunnableBranch requires at least two branches")
+            msg = "RunnableBranch requires at least two branches"
+            raise ValueError(msg)

         default = branches[-1]
@@ -100,9 +101,8 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
             default,
             (Runnable, Callable, Mapping),  # type: ignore[arg-type]
         ):
-            raise TypeError(
-                "RunnableBranch default must be Runnable, callable or mapping."
-            )
+            msg = "RunnableBranch default must be Runnable, callable or mapping."
+            raise TypeError(msg)

         default_ = cast(
             Runnable[Input, Output], coerce_to_runnable(cast(RunnableLike, default))
@@ -112,16 +112,18 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
         for branch in branches[:-1]:
             if not isinstance(branch, (tuple, list)):  # type: ignore[arg-type]
-                raise TypeError(
+                msg = (
                     f"RunnableBranch branches must be "
                     f"tuples or lists, not {type(branch)}"
                 )
+                raise TypeError(msg)

-            if not len(branch) == 2:
-                raise ValueError(
+            if len(branch) != 2:
+                msg = (
                     f"RunnableBranch branches must be "
                     f"tuples or lists of length 2, not {len(branch)}"
                 )
+                raise ValueError(msg)

             condition, runnable = branch
             condition = cast(Runnable[Input, bool], coerce_to_runnable(condition))
             runnable = coerce_to_runnable(runnable)
@@ -185,7 +187,8 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
             and s.id.endswith(CONTEXT_CONFIG_SUFFIX_SET)
             for s in specs
         ):
-            raise ValueError("RunnableBranch cannot contain context setters.")
+            msg = "RunnableBranch cannot contain context setters."
+            raise ValueError(msg)

         return specs

     def invoke(
@@ -219,12 +219,14 @@ def get_config_list(
     """
     if length < 0:
-        raise ValueError(f"length must be >= 0, but got {length}")
+        msg = f"length must be >= 0, but got {length}"
+        raise ValueError(msg)
     if isinstance(config, Sequence) and len(config) != length:
-        raise ValueError(
+        msg = (
             f"config must be a list of the same length as inputs, "
             f"but got {len(config)} configs for {length} inputs"
         )
+        raise ValueError(msg)

     if isinstance(config, Sequence):
         return list(map(ensure_config, config))
@@ -632,7 +632,8 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
             else:
                 return (alt(), config)
         else:
-            raise ValueError(f"Unknown alternative: {which}")
+            msg = f"Unknown alternative: {which}"
+            raise ValueError(msg)


 def _strremoveprefix(s: str, prefix: str) -> str:
@ -152,10 +152,11 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output: ) -> Output:
if self.exception_key is not None and not isinstance(input, dict): if self.exception_key is not None and not isinstance(input, dict):
raise ValueError( msg = (
"If 'exception_key' is specified then input must be a dictionary." "If 'exception_key' is specified then input must be a dictionary."
f"However found a type of {type(input)} for input" f"However found a type of {type(input)} for input"
) )
raise ValueError(msg)
# setup callbacks # setup callbacks
config = ensure_config(config) config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config) callback_manager = get_callback_manager_for_config(config)
@@ -192,7 +193,8 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                 run_manager.on_chain_end(output)
                 return output
         if first_error is None:
-            raise ValueError("No error stored at end of fallbacks.")
+            msg = "No error stored at end of fallbacks."
+            raise ValueError(msg)
         run_manager.on_chain_error(first_error)
         raise first_error
@@ -203,10 +205,11 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
         **kwargs: Optional[Any],
     ) -> Output:
         if self.exception_key is not None and not isinstance(input, dict):
-            raise ValueError(
+            msg = (
                 "If 'exception_key' is specified then input must be a dictionary."
                 f"However found a type of {type(input)} for input"
             )
+            raise ValueError(msg)
         # setup callbacks
         config = ensure_config(config)
         callback_manager = get_async_callback_manager_for_config(config)
@@ -243,7 +246,8 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
                 await run_manager.on_chain_end(output)
                 return output
         if first_error is None:
-            raise ValueError("No error stored at end of fallbacks.")
+            msg = "No error stored at end of fallbacks."
+            raise ValueError(msg)
         await run_manager.on_chain_error(first_error)
         raise first_error
@@ -260,10 +264,11 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
         if self.exception_key is not None and not all(
             isinstance(input, dict) for input in inputs
         ):
-            raise ValueError(
+            msg = (
                 "If 'exception_key' is specified then inputs must be dictionaries."
                 f"However found a type of {type(inputs[0])} for input"
             )
+            raise ValueError(msg)
         if not inputs:
             return []
@@ -352,10 +357,11 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
         if self.exception_key is not None and not all(
             isinstance(input, dict) for input in inputs
         ):
-            raise ValueError(
+            msg = (
                 "If 'exception_key' is specified then inputs must be dictionaries."
                 f"However found a type of {type(inputs[0])} for input"
             )
+            raise ValueError(msg)
         if not inputs:
             return []
@@ -447,10 +453,11 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
     ) -> Iterator[Output]:
         """"""
         if self.exception_key is not None and not isinstance(input, dict):
-            raise ValueError(
+            msg = (
                 "If 'exception_key' is specified then input must be a dictionary."
                 f"However found a type of {type(input)} for input"
             )
+            raise ValueError(msg)
         # setup callbacks
         config = ensure_config(config)
         callback_manager = get_callback_manager_for_config(config)
@@ -510,10 +517,11 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
         **kwargs: Optional[Any],
     ) -> AsyncIterator[Output]:
         if self.exception_key is not None and not isinstance(input, dict):
-            raise ValueError(
+            msg = (
                 "If 'exception_key' is specified then input must be a dictionary."
                 f"However found a type of {type(input)} for input"
             )
+            raise ValueError(msg)
         # setup callbacks
         config = ensure_config(config)
         callback_manager = get_async_callback_manager_for_config(config)
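For readers unfamiliar with the EM rules this commit enforces: ruff's flake8-errmsg checks (EM101 for string literals, EM102 for f-strings, EM103 for .format() calls) flag exception messages built inline in the raise statement, because Python tracebacks repeat the whole raise line, message and all. Binding the message to a variable first keeps the traceback line short. A minimal before/after sketch, with illustrative names not taken from this diff:

# Before (EM102): the full f-string is echoed back in the traceback.
def set_speed_before(speed: int) -> None:
    if speed < 0:
        raise ValueError(f"speed must be non-negative, got {speed}")

# After: bind the message first; the traceback shows only `raise ValueError(msg)`.
def set_speed_after(speed: int) -> None:
    if speed < 0:
        msg = f"speed must be non-negative, got {speed}"
        raise ValueError(msg)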


@@ -330,7 +330,8 @@ class Graph:
             ValueError: If a node with the same id already exists.
         """
         if id is not None and id in self.nodes:
-            raise ValueError(f"Node with id {id} already exists")
+            msg = f"Node with id {id} already exists"
+            raise ValueError(msg)
         id = id or self.next_id()
         node = Node(id=id, data=data, metadata=metadata, name=node_data_str(id, data))
         self.nodes[node.id] = node
@@ -371,9 +372,11 @@ class Graph:
             ValueError: If the source or target node is not in the graph.
         """
         if source.id not in self.nodes:
-            raise ValueError(f"Source node {source.id} not in graph")
+            msg = f"Source node {source.id} not in graph"
+            raise ValueError(msg)
         if target.id not in self.nodes:
-            raise ValueError(f"Target node {target.id} not in graph")
+            msg = f"Target node {target.id} not in graph"
+            raise ValueError(msg)
         edge = Edge(
             source=source.id, target=target.id, data=data, conditional=conditional
         )


@@ -161,9 +161,8 @@ def _build_sugiyama_layout(
             route_with_lines,
         )
     except ImportError as exc:
-        raise ImportError(
-            "Install grandalf to draw graphs: `pip install grandalf`."
-        ) from exc
+        msg = "Install grandalf to draw graphs: `pip install grandalf`."
+        raise ImportError(msg) from exc

     #
     # Just a reminder about naming conventions:


@@ -104,11 +104,12 @@ def draw_mermaid(
         if prefix and not self_loop:
             subgraph = prefix.split(":")[-1]
             if subgraph in seen_subgraphs:
-                raise ValueError(
+                msg = (
                     f"Found duplicate subgraph '{subgraph}' -- this likely means that "
                     "you're reusing a subgraph node with the same name. "
                     "Please adjust your graph to have subgraph nodes with unique names."
                 )
+                raise ValueError(msg)
             seen_subgraphs.add(subgraph)
             mermaid_graph += f"\tsubgraph {subgraph}\n"
@@ -214,10 +215,11 @@ def draw_mermaid_png(
         )
     else:
         supported_methods = ", ".join([m.value for m in MermaidDrawMethod])
-        raise ValueError(
+        msg = (
             f"Invalid draw method: {draw_method}. "
             f"Supported draw methods are: {supported_methods}"
         )
+        raise ValueError(msg)

     return img_bytes
@@ -233,9 +235,8 @@ async def _render_mermaid_using_pyppeteer(
     try:
         from pyppeteer import launch  # type: ignore[import]
     except ImportError as e:
-        raise ImportError(
-            "Install Pyppeteer to use the Pyppeteer method: `pip install pyppeteer`."
-        ) from e
+        msg = "Install Pyppeteer to use the Pyppeteer method: `pip install pyppeteer`."
+        raise ImportError(msg) from e

     browser = await launch()
     page = await browser.newPage()
@@ -304,10 +305,11 @@ def _render_mermaid_using_api(
     try:
         import requests  # type: ignore[import]
     except ImportError as e:
-        raise ImportError(
+        msg = (
             "Install the `requests` module to use the Mermaid.INK API: "
             "`pip install requests`."
-        ) from e
+        )
+        raise ImportError(msg) from e

     # Use Mermaid API to render the image
     mermaid_syntax_encoded = base64.b64encode(mermaid_syntax.encode("utf8")).decode(
@@ -332,7 +334,8 @@ def _render_mermaid_using_api(
         return img_bytes
     else:
-        raise ValueError(
+        msg = (
             f"Failed to render the graph using the Mermaid.INK API. "
             f"Status code: {response.status_code}."
         )
+        raise ValueError(msg)
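A hedged usage sketch of the draw-method validation above, assuming the public Runnable.get_graph() / Graph.draw_mermaid_png() API; the lambda chain is illustrative:

from langchain_core.runnables import RunnableLambda
from langchain_core.runnables.graph import MermaidDrawMethod

graph = (RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)).get_graph()
# draw_method must be a MermaidDrawMethod member; anything else hits the
# "Invalid draw method" ValueError rewritten above.
png_bytes = graph.draw_mermaid_png(draw_method=MermaidDrawMethod.API)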


@@ -136,9 +136,8 @@ class PngDrawer:
         try:
             import pygraphviz as pgv  # type: ignore[import]
         except ImportError as exc:
-            raise ImportError(
-                "Install pygraphviz to draw graphs: `pip install pygraphviz`."
-            ) from exc
+            msg = "Install pygraphviz to draw graphs: `pip install pygraphviz`."
+            raise ImportError(msg) from exc

         # Create a directed graph
         viz = pgv.AGraph(directed=True, nodesep=0.9, ranksep=1.0)


@@ -472,16 +472,16 @@ class RunnableWithMessageHistory(RunnableBindingBase):
             # This occurs for chat models - since we batch inputs
             if isinstance(input_val[0], list):
                 if len(input_val) != 1:
-                    raise ValueError(
-                        f"Expected a single list of messages. Got {input_val}."
-                    )
+                    msg = f"Expected a single list of messages. Got {input_val}."
+                    raise ValueError(msg)
                 return input_val[0]
             return list(input_val)
         else:
-            raise ValueError(
+            msg = (
                 f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
                 f"Got {input_val}."
             )
+            raise ValueError(msg)

     def _get_output_messages(
         self, output_val: Union[str, BaseMessage, Sequence[BaseMessage], dict]
@@ -513,10 +513,11 @@ class RunnableWithMessageHistory(RunnableBindingBase):
         elif isinstance(output_val, (list, tuple)):
             return list(output_val)
         else:
-            raise ValueError(
+            msg = (
                 f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
                 f"Got {output_val}."
             )
+            raise ValueError(msg)

     def _enter_history(self, input: Any, config: RunnableConfig) -> list[BaseMessage]:
         hist: BaseChatMessageHistory = config["configurable"]["message_history"]
@@ -593,12 +594,13 @@ class RunnableWithMessageHistory(RunnableBindingBase):
                 missing_key: "[your-value-here]" for missing_key in missing_keys
             }
             example_config = {"configurable": example_configurable}
-            raise ValueError(
+            msg = (
                 f"Missing keys {sorted(missing_keys)} in config['configurable'] "
                 f"Expected keys are {sorted(expected_keys)}."
                 f"When using via .invoke() or .stream(), pass in a config; "
                 f"e.g., chain.invoke({example_input}, {example_config})"
             )
+            raise ValueError(msg)

         if len(expected_keys) == 1:
             if parameter_names:
@@ -613,10 +615,11 @@ class RunnableWithMessageHistory(RunnableBindingBase):
         else:
             # otherwise verify that names of keys patch and invoke by named arguments
             if set(expected_keys) != set(parameter_names):
-                raise ValueError(
+                msg = (
                     f"Expected keys {sorted(expected_keys)} do not match parameter "
                     f"names {sorted(parameter_names)} of get_session_history."
                 )
+                raise ValueError(msg)

             message_history = self.get_session_history(
                 **{key: configurable[key] for key in expected_keys}
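The config-validation hunks above are easiest to read against a concrete call. A minimal sketch, assuming the standard RunnableWithMessageHistory setup with the default session_id factory key (the echo chain and store are illustrative):

from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables import RunnableLambda
from langchain_core.runnables.history import RunnableWithMessageHistory

store: dict[str, InMemoryChatMessageHistory] = {}

def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
    # A single `session_id` parameter means invoke() must receive
    # config={"configurable": {"session_id": ...}}; omitting it triggers the
    # "Missing keys ... in config['configurable']" ValueError above.
    return store.setdefault(session_id, InMemoryChatMessageHistory())

chain = RunnableWithMessageHistory(RunnableLambda(lambda msgs: msgs), get_session_history)
chain.invoke("hello", config={"configurable": {"session_id": "user-1"}})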


@@ -101,7 +101,8 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
         key = input["key"]
         actual_input = input["input"]
         if key not in self.runnables:
-            raise ValueError(f"No runnable associated with key '{key}'")
+            msg = f"No runnable associated with key '{key}'"
+            raise ValueError(msg)

         runnable = self.runnables[key]
         return runnable.invoke(actual_input, config)
@@ -115,7 +116,8 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
         key = input["key"]
         actual_input = input["input"]
         if key not in self.runnables:
-            raise ValueError(f"No runnable associated with key '{key}'")
+            msg = f"No runnable associated with key '{key}'"
+            raise ValueError(msg)

         runnable = self.runnables[key]
         return await runnable.ainvoke(actual_input, config)
@@ -134,7 +136,8 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
         keys = [input["key"] for input in inputs]
         actual_inputs = [input["input"] for input in inputs]
         if any(key not in self.runnables for key in keys):
-            raise ValueError("One or more keys do not have a corresponding runnable")
+            msg = "One or more keys do not have a corresponding runnable"
+            raise ValueError(msg)

         def invoke(
             runnable: Runnable, input: Input, config: RunnableConfig
@@ -169,7 +172,8 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
         keys = [input["key"] for input in inputs]
         actual_inputs = [input["input"] for input in inputs]
         if any(key not in self.runnables for key in keys):
-            raise ValueError("One or more keys do not have a corresponding runnable")
+            msg = "One or more keys do not have a corresponding runnable"
+            raise ValueError(msg)

         async def ainvoke(
             runnable: Runnable, input: Input, config: RunnableConfig
@@ -201,7 +205,8 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
         key = input["key"]
         actual_input = input["input"]
         if key not in self.runnables:
-            raise ValueError(f"No runnable associated with key '{key}'")
+            msg = f"No runnable associated with key '{key}'"
+            raise ValueError(msg)

         runnable = self.runnables[key]
         yield from runnable.stream(actual_input, config)
@@ -215,7 +220,8 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
         key = input["key"]
         actual_input = input["input"]
         if key not in self.runnables:
-            raise ValueError(f"No runnable associated with key '{key}'")
+            msg = f"No runnable associated with key '{key}'"
+            raise ValueError(msg)

         runnable = self.runnables[key]
         async for output in runnable.astream(actual_input, config):
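A hedged sketch of the {"key": ..., "input": ...} contract these hunks validate (the runnables are illustrative):

from langchain_core.runnables import RouterRunnable, RunnableLambda

router = RouterRunnable(
    {
        "double": RunnableLambda(lambda x: x * 2),
        "square": RunnableLambda(lambda x: x * x),
    }
)
router.invoke({"key": "double", "input": 3})  # -> 6
# An unknown key raises: ValueError("No runnable associated with key 'cube'")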


@@ -639,10 +639,11 @@ def get_unique_config_specs(
         if len(others) == 0 or all(o == first for o in others):
             unique.append(first)
         else:
-            raise ValueError(
+            msg = (
                 "RunnableSequence contains conflicting config specs"
                 f"for {id}: {[first] + others}"
             )
+            raise ValueError(msg)
     return unique


@@ -24,19 +24,21 @@ class Visitor(ABC):
             and self.allowed_operators is not None
             and func not in self.allowed_operators
         ):
-            raise ValueError(
+            msg = (
                 f"Received disallowed operator {func}. Allowed "
                 f"comparators are {self.allowed_operators}"
             )
+            raise ValueError(msg)
         if (
             isinstance(func, Comparator)
             and self.allowed_comparators is not None
             and func not in self.allowed_comparators
         ):
-            raise ValueError(
+            msg = (
                 f"Received disallowed comparator {func}. Allowed "
                 f"comparators are {self.allowed_comparators}"
             )
+            raise ValueError(msg)

     @abstractmethod
     def visit_operation(self, operation: Operation) -> Any:


@@ -127,9 +127,8 @@ def _validate_docstring_args_against_annotations(
     """Raise error if docstring arg is not in type annotations."""
     for docstring_arg in arg_descriptions:
         if docstring_arg not in annotations:
-            raise ValueError(
-                f"Arg {docstring_arg} in docstring not found in function signature."
-            )
+            msg = f"Arg {docstring_arg} in docstring not found in function signature."
+            raise ValueError(msg)


 def _infer_arg_descriptions(
@@ -183,10 +182,11 @@ def _function_annotations_are_pydantic_v1(
         for parameter in signature.parameters.values()
     )
     if any_v1_annotations and any_v2_annotations:
-        raise NotImplementedError(
+        msg = (
             f"Function {func} contains a mix of Pydantic v1 and v2 annotations. "
             "Only one version of Pydantic annotations per function is supported."
         )
+        raise NotImplementedError(msg)
     return any_v1_annotations and not any_v2_annotations
@@ -335,7 +335,7 @@ class ChildTool(BaseTool):
                 args_schema: Type[BaseModel] = SchemaClass
                 ..."""
             name = cls.__name__
-            raise SchemaAnnotationError(
+            msg = (
                 f"Tool definition for {name} must include valid type annotations"
                 f" for argument 'args_schema' to behave as expected.\n"
                 f"Expected annotation of 'Type[BaseModel]'"
@@ -343,6 +343,7 @@ class ChildTool(BaseTool):
                 f"Expected class looks like:\n"
                 f"{typehint_mandate}"
             )
+            raise SchemaAnnotationError(msg)

     name: str
     """The unique name of the tool that clearly communicates its purpose."""
@@ -422,10 +423,11 @@ class ChildTool(BaseTool):
             and kwargs["args_schema"] is not None
             and not is_basemodel_subclass(kwargs["args_schema"])
         ):
-            raise TypeError(
+            msg = (
                 f"args_schema must be a subclass of pydantic BaseModel. "
                 f"Got: {kwargs['args_schema']}."
             )
+            raise TypeError(msg)
         super().__init__(**kwargs)

     model_config = ConfigDict(
@@ -515,10 +517,11 @@ class ChildTool(BaseTool):
                 result = input_args.parse_obj(tool_input)
                 result_dict = result.dict()
             else:
-                raise NotImplementedError(
+                msg = (
                     "args_schema must be a Pydantic BaseModel, "
                     f"got {self.args_schema}"
                 )
+                raise NotImplementedError(msg)
             return {
                 k: getattr(result, k)
                 for k, v in result_dict.items()
@@ -653,12 +656,13 @@ class ChildTool(BaseTool):
                 response = context.run(self._run, *tool_args, **tool_kwargs)
             if self.response_format == "content_and_artifact":
                 if not isinstance(response, tuple) or len(response) != 2:
-                    raise ValueError(
+                    msg = (
                         "Since response_format='content_and_artifact' "
                         "a two-tuple of the message content and raw tool output is "
                         f"expected. Instead generated response of type: "
                         f"{type(response)}."
                     )
+                    raise ValueError(msg)
                 content, artifact = response
             else:
                 content = response
@@ -769,12 +773,13 @@ class ChildTool(BaseTool):
                 response = await coro
             if self.response_format == "content_and_artifact":
                 if not isinstance(response, tuple) or len(response) != 2:
-                    raise ValueError(
+                    msg = (
                         "Since response_format='content_and_artifact' "
                         "a two-tuple of the message content and raw tool output is "
                         f"expected. Instead generated response of type: "
                         f"{type(response)}."
                     )
+                    raise ValueError(msg)
                 content, artifact = response
             else:
                 content = response
@@ -825,10 +830,11 @@ def _handle_validation_error(
     elif callable(flag):
         content = flag(e)
     else:
-        raise ValueError(
+        msg = (
             f"Got unexpected type of `handle_validation_error`. Expected bool, "
             f"str or callable. Received: {flag}"
         )
+        raise ValueError(msg)
     return content
@@ -844,10 +850,11 @@ def _handle_tool_error(
     elif callable(flag):
         content = flag(e)
     else:
-        raise ValueError(
+        msg = (
             f"Got unexpected type of `handle_tool_error`. Expected bool, str "
             f"or callable. Received: {flag}"
         )
+        raise ValueError(msg)
     return content
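The two-tuple check above is the enforcement half of the content_and_artifact response format. A minimal sketch, assuming the public @tool decorator (the tool name and payload are illustrative):

from langchain_core.tools import tool

@tool(response_format="content_and_artifact")
def sample_range(n: int) -> tuple[str, list[int]]:
    """Return the first n integers."""
    data = list(range(n))
    # Must return (content, artifact); anything else raises the
    # "two-tuple of the message content and raw tool output" ValueError above.
    return f"Generated {n} integers", data

sample_range.invoke({"n": 3})  # content only: 'Generated 3 integers'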


@@ -146,7 +146,8 @@ def tool(
             runnable = dec_func
             if runnable.input_schema.model_json_schema().get("type") != "object":
-                raise ValueError("Runnable must have an object schema.")
+                msg = "Runnable must have an object schema."
+                raise ValueError(msg)

             async def ainvoke_wrapper(
                 callbacks: Optional[Callbacks] = None, **kwargs: Any
@@ -189,10 +190,11 @@ def tool(
             # If someone doesn't want a schema applied, we must treat it as
             # a simple string->string function
             if dec_func.__doc__ is None:
-                raise ValueError(
+                msg = (
                     "Function must have a docstring if "
                     "description not provided and infer_schema is False."
                 )
+                raise ValueError(msg)
             return Tool(
                 name=tool_name,
                 func=func,
@@ -222,7 +224,8 @@ def tool(
         return _partial
     else:
-        raise ValueError("Too many arguments for tool decorator")
+        msg = "Too many arguments for tool decorator"
+        raise ValueError(msg)


 def _get_description_from_runnable(runnable: Runnable) -> str:
@@ -241,11 +244,12 @@ def _get_schema_from_runnable_and_arg_types(
     try:
         arg_types = get_type_hints(runnable.InputType)
     except TypeError as e:
-        raise TypeError(
+        msg = (
             "Tool input must be str or dict. If dict, dict arguments must be "
             "typed. Either annotate types (e.g., with TypedDict) or pass "
             f"arg_types into `.as_tool` to specify. {str(e)}"
-        ) from e
+        )
+        raise TypeError(msg) from e

     fields = {key: (key_type, Field(...)) for key, key_type in arg_types.items()}
     return create_model(name, **fields)  # type: ignore
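A short sketch of the docstring requirement enforced above when schema inference is turned off (the function is illustrative):

from langchain_core.tools import tool

@tool(infer_schema=False)
def echo(text: str) -> str:
    """Echo the input text back."""  # removing this docstring trips the ValueError above
    return text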


@@ -68,11 +68,12 @@ class Tool(BaseTool):
         # For backwards compatibility. The tool must be run with a single input
         all_args = list(args) + list(kwargs.values())
         if len(all_args) != 1:
-            raise ToolException(
+            msg = (
                 f"""Too many arguments to single-input tool {self.name}.
                 Consider using StructuredTool instead."""
                 f" Args: {all_args}"
             )
+            raise ToolException(msg)
         return tuple(all_args), {}

     def _run(
@@ -89,7 +90,8 @@ class Tool(BaseTool):
             if config_param := _get_runnable_config_param(self.func):
                 kwargs[config_param] = config
             return self.func(*args, **kwargs)
-        raise NotImplementedError("Tool does not support sync invocation.")
+        msg = "Tool does not support sync invocation."
+        raise NotImplementedError(msg)

     async def _arun(
         self,
@@ -152,7 +154,8 @@ class Tool(BaseTool):
             ValueError: If the function is not provided.
         """
         if func is None and coroutine is None:
-            raise ValueError("Function and/or coroutine must be provided")
+            msg = "Function and/or coroutine must be provided"
+            raise ValueError(msg)
         return cls(
             name=name,
             func=func,


@@ -78,7 +78,8 @@ class StructuredTool(BaseTool):
             if config_param := _get_runnable_config_param(self.func):
                 kwargs[config_param] = config
             return self.func(*args, **kwargs)
-        raise NotImplementedError("StructuredTool does not support sync invocation.")
+        msg = "StructuredTool does not support sync invocation."
+        raise NotImplementedError(msg)

     async def _arun(
         self,
@@ -167,7 +168,8 @@ class StructuredTool(BaseTool):
         elif coroutine is not None:
             source_function = coroutine
         else:
-            raise ValueError("Function and/or coroutine must be provided")
+            msg = "Function and/or coroutine must be provided"
+            raise ValueError(msg)
         name = name or source_function.__name__
         if args_schema is None and infer_schema:
             # schema name is appended within function
@@ -184,9 +186,8 @@ class StructuredTool(BaseTool):
         if description_ is None and args_schema:
             description_ = args_schema.__doc__ or None
             if description_ is None:
-                raise ValueError(
-                    "Function must have a docstring if description not provided."
-                )
+                msg = "Function must have a docstring if description not provided."
+                raise ValueError(msg)
         if description is None:
             # Only apply if using the function's docstring
             description_ = textwrap.dedent(description_).strip()
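Correspondingly, a hedged sketch of StructuredTool.from_function, whose func/coroutine and docstring checks appear above (the multiply function is illustrative):

from langchain_core.tools import StructuredTool

def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

calculator = StructuredTool.from_function(func=multiply)  # func and/or coroutine required
calculator.invoke({"a": 2, "b": 3})  # -> 6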


@@ -40,9 +40,10 @@ def tracing_enabled(
     session_name: str = "default",
 ) -> Generator[TracerSessionV1, None, None]:
     """Throw an error because this has been replaced by tracing_v2_enabled."""
-    raise RuntimeError(
+    msg = (
         "tracing_enabled is no longer supported. Please use tracing_enabled_v2 instead."
     )
+    raise RuntimeError(msg)


 @contextmanager
@@ -196,9 +197,8 @@ def register_configure_hook(
         to a non-None value.
     """
     if env_var is not None and handle_class is None:
-        raise ValueError(
-            "If env_var is set, handle_class must also be set to a non-None value."
-        )
+        msg = "If env_var is set, handle_class must also be set to a non-None value."
+        raise ValueError(msg)
     from langchain_core.callbacks.base import BaseCallbackHandler

     _configure_hooks.append(


@@ -137,17 +137,19 @@ class _TracerCore(ABC):
         try:
             run = self.run_map[str(run_id)]
         except KeyError as exc:
-            raise TracerException(f"No indexed run ID {run_id}.") from exc
+            msg = f"No indexed run ID {run_id}."
+            raise TracerException(msg) from exc

         if isinstance(run_type, str):
             run_types: Union[set[str], None] = {run_type}
         else:
             run_types = run_type
         if run_types is not None and run.run_type not in run_types:
-            raise TracerException(
+            msg = (
                 f"Found {run.run_type} run at ID {run_id}, "
                 f"but expected {run_types} run."
             )
+            raise TracerException(msg)
         return run

     def _create_chat_model_run(
@@ -170,10 +172,11 @@ class _TracerCore(ABC):
             # This can eventually be cleaned up by writing a "modern" tracer
             # that has all the updated schema changes corresponding to
             # the "streaming_events" format.
-            raise NotImplementedError(
+            msg = (
                 f"Chat model tracing is not supported in "
                 f"for {self._schema_format} format."
             )
+            raise NotImplementedError(msg)
         start_time = datetime.now(timezone.utc)
         if metadata:
             kwargs.update({"metadata": metadata})
@@ -338,7 +341,8 @@ class _TracerCore(ABC):
                 "input": inputs,
             }
         else:
-            raise ValueError(f"Invalid format: {self._schema_format}")
+            msg = f"Invalid format: {self._schema_format}"
+            raise ValueError(msg)

     def _get_chain_outputs(self, outputs: Any) -> Any:
         """Get the outputs for a chain run."""
@@ -349,7 +353,8 @@ class _TracerCore(ABC):
                 "output": outputs,
             }
         else:
-            raise ValueError(f"Invalid format: {self._schema_format}")
+            msg = f"Invalid format: {self._schema_format}"
+            raise ValueError(msg)

     def _complete_chain_run(
         self,
@@ -404,7 +409,8 @@ class _TracerCore(ABC):
         elif self._schema_format == "streaming_events":
             inputs = {"input": inputs}
         else:
-            raise AssertionError(f"Invalid format: {self._schema_format}")
+            msg = f"Invalid format: {self._schema_format}"
+            raise AssertionError(msg)
         return Run(
             id=run_id,


@@ -159,10 +159,11 @@ class EvaluatorCallbackHandler(BaseTracer):
         elif isinstance(results, dict) and "results" in results:
             results_ = cast(list[EvaluationResult], results["results"])
         else:
-            raise TypeError(
+            msg = (
                 f"Invalid evaluation result type {type(results)}."
                 " Expected EvaluationResult or EvaluationResults."
             )
+            raise TypeError(msg)
         return results_

     def _log_evaluation_feedback(


@@ -136,10 +136,11 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
         while parent_id := self.parent_map.get(run_id):
             str_parent_id = str(parent_id)
             if str_parent_id in parent_ids:
-                raise AssertionError(
+                msg = (
                     f"Parent ID {parent_id} is already in the parent_ids list. "
                     f"This should never happen."
                 )
+                raise AssertionError(msg)
             parent_ids.append(str_parent_id)
             run_id = parent_id
@@ -411,7 +412,8 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
         chunk_: Union[GenerationChunk, BaseMessageChunk]

         if run_info is None:
-            raise AssertionError(f"Run ID {run_id} not found in run map.")
+            msg = f"Run ID {run_id} not found in run map."
+            raise AssertionError(msg)
         if self.is_tapped.get(run_id):
             return
         if run_info["run_type"] == "chat_model":
@@ -429,7 +431,8 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
             else:
                 chunk_ = cast(GenerationChunk, chunk)
         else:
-            raise ValueError(f"Unexpected run type: {run_info['run_type']}")
+            msg = f"Unexpected run type: {run_info['run_type']}"
+            raise ValueError(msg)

         self._send(
             {
@@ -484,7 +487,8 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
             }
             event = "on_llm_end"
         else:
-            raise ValueError(f"Unexpected run type: {run_info['run_type']}")
+            msg = f"Unexpected run type: {run_info['run_type']}"
+            raise ValueError(msg)

         self._send(
             {
@@ -626,10 +630,11 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
         """End a trace for a tool run."""
         run_info = self.run_map.pop(run_id)
         if "inputs" not in run_info:
-            raise AssertionError(
+            msg = (
                 f"Run ID {run_id} is a tool call and is expected to have "
                 f"inputs associated with it."
             )
+            raise AssertionError(msg)
         inputs = run_info["inputs"]

         self._send(
@@ -839,11 +844,12 @@ async def _astream_events_implementation_v1(
             if event_type == "stream":
                 num_chunks = len(log_entry["streamed_output"])
                 if num_chunks != 1:
-                    raise AssertionError(
+                    msg = (
                         f"Expected exactly one chunk of streamed output, "
                         f"got {num_chunks} instead. This is impossible. "
                         f"Encountered in: {log_entry['name']}"
                     )
+                    raise AssertionError(msg)

                 data = {"chunk": log_entry["streamed_output"][0]}
                 # Clean up the stream, we don't need it anymore.
@@ -866,11 +872,12 @@ async def _astream_events_implementation_v1(
         if state["streamed_output"]:
             num_chunks = len(state["streamed_output"])
             if num_chunks != 1:
-                raise AssertionError(
+                msg = (
                     f"Expected exactly one chunk of streamed output, "
                     f"got {num_chunks} instead. This is impossible. "
                     f"Encountered in: {state['name']}"
                 )
+                raise AssertionError(msg)

             data = {"chunk": state["streamed_output"][0]}
             # Clean up the stream, we don't need it anymore.
@@ -945,10 +952,11 @@ async def _astream_events_implementation_v2(
         callbacks.add_handler(event_streamer, inherit=True)
         config["callbacks"] = callbacks
     else:
-        raise ValueError(
+        msg = (
             f"Unexpected type for callbacks: {callbacks}."
             "Expected None, list or AsyncCallbackManager."
         )
+        raise ValueError(msg)

     # Call the runnable in streaming mode,
     # add each chunk to the output stream
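A minimal consumer of the v2 event stream patched above, assuming the public astream_events API (the chain is illustrative):

import asyncio

from langchain_core.runnables import RunnableLambda

async def main() -> None:
    chain = RunnableLambda(lambda x: x + 1)
    # The v2 implementation injects an event-streaming handler into
    # config["callbacks"]; passing an unsupported callbacks object there
    # raises the "Unexpected type for callbacks" ValueError above.
    async for event in chain.astream_events(1, version="v2"):
        print(event["event"], event.get("name"))

asyncio.run(main())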


@@ -191,7 +191,8 @@ class LangChainTracer(BaseTracer):
             ValueError: If the run URL cannot be found.
         """
         if not self.latest_run:
-            raise ValueError("No traced run found.")
+            msg = "No traced run found."
+            raise ValueError(msg)
         # If this is the first run in a project, the project may not yet be created.
         # This method is only really useful for debugging flows, so we will assume
         # there is some tolerance for latency.
@@ -204,7 +205,8 @@ class LangChainTracer(BaseTracer):
                 return self.client.get_run_url(
                     run=self.latest_run, project_name=self.project_name
                 )
-        raise ValueError("Failed to get run URL.")
+        msg = "Failed to get run URL."
+        raise ValueError(msg)

     def _get_tags(self, run: Run) -> list[str]:
         """Get combined tags for a run."""


@@ -3,14 +3,16 @@ from typing import Any

 def get_headers(*args: Any, **kwargs: Any) -> Any:
     """Throw an error because this has been replaced by get_headers."""
-    raise RuntimeError(
+    msg = (
         "get_headers for LangChainTracerV1 is no longer supported. "
         "Please use LangChainTracer instead."
     )
+    raise RuntimeError(msg)


 def LangChainTracerV1(*args: Any, **kwargs: Any) -> Any:  # noqa: N802
     """Throw an error because this has been replaced by LangChainTracer."""
-    raise RuntimeError(
+    msg = (
         "LangChainTracerV1 is no longer supported. Please use LangChainTracer instead."
     )
+    raise RuntimeError(msg)


@@ -104,9 +104,8 @@ class RunLogPatch:
             state = jsonpatch.apply_patch(None, copy.deepcopy(ops))
             return RunLog(*ops, state=state)

-        raise TypeError(
-            f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
-        )
+        msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
+        raise TypeError(msg)

     def __repr__(self) -> str:
         from pprint import pformat
@@ -134,9 +133,8 @@ class RunLog(RunLogPatch):
             state = jsonpatch.apply_patch(self.state, other.ops)
             return RunLog(*ops, state=state)

-        raise TypeError(
-            f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
-        )
+        msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
+        raise TypeError(msg)

     def __repr__(self) -> str:
         from pprint import pformat
@@ -197,10 +195,11 @@ class LogStreamCallbackHandler(BaseTracer, _StreamingCallbackHandler):
             ValueError: If an invalid schema format is provided (internal use only).
         """
         if _schema_format not in {"original", "streaming_events"}:
-            raise ValueError(
+            msg = (
                 f"Invalid schema format: {_schema_format}. "
                 f"Expected one of 'original', 'streaming_events'."
             )
+            raise ValueError(msg)
         super().__init__(_schema_format=_schema_format)

         self.auto_close = auto_close
@@ -496,11 +495,12 @@ def _get_standardized_inputs(
         None means that the input is not yet known!
     """
     if schema_format == "original":
-        raise NotImplementedError(
+        msg = (
             "Do not assign inputs with original schema drop the key for now."
             "When inputs are added to astream_log they should be added with "
             "standardized schema for streaming events."
         )
+        raise NotImplementedError(msg)

     inputs = load(run.inputs)
@@ -613,10 +613,11 @@ async def _astream_log_implementation(
             callbacks.add_handler(stream, inherit=True)
             config["callbacks"] = callbacks
         else:
-            raise ValueError(
+            msg = (
                 f"Unexpected type for callbacks: {callbacks}."
                 "Expected None, list or AsyncCallbackManager."
             )
+            raise ValueError(msg)

         # Call the runnable in streaming mode,
         # add each chunk to the output stream


@@ -34,10 +34,11 @@ def merge_dicts(left: dict[str, Any], *others: dict[str, Any]) -> dict[str, Any]:
             elif right_v is None:
                 continue
             elif type(merged[right_k]) is not type(right_v):
-                raise TypeError(
+                msg = (
                     f'additional_kwargs["{right_k}"] already exists in this message,'
                     " but with a different type."
                 )
+                raise TypeError(msg)
             elif isinstance(merged[right_k], str):
                 # TODO: Add below special handling for 'type' key in 0.3 and remove
                 # merge_lists 'type' logic.
@@ -60,10 +61,11 @@ def merge_dicts(left: dict[str, Any], *others: dict[str, Any]) -> dict[str, Any]:
             elif merged[right_k] == right_v:
                 continue
             else:
-                raise TypeError(
+                msg = (
                     f"Additional kwargs key {right_k} already exists in left dict and "
                     f"value has unsupported type {type(merged[right_k])}."
                 )
+                raise TypeError(msg)
     return merged
@@ -125,10 +127,11 @@ def merge_obj(left: Any, right: Any) -> Any:
     if left is None or right is None:
         return left if left is not None else right
     elif type(left) is not type(right):
-        raise TypeError(
+        msg = (
             f"left and right are of different types. Left type: {type(left)}. Right "
             f"type: {type(right)}."
         )
+        raise TypeError(msg)
     elif isinstance(left, str):
         return left + right
     elif isinstance(left, dict):
@@ -138,7 +141,8 @@ def merge_obj(left: Any, right: Any) -> Any:
     elif left == right:
         return left
     else:
-        raise ValueError(
+        msg = (
             f"Unable to merge {left=} and {right=}. Both must be of type str, dict, or "
             f"list, or else be two equal objects."
         )
+        raise ValueError(msg)
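The merge semantics guarded above, as a hedged sketch (note _merge is a private module, so the import path may change):

from langchain_core.utils._merge import merge_dicts

merge_dicts({"a": "foo", "n": 1}, {"a": "bar", "n": 1})  # -> {"a": "foobar", "n": 1}
merge_dicts({"a": "foo"}, {"a": 1})  # -> TypeError: differing types for key "a"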


@@ -60,7 +60,8 @@ def py_anext(
             Callable[[AsyncIterator[T]], Awaitable[T]], type(iterator).__anext__
         )
     except AttributeError as e:
-        raise TypeError(f"{iterator!r} is not an async iterator") from e
+        msg = f"{iterator!r} is not an async iterator"
+        raise TypeError(msg) from e

     if default is _no_default:
         return __anext__(iterator)


@@ -73,8 +73,9 @@ def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str:
     elif default is not None:
         return default
     else:
-        raise ValueError(
+        msg = (
             f"Did not find {key}, please add an environment variable"
             f" `{env_key}` which contains it, or pass"
             f" `{key}` as a named parameter."
         )
+        raise ValueError(msg)
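A usage sketch of the lookup order above (the environment-variable name is illustrative):

from langchain_core.utils.env import get_from_env

# Returns os.environ["MY_SERVICE_API_KEY"] if set, else the default;
# with no default, the ValueError built above is raised.
api_key = get_from_env("api_key", "MY_SERVICE_API_KEY", default="dummy-key")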


@@ -25,10 +25,11 @@ class StrictFormatter(Formatter):
             ValueError: If any arguments are provided.
         """
         if len(args) > 0:
-            raise ValueError(
+            msg = (
                 "No arguments should be provided, "
                 "everything should be passed as keyword arguments."
             )
+            raise ValueError(msg)
         return super().vformat(format_string, args, kwargs)

     def validate_input_variables(


@@ -105,7 +105,8 @@ def convert_pydantic_to_openai_function(
     elif hasattr(model, "schema"):
         schema = model.schema()  # Pydantic 1
     else:
-        raise TypeError("Model must be a Pydantic model.")
+        msg = "Model must be a Pydantic model."
+        raise TypeError(msg)
     schema = dereference_refs(schema)
     if "definitions" in schema:  # pydantic 1
         schema.pop("definitions", None)
@@ -237,11 +238,12 @@ def _convert_any_typed_dicts_to_pydantic(
                 if (field_desc := field_kwargs.get("description")) and not isinstance(
                     field_desc, str
                 ):
-                    raise ValueError(
+                    msg = (
                         f"Invalid annotation for field {arg}. Third argument to "
                         f"Annotated must be a string description, received value of "
                         f"type {type(field_desc)}."
                     )
+                    raise ValueError(msg)
                 elif arg_desc := arg_descriptions.get(arg):
                     field_kwargs["description"] = arg_desc
                 else:
@@ -387,12 +389,13 @@ def convert_to_openai_function(
     elif callable(function):
         oai_function = cast(dict, convert_python_function_to_openai_function(function))
     else:
-        raise ValueError(
+        msg = (
             f"Unsupported function\n\n{function}\n\nFunctions must be passed in"
             " as Dict, pydantic.BaseModel, or Callable. If they're a dict they must"
             " either be in OpenAI function format or valid JSON schema with top-level"
             " 'title' and 'description' keys."
         )
+        raise ValueError(msg)

     if strict is not None:
         oai_function["strict"] = strict
@@ -553,7 +556,8 @@ def _parse_google_docstring(
         if filtered_annotations and (
             len(docstring_blocks) < 2 or not docstring_blocks[1].startswith("Args:")
         ):
-            raise ValueError("Found invalid Google-Style docstring.")
+            msg = "Found invalid Google-Style docstring."
+            raise ValueError(msg)
         descriptors = []
         args_block = None
         past_descriptors = False
@@ -571,7 +575,8 @@ def _parse_google_docstring(
         description = " ".join(descriptors)
     else:
         if error_on_invalid_docstring:
-            raise ValueError("Found invalid Google-Style docstring.")
+            msg = "Found invalid Google-Style docstring."
+            raise ValueError(msg)
         description = ""
     args_block = None
     arg_descriptions = {}
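A hedged sketch of the conversion entry point touched above (the model is illustrative):

from pydantic import BaseModel, Field

from langchain_core.utils.function_calling import convert_to_openai_function

class GetWeather(BaseModel):
    """Get the current weather for a city."""

    city: str = Field(description="Name of the city")

oai_fn = convert_to_openai_function(GetWeather)
# Passing anything that is not a dict, Pydantic model, or callable raises
# the "Unsupported function" ValueError rewritten above.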


@@ -179,11 +179,13 @@ def parse_and_check_json_markdown(text: str, expected_keys: list[str]) -> dict:
     try:
         json_obj = parse_json_markdown(text)
     except json.JSONDecodeError as e:
-        raise OutputParserException(f"Got invalid JSON object. Error: {e}") from e
+        msg = f"Got invalid JSON object. Error: {e}"
+        raise OutputParserException(msg) from e
     for key in expected_keys:
         if key not in json_obj:
-            raise OutputParserException(
+            msg = (
                 f"Got invalid return object. Expected key `{key}` "
                 f"to be present, but got {json_obj}"
             )
+            raise OutputParserException(msg)
     return json_obj


@@ -8,10 +8,11 @@ from typing import Any, Optional
 def _retrieve_ref(path: str, schema: dict) -> dict:
     components = path.split("/")
     if components[0] != "#":
-        raise ValueError(
+        msg = (
             "ref paths are expected to be URI fragments, meaning they should start "
             "with #."
         )
+        raise ValueError(msg)
     out = schema
     for component in components[1:]:
         if component in out:
@@ -19,7 +20,8 @@ def _retrieve_ref(path: str, schema: dict) -> dict:
         elif component.isdigit() and int(component) in out:
             out = out[int(component)]
         else:
-            raise KeyError(f"Reference '{path}' not found.")
+            msg = f"Reference '{path}' not found."
+            raise KeyError(msg)
     return deepcopy(out)
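A small sketch of the ref-resolution behavior these checks protect (the schema is illustrative):

from langchain_core.utils.json_schema import dereference_refs

schema = {
    "$defs": {"Name": {"type": "string"}},
    "properties": {"name": {"$ref": "#/$defs/Name"}},
}
resolved = dereference_refs(schema)
# A ref that does not start with "#" raises the ValueError above; a ref
# pointing at a missing component raises the KeyError above.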


@@ -144,7 +144,8 @@ def parse_tag(template: str, l_del: str, r_del: str) -> tuple[tuple[str, str], s
     try:
         tag, template = template.split(r_del, 1)
     except ValueError as e:
-        raise ChevronError("unclosed tag " f"at line {_CURRENT_LINE}") from e
+        msg = "unclosed tag " f"at line {_CURRENT_LINE}"
+        raise ChevronError(msg) from e

     # Find the type meaning of the first character
     tag_type = tag_types.get(tag[0], "variable")
@@ -164,9 +165,8 @@ def parse_tag(template: str, l_del: str, r_del: str) -> tuple[tuple[str, str], s
         # Otherwise we should complain
         else:
-            raise ChevronError(
-                "unclosed set delimiter tag\n" f"at line {_CURRENT_LINE}"
-            )
+            msg = "unclosed set delimiter tag\n" f"at line {_CURRENT_LINE}"
+            raise ChevronError(msg)
     elif (
         # If we might be a no html escape tag
@@ -275,18 +275,20 @@ def tokenize(
             try:
                 last_section = open_sections.pop()
             except IndexError as e:
-                raise ChevronError(
+                msg = (
                     f'Trying to close tag "{tag_key}"\n'
                     "Looks like it was not opened.\n"
                     f"line {_CURRENT_LINE + 1}"
-                ) from e
+                )
+                raise ChevronError(msg) from e
             if tag_key != last_section:
                 # Otherwise we need to complain
-                raise ChevronError(
+                msg = (
                     f'Trying to close tag "{tag_key}"\n'
                     f'last open tag is "{last_section}"\n'
                     f"line {_CURRENT_LINE + 1}"
                 )
+                raise ChevronError(msg)

             # Do the second check to see if we're a standalone
             is_standalone = r_sa_check(template, tag_type, is_standalone)
@@ -313,11 +315,12 @@ def tokenize(
     # If there are any open sections when we're done
     if open_sections:
         # Then we need to complain
-        raise ChevronError(
+        msg = (
             "Unexpected EOF\n"
             f'the tag "{open_sections[-1]}" was never closed\n'
             f"was opened at line {_LAST_TAG_LINE}"
         )
+        raise ChevronError(msg)

 #


@@ -63,7 +63,8 @@ elif PYDANTIC_MAJOR_VERSION == 2:
     PydanticBaseModel = Union[BaseModel, pydantic.BaseModel]  # type: ignore
     TypeBaseModel = Union[type[BaseModel], type[pydantic.BaseModel]]  # type: ignore
 else:
-    raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}")
+    msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
+    raise ValueError(msg)


 TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
@@ -116,7 +117,8 @@ def is_basemodel_subclass(cls: type) -> bool:
         if issubclass(cls, BaseModelV1):
             return True
     else:
-        raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}")
+        msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
+        raise ValueError(msg)
     return False
@@ -144,7 +146,8 @@ def is_basemodel_instance(obj: Any) -> bool:
         if isinstance(obj, BaseModelV1):
             return True
     else:
-        raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}")
+        msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
+        raise ValueError(msg)
     return False
@@ -233,9 +236,8 @@ def _create_subset_model_v1(
     elif PYDANTIC_MAJOR_VERSION == 2:
         from pydantic.v1 import create_model  # type: ignore
     else:
-        raise NotImplementedError(
-            f"Unsupported pydantic version: {PYDANTIC_MAJOR_VERSION}"
-        )
+        msg = f"Unsupported pydantic version: {PYDANTIC_MAJOR_VERSION}"
+        raise NotImplementedError(msg)

     fields = {}
@@ -339,9 +341,8 @@ def _create_subset_model(
             fn_description=fn_description,
         )
     else:
-        raise NotImplementedError(
-            f"Unsupported pydantic version: {PYDANTIC_MAJOR_VERSION}"
-        )
+        msg = f"Unsupported pydantic version: {PYDANTIC_MAJOR_VERSION}"
+        raise NotImplementedError(msg)


 if PYDANTIC_MAJOR_VERSION == 2:
@@ -376,7 +377,8 @@ if PYDANTIC_MAJOR_VERSION == 2:
         elif hasattr(model, "__fields__"):
             return model.__fields__  # type: ignore
         else:
-            raise TypeError(f"Expected a Pydantic model. Got {type(model)}")
+            msg = f"Expected a Pydantic model. Got {type(model)}"
+            raise TypeError(msg)
 elif PYDANTIC_MAJOR_VERSION == 1:
     from pydantic import BaseModel as BaseModelV1_
@@ -386,7 +388,8 @@ elif PYDANTIC_MAJOR_VERSION == 1:
         """Get the field names of a Pydantic model."""
         return model.__fields__  # type: ignore
 else:
-    raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}")
+    msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
+    raise ValueError(msg)

 _SchemaConfig = ConfigDict(
     arbitrary_types_allowed=True, frozen=True, protected_namespaces=()
@@ -536,11 +539,12 @@ def _remap_field_definitions(field_definitions: dict[str, Any]) -> dict[str, Any
         if key.startswith("_") or key in _RESERVED_NAMES:
             # Let's add a prefix to avoid colliding with internal pydantic fields
             if isinstance(value, FieldInfo):
-                raise NotImplementedError(
+                msg = (
                     f"Remapping for fields starting with '_' or fields with a name "
                     f"matching a reserved name {_RESERVED_NAMES} is not supported if "
                     f" the field is a pydantic Field instance. Got {key}."
                 )
+                raise NotImplementedError(msg)
             type_, default_ = value
             remapped[f"private_{key}"] = (
                 type_,
@@ -583,10 +587,11 @@ def create_model_v2(
     if root:
         if field_definitions:
-            raise NotImplementedError(
+            msg = (
                 "When specifying __root__ no other "
                 f"fields should be provided. Got {field_definitions}"
             )
+            raise NotImplementedError(msg)

         if isinstance(root, tuple):
             kwargs = {"type_": root[0], "default_": root[1]}

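For reference, the mechanical change repeated throughout this commit is ruff's EM102 auto-fix: an f-string built inline inside a `raise` is bound to a local `msg` first, so a traceback shows only the rendered message instead of echoing the raw expression a second time. A minimal before/after sketch, using a hypothetical `check_pydantic_version` helper rather than any function from this file:

    # Before (flagged by EM102: f-string used directly in the exception):
    #     raise ValueError(f"Unsupported Pydantic version: {major}")
    # After (the auto-fix): bind the message, then raise it.
    def check_pydantic_version(major: int) -> None:
        if major not in (1, 2):
            msg = f"Unsupported Pydantic version: {major}"
            raise ValueError(msg)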
View File

@ -44,11 +44,12 @@ def xor_args(*arg_groups: tuple[str, ...]) -> Callable:
invalid_groups = [i for i, count in enumerate(counts) if count != 1] invalid_groups = [i for i, count in enumerate(counts) if count != 1]
if invalid_groups: if invalid_groups:
invalid_group_names = [", ".join(arg_groups[i]) for i in invalid_groups] invalid_group_names = [", ".join(arg_groups[i]) for i in invalid_groups]
raise ValueError( msg = (
"Exactly one argument in each of the following" "Exactly one argument in each of the following"
" groups must be defined:" " groups must be defined:"
f" {', '.join(invalid_group_names)}" f" {', '.join(invalid_group_names)}"
) )
raise ValueError(msg)
return func(*args, **kwargs) return func(*args, **kwargs)
return wrapper return wrapper
@ -134,10 +135,11 @@ def guard_import(
module = importlib.import_module(module_name, package) module = importlib.import_module(module_name, package)
except (ImportError, ModuleNotFoundError) as e: except (ImportError, ModuleNotFoundError) as e:
pip_name = pip_name or module_name.split(".")[0].replace("_", "-") pip_name = pip_name or module_name.split(".")[0].replace("_", "-")
raise ImportError( msg = (
f"Could not import {module_name} python package. " f"Could not import {module_name} python package. "
f"Please install it with `pip install {pip_name}`." f"Please install it with `pip install {pip_name}`."
) from e )
raise ImportError(msg) from e
return module return module
@ -166,25 +168,29 @@ def check_package_version(
""" """
imported_version = parse(version(package)) imported_version = parse(version(package))
if lt_version is not None and imported_version >= parse(lt_version): if lt_version is not None and imported_version >= parse(lt_version):
raise ValueError( msg = (
f"Expected {package} version to be < {lt_version}. Received " f"Expected {package} version to be < {lt_version}. Received "
f"{imported_version}." f"{imported_version}."
) )
raise ValueError(msg)
if lte_version is not None and imported_version > parse(lte_version): if lte_version is not None and imported_version > parse(lte_version):
raise ValueError( msg = (
f"Expected {package} version to be <= {lte_version}. Received " f"Expected {package} version to be <= {lte_version}. Received "
f"{imported_version}." f"{imported_version}."
) )
raise ValueError(msg)
if gt_version is not None and imported_version <= parse(gt_version): if gt_version is not None and imported_version <= parse(gt_version):
raise ValueError( msg = (
f"Expected {package} version to be > {gt_version}. Received " f"Expected {package} version to be > {gt_version}. Received "
f"{imported_version}." f"{imported_version}."
) )
raise ValueError(msg)
if gte_version is not None and imported_version < parse(gte_version): if gte_version is not None and imported_version < parse(gte_version):
raise ValueError( msg = (
f"Expected {package} version to be >= {gte_version}. Received " f"Expected {package} version to be >= {gte_version}. Received "
f"{imported_version}." f"{imported_version}."
) )
raise ValueError(msg)
def get_pydantic_field_names(pydantic_cls: Any) -> set[str]: def get_pydantic_field_names(pydantic_cls: Any) -> set[str]:
@ -230,7 +236,8 @@ def _build_model_kwargs(
extra_kwargs = values.get("model_kwargs", {}) extra_kwargs = values.get("model_kwargs", {})
for field_name in list(values): for field_name in list(values):
if field_name in extra_kwargs: if field_name in extra_kwargs:
raise ValueError(f"Found {field_name} supplied twice.") msg = f"Found {field_name} supplied twice."
raise ValueError(msg)
if field_name not in all_required_field_names: if field_name not in all_required_field_names:
warnings.warn( warnings.warn(
f"""WARNING! {field_name} is not default parameter. f"""WARNING! {field_name} is not default parameter.
@ -276,7 +283,8 @@ def build_extra_kwargs(
""" """
for field_name in list(values): for field_name in list(values):
if field_name in extra_kwargs: if field_name in extra_kwargs:
raise ValueError(f"Found {field_name} supplied twice.") msg = f"Found {field_name} supplied twice."
raise ValueError(msg)
if field_name not in all_required_field_names: if field_name not in all_required_field_names:
warnings.warn( warnings.warn(
f"""WARNING! {field_name} is not default parameter. f"""WARNING! {field_name} is not default parameter.
@ -288,10 +296,11 @@ def build_extra_kwargs(
invalid_model_kwargs = all_required_field_names.intersection(extra_kwargs.keys()) invalid_model_kwargs = all_required_field_names.intersection(extra_kwargs.keys())
if invalid_model_kwargs: if invalid_model_kwargs:
raise ValueError( msg = (
f"Parameters {invalid_model_kwargs} should be specified explicitly. " f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter." f"Instead they were passed in as part of `model_kwargs` parameter."
) )
raise ValueError(msg)
return extra_kwargs return extra_kwargs
@ -386,11 +395,12 @@ def from_env(
if error_message: if error_message:
raise ValueError(error_message) raise ValueError(error_message)
else: else:
raise ValueError( msg = (
f"Did not find {key}, please add an environment variable" f"Did not find {key}, please add an environment variable"
f" `{key}` which contains it, or pass" f" `{key}` which contains it, or pass"
f" `{key}` as a named parameter." f" `{key}` as a named parameter."
) )
raise ValueError(msg)
return get_from_env_fn return get_from_env_fn
@ -449,10 +459,11 @@ def secret_from_env(
if error_message: if error_message:
raise ValueError(error_message) raise ValueError(error_message)
else: else:
raise ValueError( msg = (
f"Did not find {key}, please add an environment variable" f"Did not find {key}, please add an environment variable"
f" `{key}` which contains it, or pass" f" `{key}` which contains it, or pass"
f" `{key}` as a named parameter." f" `{key}` as a named parameter."
) )
raise ValueError(msg)
return get_secret_from_env return get_secret_from_env

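The `guard_import` hunk above also preserves the `from e` chaining, which keeps the original ImportError attached as the `__cause__` of the friendlier one. A self-contained sketch of that shape, assuming nothing beyond the standard library (the name `guard_import_sketch` is hypothetical, not the library function):

    import importlib
    from types import ModuleType

    def guard_import_sketch(module_name: str, pip_name: str | None = None) -> ModuleType:
        try:
            return importlib.import_module(module_name)
        except ImportError as e:
            pip_name = pip_name or module_name.split(".")[0].replace("_", "-")
            msg = (
                f"Could not import {module_name} python package. "
                f"Please install it with `pip install {pip_name}`."
            )
            # `from e` keeps the underlying import failure in the traceback.
            raise ImportError(msg) from e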
View File

@ -93,10 +93,11 @@ class VectorStore(ABC):
texts if isinstance(texts, (list, tuple)) else list(texts) texts if isinstance(texts, (list, tuple)) else list(texts)
) )
if metadatas and len(metadatas) != len(texts_): if metadatas and len(metadatas) != len(texts_):
raise ValueError( msg = (
"The number of metadatas must match the number of texts." "The number of metadatas must match the number of texts."
f"Got {len(metadatas)} metadatas and {len(texts_)} texts." f"Got {len(metadatas)} metadatas and {len(texts_)} texts."
) )
raise ValueError(msg)
metadatas_ = iter(metadatas) if metadatas else cycle([{}]) metadatas_ = iter(metadatas) if metadatas else cycle([{}])
ids_: Iterator[Optional[str]] = iter(ids) if ids else cycle([None]) ids_: Iterator[Optional[str]] = iter(ids) if ids else cycle([None])
docs = [ docs = [
@ -108,9 +109,8 @@ class VectorStore(ABC):
kwargs["ids"] = ids kwargs["ids"] = ids
return self.add_documents(docs, **kwargs) return self.add_documents(docs, **kwargs)
raise NotImplementedError( msg = f"`add_texts` has not been implemented for {self.__class__.__name__}."
f"`add_texts` has not been implemented for {self.__class__.__name__} " raise NotImplementedError(msg)
)
@property @property
def embeddings(self) -> Optional[Embeddings]: def embeddings(self) -> Optional[Embeddings]:
@ -133,7 +133,8 @@ class VectorStore(ABC):
False otherwise, None if not implemented. False otherwise, None if not implemented.
""" """
raise NotImplementedError("delete method must be implemented by subclass.") msg = "delete method must be implemented by subclass."
raise NotImplementedError(msg)
def get_by_ids(self, ids: Sequence[str], /) -> list[Document]: def get_by_ids(self, ids: Sequence[str], /) -> list[Document]:
"""Get documents by their IDs. """Get documents by their IDs.
@ -159,9 +160,8 @@ class VectorStore(ABC):
.. versionadded:: 0.2.11 .. versionadded:: 0.2.11
""" """
raise NotImplementedError( msg = f"{self.__class__.__name__} does not yet support get_by_ids."
f"{self.__class__.__name__} does not yet support get_by_ids." raise NotImplementedError(msg)
)
# Implementations should override this method to provide an async native version. # Implementations should override this method to provide an async native version.
async def aget_by_ids(self, ids: Sequence[str], /) -> list[Document]: async def aget_by_ids(self, ids: Sequence[str], /) -> list[Document]:
@ -243,10 +243,11 @@ class VectorStore(ABC):
texts if isinstance(texts, (list, tuple)) else list(texts) texts if isinstance(texts, (list, tuple)) else list(texts)
) )
if metadatas and len(metadatas) != len(texts_): if metadatas and len(metadatas) != len(texts_):
raise ValueError( msg = (
"The number of metadatas must match the number of texts." "The number of metadatas must match the number of texts."
f"Got {len(metadatas)} metadatas and {len(texts_)} texts." f"Got {len(metadatas)} metadatas and {len(texts_)} texts."
) )
raise ValueError(msg)
metadatas_ = iter(metadatas) if metadatas else cycle([{}]) metadatas_ = iter(metadatas) if metadatas else cycle([{}])
ids_: Iterator[Optional[str]] = iter(ids) if ids else cycle([None]) ids_: Iterator[Optional[str]] = iter(ids) if ids else cycle([None])
@ -284,10 +285,11 @@ class VectorStore(ABC):
texts = [doc.page_content for doc in documents] texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents] metadatas = [doc.metadata for doc in documents]
return self.add_texts(texts, metadatas, **kwargs) return self.add_texts(texts, metadatas, **kwargs)
raise NotImplementedError( msg = (
f"`add_documents` and `add_texts` has not been implemented " f"`add_documents` and `add_texts` has not been implemented "
f"for {self.__class__.__name__} " f"for {self.__class__.__name__} "
) )
raise NotImplementedError(msg)
async def aadd_documents( async def aadd_documents(
self, documents: list[Document], **kwargs: Any self, documents: list[Document], **kwargs: Any
@ -347,11 +349,12 @@ class VectorStore(ABC):
elif search_type == "mmr": elif search_type == "mmr":
return self.max_marginal_relevance_search(query, **kwargs) return self.max_marginal_relevance_search(query, **kwargs)
else: else:
raise ValueError( msg = (
f"search_type of {search_type} not allowed. Expected " f"search_type of {search_type} not allowed. Expected "
"search_type to be 'similarity', 'similarity_score_threshold'" "search_type to be 'similarity', 'similarity_score_threshold'"
" or 'mmr'." " or 'mmr'."
) )
raise ValueError(msg)
async def asearch( async def asearch(
self, query: str, search_type: str, **kwargs: Any self, query: str, search_type: str, **kwargs: Any
@ -381,10 +384,11 @@ class VectorStore(ABC):
elif search_type == "mmr": elif search_type == "mmr":
return await self.amax_marginal_relevance_search(query, **kwargs) return await self.amax_marginal_relevance_search(query, **kwargs)
else: else:
raise ValueError( msg = (
f"search_type of {search_type} not allowed. Expected " f"search_type of {search_type} not allowed. Expected "
"search_type to be 'similarity', 'similarity_score_threshold' or 'mmr'." "search_type to be 'similarity', 'similarity_score_threshold' or 'mmr'."
) )
raise ValueError(msg)
@abstractmethod @abstractmethod
def similarity_search( def similarity_search(
@ -1035,17 +1039,19 @@ class VectorStoreRetriever(BaseRetriever):
""" """
search_type = values.get("search_type", "similarity") search_type = values.get("search_type", "similarity")
if search_type not in cls.allowed_search_types: if search_type not in cls.allowed_search_types:
raise ValueError( msg = (
f"search_type of {search_type} not allowed. Valid values are: " f"search_type of {search_type} not allowed. Valid values are: "
f"{cls.allowed_search_types}" f"{cls.allowed_search_types}"
) )
raise ValueError(msg)
if search_type == "similarity_score_threshold": if search_type == "similarity_score_threshold":
score_threshold = values.get("search_kwargs", {}).get("score_threshold") score_threshold = values.get("search_kwargs", {}).get("score_threshold")
if (score_threshold is None) or (not isinstance(score_threshold, float)): if (score_threshold is None) or (not isinstance(score_threshold, float)):
raise ValueError( msg = (
"`score_threshold` is not specified with a float value(0~1) " "`score_threshold` is not specified with a float value(0~1) "
"in `search_kwargs`." "in `search_kwargs`."
) )
raise ValueError(msg)
return values return values
def _get_ls_params(self, **kwargs: Any) -> LangSmithRetrieverParams: def _get_ls_params(self, **kwargs: Any) -> LangSmithRetrieverParams:
@ -1084,7 +1090,8 @@ class VectorStoreRetriever(BaseRetriever):
query, **self.search_kwargs query, **self.search_kwargs
) )
else: else:
raise ValueError(f"search_type of {self.search_type} not allowed.") msg = f"search_type of {self.search_type} not allowed."
raise ValueError(msg)
return docs return docs
async def _aget_relevant_documents( async def _aget_relevant_documents(
@ -1106,7 +1113,8 @@ class VectorStoreRetriever(BaseRetriever):
query, **self.search_kwargs query, **self.search_kwargs
) )
else: else:
raise ValueError(f"search_type of {self.search_type} not allowed.") msg = f"search_type of {self.search_type} not allowed."
raise ValueError(msg)
return docs return docs
def add_documents(self, documents: list[Document], **kwargs: Any) -> list[str]: def add_documents(self, documents: list[Document], **kwargs: Any) -> list[str]:

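The metadatas/ids handling above pairs the length check with `itertools.cycle` so that a missing metadata list degrades to an empty dict per text. A stripped-down illustration of that idiom with hypothetical names, not the vectorstore API itself:

    from itertools import cycle

    def pair_texts_with_metadata(
        texts: list[str], metadatas: list[dict] | None
    ) -> list[tuple[str, dict]]:
        if metadatas and len(metadatas) != len(texts):
            msg = (
                "The number of metadatas must match the number of texts. "
                f"Got {len(metadatas)} metadatas and {len(texts)} texts."
            )
            raise ValueError(msg)
        # cycle([{}]) repeats a shared empty dict; zip stops at the shorter
        # (finite) texts iterator, so the infinite cycle is safe here.
        metadatas_ = iter(metadatas) if metadatas else cycle([{}])
        return list(zip(texts, metadatas_))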
View File

@ -175,10 +175,11 @@ class InMemoryVectorStore(VectorStore):
vectors = self.embedding.embed_documents(texts) vectors = self.embedding.embed_documents(texts)
if ids and len(ids) != len(texts): if ids and len(ids) != len(texts):
raise ValueError( msg = (
f"ids must be the same length as texts. " f"ids must be the same length as texts. "
f"Got {len(ids)} ids and {len(texts)} texts." f"Got {len(ids)} ids and {len(texts)} texts."
) )
raise ValueError(msg)
id_iterator: Iterator[Optional[str]] = ( id_iterator: Iterator[Optional[str]] = (
iter(ids) if ids else iter(doc.id for doc in documents) iter(ids) if ids else iter(doc.id for doc in documents)
@ -207,10 +208,11 @@ class InMemoryVectorStore(VectorStore):
vectors = await self.embedding.aembed_documents(texts) vectors = await self.embedding.aembed_documents(texts)
if ids and len(ids) != len(texts): if ids and len(ids) != len(texts):
raise ValueError( msg = (
f"ids must be the same length as texts. " f"ids must be the same length as texts. "
f"Got {len(ids)} ids and {len(texts)} texts." f"Got {len(ids)} ids and {len(texts)} texts."
) )
raise ValueError(msg)
id_iterator: Iterator[Optional[str]] = ( id_iterator: Iterator[Optional[str]] = (
iter(ids) if ids else iter(doc.id for doc in documents) iter(ids) if ids else iter(doc.id for doc in documents)
@ -432,10 +434,11 @@ class InMemoryVectorStore(VectorStore):
try: try:
import numpy as np import numpy as np
except ImportError as e: except ImportError as e:
raise ImportError( msg = (
"numpy must be installed to use max_marginal_relevance_search " "numpy must be installed to use max_marginal_relevance_search "
"pip install numpy" "pip install numpy"
) from e )
raise ImportError(msg) from e
mmr_chosen_indices = maximal_marginal_relevance( mmr_chosen_indices = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32), np.array(embedding, dtype=np.float32),

View File

@ -35,10 +35,11 @@ def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray:
try: try:
import numpy as np import numpy as np
except ImportError as e: except ImportError as e:
raise ImportError( msg = (
"cosine_similarity requires numpy to be installed. " "cosine_similarity requires numpy to be installed. "
"Please install numpy with `pip install numpy`." "Please install numpy with `pip install numpy`."
) from e )
raise ImportError(msg) from e
if len(x) == 0 or len(y) == 0: if len(x) == 0 or len(y) == 0:
return np.array([]) return np.array([])
@ -46,10 +47,11 @@ def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray:
x = np.array(x) x = np.array(x)
y = np.array(y) y = np.array(y)
if x.shape[1] != y.shape[1]: if x.shape[1] != y.shape[1]:
raise ValueError( msg = (
f"Number of columns in X and Y must be the same. X has shape {x.shape} " f"Number of columns in X and Y must be the same. X has shape {x.shape} "
f"and Y has shape {y.shape}." f"and Y has shape {y.shape}."
) )
raise ValueError(msg)
try: try:
import simsimd as simd # type: ignore[import-not-found] import simsimd as simd # type: ignore[import-not-found]
@ -94,10 +96,11 @@ def maximal_marginal_relevance(
try: try:
import numpy as np import numpy as np
except ImportError as e: except ImportError as e:
raise ImportError( msg = (
"maximal_marginal_relevance requires numpy to be installed. " "maximal_marginal_relevance requires numpy to be installed. "
"Please install numpy with `pip install numpy`." "Please install numpy with `pip install numpy`."
) from e )
raise ImportError(msg) from e
if min(k, len(embedding_list)) <= 0: if min(k, len(embedding_list)) <= 0:
return [] return []

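For context on what the guarded function computes: `_cosine_similarity` works row-wise between two matrices, which is why the column counts must agree. A rough NumPy sketch of the computation, not the library's exact implementation (which can delegate to simsimd when available):

    import numpy as np

    def cosine_similarity_sketch(x: np.ndarray, y: np.ndarray) -> np.ndarray:
        if x.shape[1] != y.shape[1]:
            msg = (
                f"Number of columns in X and Y must be the same. X has shape {x.shape} "
                f"and Y has shape {y.shape}."
            )
            raise ValueError(msg)
        # Normalize rows, then one matrix product yields every pairwise cosine.
        # (Zero-norm rows would need guarding in production code.)
        x_norm = np.linalg.norm(x, axis=1, keepdims=True)
        y_norm = np.linalg.norm(y, axis=1, keepdims=True)
        return (x / x_norm) @ (y / y_norm).T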
View File

@ -44,7 +44,7 @@ python = ">=3.12.4"
[tool.poetry.extras] [tool.poetry.extras]
[tool.ruff.lint] [tool.ruff.lint]
select = [ "B", "C4", "E", "F", "I", "N", "PIE", "SIM", "T201", "UP", "W",] select = [ "B", "C4", "E", "EM", "F", "I", "N", "PIE", "SIM", "T201", "UP", "W",]
ignore = [ "UP007", "W293",] ignore = [ "UP007", "W293",]
[tool.coverage.run] [tool.coverage.run]

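Adding "EM" to the select list enables the whole flake8-errmsg family, not just the f-string case. As a quick reference (hedged against the ruff docs at the time of the commit), the three rules and the shape each auto-fix produces:

    def em_rule_examples(count: int) -> None:
        if count < 0:
            msg = "count must be non-negative"        # EM101: raw string literal
            raise ValueError(msg)
        if count > 100:
            msg = f"count too large: {count}"         # EM102: f-string
            raise ValueError(msg)
        if count == 13:
            msg = "unlucky count: {}".format(count)   # EM103: str.format() call
            raise ValueError(msg)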
View File

@ -47,7 +47,8 @@ def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) ->
only_core = config.getoption("--only-core") or False only_core = config.getoption("--only-core") or False
if only_extended and only_core: if only_extended and only_core:
raise ValueError("Cannot specify both `--only-extended` and `--only-core`.") msg = "Cannot specify both `--only-extended` and `--only-core`."
raise ValueError(msg)
for item in items: for item in items:
requires_marker = item.get_closest_marker("requires") requires_marker = item.get_closest_marker("requires")

View File

@ -66,11 +66,13 @@ class InMemoryCacheBad(BaseCache):
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string.""" """Look up based on prompt and llm_string."""
raise NotImplementedError("This code should not be triggered") msg = "This code should not be triggered"
raise NotImplementedError(msg)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string.""" """Update cache based on prompt and llm_string."""
raise NotImplementedError("This code should not be triggered") msg = "This code should not be triggered"
raise NotImplementedError(msg)
def clear(self, **kwargs: Any) -> None: def clear(self, **kwargs: Any) -> None:
"""Clear cache.""" """Clear cache."""

View File

@ -76,17 +76,20 @@ def test_simple_serialization_secret() -> None:
def test__is_field_useful() -> None: def test__is_field_useful() -> None:
class ArrayObj: class ArrayObj:
def __bool__(self) -> bool: def __bool__(self) -> bool:
raise ValueError("Truthiness can't be determined") msg = "Truthiness can't be determined"
raise ValueError(msg)
def __eq__(self, other: object) -> bool: def __eq__(self, other: object) -> bool:
return self # type: ignore[return-value] return self # type: ignore[return-value]
class NonBoolObj: class NonBoolObj:
def __bool__(self) -> bool: def __bool__(self) -> bool:
raise ValueError("Truthiness can't be determined") msg = "Truthiness can't be determined"
raise ValueError(msg)
def __eq__(self, other: object) -> bool: def __eq__(self, other: object) -> bool:
raise ValueError("Equality can't be determined") msg = "Equality can't be determined"
raise ValueError(msg)
default_x = ArrayObj() default_x = ArrayObj()
default_y = NonBoolObj() default_y = NonBoolObj()

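These test doubles exist because `_is_field_useful` must cope with values (numpy arrays, for one) whose truthiness or equality checks raise. Purely as a guess at the general shape of such a helper, and not the actual implementation:

    def is_field_useful_sketch(value: object, default: object) -> bool:
        # Try truthiness first; objects like ArrayObj above raise here.
        try:
            return bool(value)
        except ValueError:
            pass
        # Fall back to comparing against the default; NonBoolObj raises here too.
        try:
            return bool(value != default)
        except ValueError:
            # Neither check is decidable, so conservatively keep the field.
            return True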
View File

@ -32,15 +32,13 @@ def test_base_generation_parser() -> None:
that support streaming that support streaming
""" """
if len(result) != 1: if len(result) != 1:
raise NotImplementedError( msg = "This output parser can only be used with a single generation."
"This output parser can only be used with a single generation." raise NotImplementedError(msg)
)
generation = result[0] generation = result[0]
if not isinstance(generation, ChatGeneration): if not isinstance(generation, ChatGeneration):
# Say that this one only works with chat generations # Say that this one only works with chat generations
raise OutputParserException( msg = "This output parser can only be used with a chat generation."
"This output parser can only be used with a chat generation." raise OutputParserException(msg)
)
content = generation.message.content content = generation.message.content
assert isinstance(content, str) assert isinstance(content, str)
@ -77,15 +75,13 @@ def test_base_transform_output_parser() -> None:
that support streaming that support streaming
""" """
if len(result) != 1: if len(result) != 1:
raise NotImplementedError( msg = "This output parser can only be used with a single generation."
"This output parser can only be used with a single generation." raise NotImplementedError(msg)
)
generation = result[0] generation = result[0]
if not isinstance(generation, ChatGeneration): if not isinstance(generation, ChatGeneration):
# Say that this one only works with chat generations # Say that this one only works with chat generations
raise OutputParserException( msg = "This output parser can only be used with a chat generation."
"This output parser can only be used with a chat generation." raise OutputParserException(msg)
)
content = generation.message.content content = generation.message.content
assert isinstance(content, str) assert isinstance(content, str)
return content.swapcase() # type: ignore return content.swapcase() # type: ignore

View File

@ -618,7 +618,8 @@ def test_prompt_falsy_vars(
elif template_format == "mustache": elif template_format == "mustache":
template = "{{my_var}}" template = "{{my_var}}"
else: else:
raise ValueError(f"Invalid template format: {template_format}") msg = f"Invalid template format: {template_format}"
raise ValueError(msg)
prompt = PromptTemplate.from_template(template, template_format=template_format) prompt = PromptTemplate.from_template(template, template_format=template_format)

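The branch above picks the variable syntax per dialect: f-string templates use single braces, mustache doubles them. A small usage sketch, assuming only the public `PromptTemplate.from_template` signature exercised by the test:

    from langchain_core.prompts import PromptTemplate

    fstring = PromptTemplate.from_template("{my_var}", template_format="f-string")
    mustache = PromptTemplate.from_template("{{my_var}}", template_format="mustache")
    # Both render the bound value; only the delimiter syntax differs.
    print(fstring.format(my_var="hello"))   # -> hello
    print(mustache.format(my_var="hello"))  # -> hello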
View File

@ -77,9 +77,8 @@ def _schema(obj: Any) -> dict:
"""Return the schema of the object.""" """Return the schema of the object."""
if not is_basemodel_subclass(obj): if not is_basemodel_subclass(obj):
raise TypeError( msg = f"Object must be a Pydantic BaseModel subclass. Got {type(obj)}"
f"Object must be a Pydantic BaseModel subclass. Got {type(obj)}" raise TypeError(msg)
)
# Remap to old style schema # Remap to old style schema
if not hasattr(obj, "model_json_schema"): # V1 model if not hasattr(obj, "model_json_schema"): # V1 model
return obj.schema() return obj.schema()

View File

@ -23,7 +23,8 @@ class MyRunnable(RunnableSerializable[str, str]):
@classmethod @classmethod
def my_error(cls, values: dict[str, Any]) -> Any: def my_error(cls, values: dict[str, Any]) -> Any:
if "_my_hidden_property" in values: if "_my_hidden_property" in values:
raise ValueError("Cannot set _my_hidden_property") msg = "Cannot set _my_hidden_property"
raise ValueError(msg)
return values return values
@model_validator(mode="after") @model_validator(mode="after")

View File

@ -2824,7 +2824,8 @@ async def test_higher_order_lambda_runnable(
elif input["key"] == "english": elif input["key"] == "english":
return itemgetter("input") | english_chain return itemgetter("input") | english_chain
else: else:
raise ValueError(f"Unknown key: {input['key']}") msg = f"Unknown key: {input['key']}"
raise ValueError(msg)
chain: Runnable = input_map | router chain: Runnable = input_map | router
assert dumps(chain, pretty=True) == snapshot assert dumps(chain, pretty=True) == snapshot
@ -2873,7 +2874,8 @@ async def test_higher_order_lambda_runnable(
elif input["key"] == "english": elif input["key"] == "english":
return itemgetter("input") | english_chain return itemgetter("input") | english_chain
else: else:
raise ValueError(f"Unknown key: {input['key']}") msg = f"Unknown key: {input['key']}"
raise ValueError(msg)
achain: Runnable = input_map | arouter achain: Runnable = input_map | arouter
math_spy = mocker.spy(math_chain.__class__, "ainvoke") math_spy = mocker.spy(math_chain.__class__, "ainvoke")
@ -3065,7 +3067,8 @@ def test_map_stream() -> None:
streamed_chunks[0] == {"llm": "i"} streamed_chunks[0] == {"llm": "i"}
or {"chat": _any_id_ai_message_chunk(content="i")} or {"chat": _any_id_ai_message_chunk(content="i")}
): ):
raise AssertionError(f"Got an unexpected chunk: {streamed_chunks[0]}") msg = f"Got an unexpected chunk: {streamed_chunks[0]}"
raise AssertionError(msg)
assert len(streamed_chunks) == len(llm_res) + len(chat_res) assert len(streamed_chunks) == len(llm_res) + len(chat_res)
@ -3714,9 +3717,11 @@ def test_recursive_lambda() -> None:
def test_retrying(mocker: MockerFixture) -> None: def test_retrying(mocker: MockerFixture) -> None:
def _lambda(x: int) -> Union[int, Runnable]: def _lambda(x: int) -> Union[int, Runnable]:
if x == 1: if x == 1:
raise ValueError("x is 1") msg = "x is 1"
raise ValueError(msg)
elif x == 2: elif x == 2:
raise RuntimeError("x is 2") msg = "x is 2"
raise RuntimeError(msg)
else: else:
return x return x
@ -3777,9 +3782,11 @@ def test_retrying(mocker: MockerFixture) -> None:
async def test_async_retrying(mocker: MockerFixture) -> None: async def test_async_retrying(mocker: MockerFixture) -> None:
def _lambda(x: int) -> Union[int, Runnable]: def _lambda(x: int) -> Union[int, Runnable]:
if x == 1: if x == 1:
raise ValueError("x is 1") msg = "x is 1"
raise ValueError(msg)
elif x == 2: elif x == 2:
raise RuntimeError("x is 2") msg = "x is 2"
raise RuntimeError(msg)
else: else:
return x return x
@ -3872,7 +3879,8 @@ def test_runnable_lambda_stream_with_callbacks() -> None:
def raise_value_error(x: int) -> int: def raise_value_error(x: int) -> int:
"""Raise a value error.""" """Raise a value error."""
raise ValueError("x is too large") msg = "x is too large"
raise ValueError(msg)
# Check that the chain on error is invoked # Check that the chain on error is invoked
with pytest.raises(ValueError): with pytest.raises(ValueError):
@ -3950,7 +3958,8 @@ async def test_runnable_lambda_astream_with_callbacks() -> None:
def raise_value_error(x: int) -> int: def raise_value_error(x: int) -> int:
"""Raise a value error.""" """Raise a value error."""
raise ValueError("x is too large") msg = "x is too large"
raise ValueError(msg)
# Check that the chain on error is invoked # Check that the chain on error is invoked
with pytest.raises(ValueError): with pytest.raises(ValueError):
@ -4285,7 +4294,8 @@ def test_runnable_branch_invoke() -> None:
# Test with single branch # Test with single branch
def raise_value_error(x: int) -> int: def raise_value_error(x: int) -> int:
"""Raise a value error.""" """Raise a value error."""
raise ValueError("x is too large") msg = "x is too large"
raise ValueError(msg)
branch = RunnableBranch[int, int]( branch = RunnableBranch[int, int](
(lambda x: x > 100, raise_value_error), (lambda x: x > 100, raise_value_error),
@ -4349,7 +4359,8 @@ def test_runnable_branch_invoke_callbacks() -> None:
def raise_value_error(x: int) -> int: def raise_value_error(x: int) -> int:
"""Raise a value error.""" """Raise a value error."""
raise ValueError("x is too large") msg = "x is too large"
raise ValueError(msg)
branch = RunnableBranch[int, int]( branch = RunnableBranch[int, int](
(lambda x: x > 100, raise_value_error), (lambda x: x > 100, raise_value_error),
@ -4376,7 +4387,8 @@ async def test_runnable_branch_ainvoke_callbacks() -> None:
async def raise_value_error(x: int) -> int: async def raise_value_error(x: int) -> int:
"""Raise a value error.""" """Raise a value error."""
raise ValueError("x is too large") msg = "x is too large"
raise ValueError(msg)
branch = RunnableBranch[int, int]( branch = RunnableBranch[int, int](
(lambda x: x > 100, raise_value_error), (lambda x: x > 100, raise_value_error),
@ -4430,7 +4442,8 @@ def test_runnable_branch_stream_with_callbacks() -> None:
def raise_value_error(x: str) -> Any: def raise_value_error(x: str) -> Any:
"""Raise a value error.""" """Raise a value error."""
raise ValueError(f"x is {x}") msg = f"x is {x}"
raise ValueError(msg)
llm_res = "i'm a textbot" llm_res = "i'm a textbot"
# sleep to better simulate a real stream # sleep to better simulate a real stream
@ -4507,7 +4520,8 @@ async def test_runnable_branch_astream_with_callbacks() -> None:
def raise_value_error(x: str) -> Any: def raise_value_error(x: str) -> Any:
"""Raise a value error.""" """Raise a value error."""
raise ValueError(f"x is {x}") msg = f"x is {x}"
raise ValueError(msg)
llm_res = "i'm a textbot" llm_res = "i'm a textbot"
# sleep to better simulate a real stream # sleep to better simulate a real stream

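One practical upside of binding messages to `msg` before raising: tests that assert on the text with `pytest.raises(..., match=...)` keep passing unchanged, because the rendered message is identical. A hypothetical example in the style of the helpers above:

    import pytest

    def raise_value_error(x: int) -> int:
        """Raise a value error."""
        msg = f"x is {x}"
        raise ValueError(msg)

    def test_raise_value_error() -> None:
        with pytest.raises(ValueError, match="x is 200"):
            raise_value_error(200)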
View File

@ -1596,7 +1596,8 @@ async def test_event_stream_with_retry() -> None:
def fail(inputs: str) -> None: def fail(inputs: str) -> None:
"""Simple func.""" """Simple func."""
raise Exception("fail") msg = "fail"
raise Exception(msg)
chain = RunnableLambda(success) | RunnableLambda(fail).with_retry( chain = RunnableLambda(success) | RunnableLambda(fail).with_retry(
stop_after_attempt=1, stop_after_attempt=1,

View File

@ -1552,7 +1552,8 @@ async def test_event_stream_with_retry() -> None:
def fail(inputs: str) -> None: def fail(inputs: str) -> None:
"""Simple func.""" """Simple func."""
raise Exception("fail") msg = "fail"
raise Exception(msg)
chain = RunnableLambda(success) | RunnableLambda(fail).with_retry( chain = RunnableLambda(success) | RunnableLambda(fail).with_retry(
stop_after_attempt=1, stop_after_attempt=1,
@ -2057,7 +2058,8 @@ class StreamingRunnable(Runnable[Input, Output]):
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output: ) -> Output:
"""Invoke the runnable.""" """Invoke the runnable."""
raise ValueError("Server side error") msg = "Server side error"
raise ValueError(msg)
def stream( def stream(
self, self,

View File

@ -293,7 +293,8 @@ async def test_runnable_sequence_parallel_trace_nesting(method: str) -> None:
elif method == "abatch": elif method == "abatch":
res = (await parent.abatch([1], {"callbacks": cb}))[0] # type: ignore res = (await parent.abatch([1], {"callbacks": cb}))[0] # type: ignore
else: else:
raise ValueError(f"Unknown method {method}") msg = f"Unknown method {method}"
raise ValueError(msg)
assert res == 3 assert res == 3
posts = _get_posts(mock_client_) posts = _get_posts(mock_client_)
name_order = [ name_order = [
@ -345,7 +346,8 @@ async def test_runnable_sequence_parallel_trace_nesting(method: str) -> None:
), f"{name} not after {name_order[i-1]}" ), f"{name} not after {name_order[i-1]}"
prev_dotted_order = dotted_order prev_dotted_order = dotted_order
if name in dotted_order_map: if name in dotted_order_map:
raise ValueError(f"Duplicate name {name}") msg = f"Duplicate name {name}"
raise ValueError(msg)
dotted_order_map[name] = dotted_order dotted_order_map[name] = dotted_order
id_map[name] = posts[i]["id"] id_map[name] = posts[i]["id"]
parent_id_map[name] = posts[i].get("parent_run_id") parent_id_map[name] = posts[i].get("parent_run_id")

View File

@ -52,4 +52,5 @@ def test_importable_all_via_subprocess() -> None:
result = future.result() # Will raise an exception if the callable raised result = future.result() # Will raise an exception if the callable raised
code, module_name = result code, module_name = result
if code != 0: if code != 0:
raise ValueError(f"Failed to import {module_name}.") msg = f"Failed to import {module_name}."
raise ValueError(msg)

View File

@ -106,7 +106,8 @@ def test_is_basemodel_subclass() -> None:
assert is_basemodel_subclass(BaseModelV1) assert is_basemodel_subclass(BaseModelV1)
else: else:
raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}") msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
raise ValueError(msg)
def test_is_basemodel_instance() -> None: def test_is_basemodel_instance() -> None:
@ -132,7 +133,8 @@ def test_is_basemodel_instance() -> None:
assert is_basemodel_instance(Bar(x=5)) assert is_basemodel_instance(Bar(x=5))
else: else:
raise ValueError(f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}") msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
raise ValueError(msg)
@pytest.mark.skipif(PYDANTIC_MAJOR_VERSION != 2, reason="Only tests Pydantic v2") @pytest.mark.skipif(PYDANTIC_MAJOR_VERSION != 2, reason="Only tests Pydantic v2")

View File

@ -177,7 +177,8 @@ def test_guard_import(
elif package is not None and pip_name is not None: elif package is not None and pip_name is not None:
ret = guard_import(module_name, pip_name=pip_name, package=package) ret = guard_import(module_name, pip_name=pip_name, package=package)
else: else:
raise ValueError("Invalid test case") msg = "Invalid test case"
raise ValueError(msg)
assert ret == expected assert ret == expected
@ -204,7 +205,8 @@ def test_guard_import_failure(
elif package is not None and pip_name is not None: elif package is not None and pip_name is not None:
guard_import(module_name, pip_name=pip_name, package=package) guard_import(module_name, pip_name=pip_name, package=package)
else: else:
raise ValueError("Invalid test case") msg = "Invalid test case"
raise ValueError(msg)
pip_name = pip_name or module_name.split(".")[0].replace("_", "-") pip_name = pip_name or module_name.split(".")[0].replace("_", "-")
err_msg = ( err_msg = (
f"Could not import {module_name} python package. " f"Could not import {module_name} python package. "