diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py
index 516485654ce..0cfaf434a3a 100644
--- a/libs/core/langchain_core/language_models/chat_models.py
+++ b/libs/core/langchain_core/language_models/chat_models.py
@@ -409,7 +409,9 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
                     generation = chunk
                 else:
                     generation += chunk
-            assert generation is not None
+            if generation is None:
+                msg = "No generation chunks were returned"
+                raise ValueError(msg)
         except BaseException as e:
             run_manager.on_llm_error(
                 e,
@@ -485,7 +487,9 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
                     generation = chunk
                 else:
                     generation += chunk
-            assert generation is not None
+            if generation is None:
+                msg = "No generation chunks were returned"
+                raise ValueError(msg)
         except BaseException as e:
             await run_manager.on_llm_error(
                 e,
diff --git a/libs/core/langchain_core/language_models/fake_chat_models.py b/libs/core/langchain_core/language_models/fake_chat_models.py
index 95d605b7cb4..2f3c74a4f5c 100644
--- a/libs/core/langchain_core/language_models/fake_chat_models.py
+++ b/libs/core/langchain_core/language_models/fake_chat_models.py
@@ -258,7 +258,10 @@ class GenericFakeChatModel(BaseChatModel):
         if content:
             # Use a regular expression to split on whitespace with a capture group
             # so that we can preserve the whitespace in the output.
-            assert isinstance(content, str)
+            if not isinstance(content, str):
+                msg = "Expected content to be a string."
+                raise ValueError(msg)
+
             content_chunks = cast(list[str], re.split(r"(\s)", content))
 
             for token in content_chunks:
diff --git a/libs/core/langchain_core/language_models/llms.py b/libs/core/langchain_core/language_models/llms.py
index 7fd47e627d7..72073852101 100644
--- a/libs/core/langchain_core/language_models/llms.py
+++ b/libs/core/langchain_core/language_models/llms.py
@@ -565,7 +565,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                     generation = chunk
                 else:
                     generation += chunk
-            assert generation is not None
+            if generation is None:
+                msg = "No generation chunks were returned"
+                raise ValueError(msg)
         except BaseException as e:
             run_manager.on_llm_error(
                 e,
@@ -633,7 +635,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                     generation = chunk
                 else:
                     generation += chunk
-            assert generation is not None
+            if generation is None:
+                msg = "No generation chunks were returned"
+                raise ValueError(msg)
         except BaseException as e:
             await run_manager.on_llm_error(
                 e,
@@ -875,16 +879,24 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             )
         ):
             # We've received a list of callbacks args to apply to each input
-            assert len(callbacks) == len(prompts)
-            assert tags is None or (
+            if len(callbacks) != len(prompts):
+                msg = "callbacks must be the same length as prompts"
+                raise ValueError(msg)
+            if tags is not None and not (
                 isinstance(tags, list) and len(tags) == len(prompts)
-            )
-            assert metadata is None or (
+            ):
+                msg = "tags must be a list of the same length as prompts"
+                raise ValueError(msg)
+            if metadata is not None and not (
                 isinstance(metadata, list) and len(metadata) == len(prompts)
-            )
-            assert run_name is None or (
+            ):
+                msg = "metadata must be a list of the same length as prompts"
+                raise ValueError(msg)
+            if run_name is not None and not (
                 isinstance(run_name, list) and len(run_name) == len(prompts)
-            )
+            ):
+                msg = "run_name must be a list of the same length as prompts"
+                raise ValueError(msg)
             callbacks = cast(list[Callbacks], callbacks)
             tags_list = cast(list[Optional[list[str]]], tags or ([None] * len(prompts)))
             metadata_list = cast(
@@ -1109,16 +1121,24 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             or callbacks[0] is None
         ):
             # We've received a list of callbacks args to apply to each input
-            assert len(callbacks) == len(prompts)
-            assert tags is None or (
+            if len(callbacks) != len(prompts):
+                msg = "callbacks must be the same length as prompts"
+                raise ValueError(msg)
+            if tags is not None and not (
                 isinstance(tags, list) and len(tags) == len(prompts)
-            )
-            assert metadata is None or (
+            ):
+                msg = "tags must be a list of the same length as prompts"
+                raise ValueError(msg)
+            if metadata is not None and not (
                 isinstance(metadata, list) and len(metadata) == len(prompts)
-            )
-            assert run_name is None or (
+            ):
+                msg = "metadata must be a list of the same length as prompts"
+                raise ValueError(msg)
+            if run_name is not None and not (
                 isinstance(run_name, list) and len(run_name) == len(prompts)
-            )
+            ):
+                msg = "run_name must be a list of the same length as prompts"
+                raise ValueError(msg)
             callbacks = cast(list[Callbacks], callbacks)
             tags_list = cast(list[Optional[list[str]]], tags or ([None] * len(prompts)))
             metadata_list = cast(
diff --git a/libs/core/langchain_core/runnables/graph_ascii.py b/libs/core/langchain_core/runnables/graph_ascii.py
index f2a031ba43d..96ea29424ee 100644
--- a/libs/core/langchain_core/runnables/graph_ascii.py
+++ b/libs/core/langchain_core/runnables/graph_ascii.py
@@ -46,8 +46,9 @@ class AsciiCanvas:
     TIMEOUT = 10
 
     def __init__(self, cols: int, lines: int) -> None:
-        assert cols > 1
-        assert lines > 1
+        if cols <= 1 or lines <= 1:
+            msg = "Canvas dimensions should be > 1"
+            raise ValueError(msg)
 
         self.cols = cols
         self.lines = lines
@@ -70,11 +71,15 @@ class AsciiCanvas:
             char (str): character to place in the specified point on the
                 canvas.
         """
-        assert len(char) == 1
-        assert x >= 0
-        assert x < self.cols
-        assert y >= 0
-        assert y < self.lines
+        if len(char) != 1:
+            msg = "char should be a single character"
+            raise ValueError(msg)
+        if x >= self.cols or x < 0:
+            msg = "x should be >= 0 and < number of columns"
+            raise ValueError(msg)
+        if y >= self.lines or y < 0:
+            msg = "y should be >= 0 and < number of lines"
+            raise ValueError(msg)
 
         self.canvas[y][x] = char
 
@@ -130,8 +135,9 @@ class AsciiCanvas:
             width (int): box width.
             height (int): box height.
         """
-        assert width > 1
-        assert height > 1
+        if width <= 1 or height <= 1:
+            msg = "Box dimensions should be > 1"
+            raise ValueError(msg)
 
         width -= 1
         height -= 1
@@ -265,7 +271,9 @@ def draw_ascii(vertices: Mapping[str, str], edges: Sequence[LangEdge]) -> str:
 
     # NOTE: first draw edges so that node boxes could overwrite them
     for edge in sug.g.sE:
-        assert len(edge.view._pts) > 1
+        if len(edge.view._pts) <= 1:
+            msg = "Not enough points to draw an edge"
+            raise ValueError(msg)
         for index in range(1, len(edge.view._pts)):
             start = edge.view._pts[index - 1]
             end = edge.view._pts[index]
@@ -275,10 +283,15 @@ def draw_ascii(vertices: Mapping[str, str], edges: Sequence[LangEdge]) -> str:
             end_x = int(round(end[0] - minx))
             end_y = int(round(end[1] - miny))
 
-            assert start_x >= 0
-            assert start_y >= 0
-            assert end_x >= 0
-            assert end_y >= 0
+            if start_x < 0 or start_y < 0 or end_x < 0 or end_y < 0:
+                msg = (
+                    "Invalid edge coordinates: "
+                    f"start_x={start_x}, "
+                    f"start_y={start_y}, "
+                    f"end_x={end_x}, "
+                    f"end_y={end_y}"
+                )
+                raise ValueError(msg)
 
             canvas.line(start_x, start_y, end_x, end_y, "." if edge.data else "*")
 
diff --git a/libs/core/langchain_core/runnables/passthrough.py b/libs/core/langchain_core/runnables/passthrough.py
index 7e7fa7b45bd..9881def4669 100644
--- a/libs/core/langchain_core/runnables/passthrough.py
+++ b/libs/core/langchain_core/runnables/passthrough.py
@@ -472,9 +472,9 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
         config: RunnableConfig,
         **kwargs: Any,
     ) -> dict[str, Any]:
-        assert isinstance(
-            input, dict
-        ), "The input to RunnablePassthrough.assign() must be a dict."
+        if not isinstance(input, dict):
+            msg = "The input to RunnablePassthrough.assign() must be a dict."
+            raise ValueError(msg)
 
         return {
             **input,
@@ -500,9 +500,9 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
         config: RunnableConfig,
         **kwargs: Any,
     ) -> dict[str, Any]:
-        assert isinstance(
-            input, dict
-        ), "The input to RunnablePassthrough.assign() must be a dict."
+        if not isinstance(input, dict):
+            msg = "The input to RunnablePassthrough.assign() must be a dict."
+            raise ValueError(msg)
 
         return {
             **input,
@@ -553,9 +553,9 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
             )
             # consume passthrough stream
            for chunk in for_passthrough:
-                assert isinstance(
-                    chunk, dict
-                ), "The input to RunnablePassthrough.assign() must be a dict."
+                if not isinstance(chunk, dict):
+                    msg = "The input to RunnablePassthrough.assign() must be a dict."
+                    raise ValueError(msg)
                # remove mapper keys from passthrough chunk, to be overwritten by map
                filtered = AddableDict(
                    {k: v for k, v in chunk.items() if k not in mapper_keys}
@@ -603,9 +603,10 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
        )
        # consume passthrough stream
        async for chunk in for_passthrough:
-            assert isinstance(
-                chunk, dict
-            ), "The input to RunnablePassthrough.assign() must be a dict."
+            if not isinstance(chunk, dict):
+                msg = "The input to RunnablePassthrough.assign() must be a dict."
+                raise ValueError(msg)
+
            # remove mapper keys from passthrough chunk, to be overwritten by map output
            filtered = AddableDict(
                {k: v for k, v in chunk.items() if k not in mapper_keys}
@@ -705,9 +706,9 @@ class RunnablePick(RunnableSerializable[dict[str, Any], dict[str, Any]]):
         return super().get_name(suffix, name=name)
 
     def _pick(self, input: dict[str, Any]) -> Any:
-        assert isinstance(
-            input, dict
-        ), "The input to RunnablePassthrough.assign() must be a dict."
+        if not isinstance(input, dict):
+            msg = "The input to RunnablePassthrough.assign() must be a dict."
+            raise ValueError(msg)
 
         if isinstance(self.keys, str):
             return input.get(self.keys)
diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml
index 1342e7f1310..8a0aea6a31b 100644
--- a/libs/core/pyproject.toml
+++ b/libs/core/pyproject.toml
@@ -45,7 +45,7 @@ python = ">=3.12.4"
 
 [tool.ruff.lint]
 select = [ "ASYNC", "B", "C4", "COM", "DJ", "E", "EM", "EXE", "F", "FLY", "FURB", "I", "ICN", "INT", "LOG", "N", "NPY", "PD", "PIE", "Q", "RSE", "S", "SIM", "SLOT", "T10", "T201", "TID", "UP", "W", "YTT",]
-ignore = [ "COM812", "UP007", "S101", "S110", "S112",]
+ignore = [ "COM812", "UP007", "S110", "S112",]
 
 [tool.coverage.run]
 omit = [ "tests/*",]
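For reference, a minimal sketch of the pattern this diff applies throughout (function and variable names below are illustrative, not taken from the library). `assert` statements are stripped when Python runs with the `-O` flag, so the checks they express silently disappear in optimized mode; ruff flags them via rule S101, which the pyproject.toml change stops ignoring. An explicit `if`/`raise ValueError` always executes, and binding the message to a variable first satisfies EM101 from the already-enabled "EM" selector.

from typing import Optional


def merge_chunks(chunks: list[str]) -> str:
    """Concatenate streamed chunks, failing loudly if nothing was streamed."""
    generation: Optional[str] = None
    for chunk in chunks:
        # Mirror the accumulation loops in chat_models.py / llms.py.
        generation = chunk if generation is None else generation + chunk
    # Before: `assert generation is not None` -- a no-op under `python -O`.
    if generation is None:
        msg = "No generation chunks were returned"
        raise ValueError(msg)
    return generation

With the assert-based version, running under `python -O` would let an empty input slip through and surface later as a confusing downstream error; the explicit raise fails at the boundary instead.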