core: Add ruff rule S101 (no assert) (#29267)

Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>
This commit is contained in:
Christophe Bornet 2025-01-20 21:24:31 +01:00 committed by GitHub
parent e5d62c6ce7
commit 989eec4b7b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 90 additions and 49 deletions

View File

@@ -409,7 +409,9 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
generation = chunk generation = chunk
else: else:
generation += chunk generation += chunk
assert generation is not None if generation is None:
msg = "No generation chunks were returned"
raise ValueError(msg)
except BaseException as e: except BaseException as e:
run_manager.on_llm_error( run_manager.on_llm_error(
e, e,
@@ -485,7 +487,9 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
generation = chunk generation = chunk
else: else:
generation += chunk generation += chunk
assert generation is not None if generation is None:
msg = "No generation chunks were returned"
raise ValueError(msg)
except BaseException as e: except BaseException as e:
await run_manager.on_llm_error( await run_manager.on_llm_error(
e, e,

View File

@@ -258,7 +258,10 @@ class GenericFakeChatModel(BaseChatModel):
if content: if content:
# Use a regular expression to split on whitespace with a capture group # Use a regular expression to split on whitespace with a capture group
# so that we can preserve the whitespace in the output. # so that we can preserve the whitespace in the output.
assert isinstance(content, str) if not isinstance(content, str):
msg = "Expected content to be a string."
raise ValueError(msg)
content_chunks = cast(list[str], re.split(r"(\s)", content)) content_chunks = cast(list[str], re.split(r"(\s)", content))
for token in content_chunks: for token in content_chunks:

View File

@@ -565,7 +565,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
generation = chunk generation = chunk
else: else:
generation += chunk generation += chunk
assert generation is not None if generation is None:
msg = "No generation chunks were returned"
raise ValueError(msg)
except BaseException as e: except BaseException as e:
run_manager.on_llm_error( run_manager.on_llm_error(
e, e,
@@ -633,7 +635,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
generation = chunk generation = chunk
else: else:
generation += chunk generation += chunk
assert generation is not None if generation is None:
msg = "No generation chunks were returned"
raise ValueError(msg)
except BaseException as e: except BaseException as e:
await run_manager.on_llm_error( await run_manager.on_llm_error(
e, e,
@@ -875,16 +879,24 @@ class BaseLLM(BaseLanguageModel[str], ABC):
) )
): ):
# We've received a list of callbacks args to apply to each input # We've received a list of callbacks args to apply to each input
assert len(callbacks) == len(prompts) if len(callbacks) != len(prompts):
assert tags is None or ( msg = "callbacks must be the same length as prompts"
raise ValueError(msg)
if tags is not None and not (
isinstance(tags, list) and len(tags) == len(prompts) isinstance(tags, list) and len(tags) == len(prompts)
) ):
assert metadata is None or ( msg = "tags must be a list of the same length as prompts"
raise ValueError(msg)
if metadata is not None and not (
isinstance(metadata, list) and len(metadata) == len(prompts) isinstance(metadata, list) and len(metadata) == len(prompts)
) ):
assert run_name is None or ( msg = "metadata must be a list of the same length as prompts"
raise ValueError(msg)
if run_name is not None and not (
isinstance(run_name, list) and len(run_name) == len(prompts) isinstance(run_name, list) and len(run_name) == len(prompts)
) ):
msg = "run_name must be a list of the same length as prompts"
raise ValueError(msg)
callbacks = cast(list[Callbacks], callbacks) callbacks = cast(list[Callbacks], callbacks)
tags_list = cast(list[Optional[list[str]]], tags or ([None] * len(prompts))) tags_list = cast(list[Optional[list[str]]], tags or ([None] * len(prompts)))
metadata_list = cast( metadata_list = cast(
@@ -1109,16 +1121,24 @@ class BaseLLM(BaseLanguageModel[str], ABC):
or callbacks[0] is None or callbacks[0] is None
): ):
# We've received a list of callbacks args to apply to each input # We've received a list of callbacks args to apply to each input
assert len(callbacks) == len(prompts) if len(callbacks) != len(prompts):
assert tags is None or ( msg = "callbacks must be the same length as prompts"
raise ValueError(msg)
if tags is not None and not (
isinstance(tags, list) and len(tags) == len(prompts) isinstance(tags, list) and len(tags) == len(prompts)
) ):
assert metadata is None or ( msg = "tags must be a list of the same length as prompts"
raise ValueError(msg)
if metadata is not None and not (
isinstance(metadata, list) and len(metadata) == len(prompts) isinstance(metadata, list) and len(metadata) == len(prompts)
) ):
assert run_name is None or ( msg = "metadata must be a list of the same length as prompts"
raise ValueError(msg)
if run_name is not None and not (
isinstance(run_name, list) and len(run_name) == len(prompts) isinstance(run_name, list) and len(run_name) == len(prompts)
) ):
msg = "run_name must be a list of the same length as prompts"
raise ValueError(msg)
callbacks = cast(list[Callbacks], callbacks) callbacks = cast(list[Callbacks], callbacks)
tags_list = cast(list[Optional[list[str]]], tags or ([None] * len(prompts))) tags_list = cast(list[Optional[list[str]]], tags or ([None] * len(prompts)))
metadata_list = cast( metadata_list = cast(

View File

@@ -46,8 +46,9 @@ class AsciiCanvas:
TIMEOUT = 10 TIMEOUT = 10
def __init__(self, cols: int, lines: int) -> None: def __init__(self, cols: int, lines: int) -> None:
assert cols > 1 if cols <= 1 or lines <= 1:
assert lines > 1 msg = "Canvas dimensions should be > 1"
raise ValueError(msg)
self.cols = cols self.cols = cols
self.lines = lines self.lines = lines
@@ -70,11 +71,15 @@ class AsciiCanvas:
char (str): character to place in the specified point on the char (str): character to place in the specified point on the
canvas. canvas.
""" """
assert len(char) == 1 if len(char) != 1:
assert x >= 0 msg = "char should be a single character"
assert x < self.cols raise ValueError(msg)
assert y >= 0 if x >= self.cols or x < 0:
assert y < self.lines msg = "x should be >= 0 and < number of columns"
raise ValueError(msg)
if y >= self.lines or y < 0:
msg = "y should be >= 0 and < number of lines"
raise ValueError(msg)
self.canvas[y][x] = char self.canvas[y][x] = char
@@ -130,8 +135,9 @@ class AsciiCanvas:
width (int): box width. width (int): box width.
height (int): box height. height (int): box height.
""" """
assert width > 1 if width <= 1 or height <= 1:
assert height > 1 msg = "Box dimensions should be > 1"
raise ValueError(msg)
width -= 1 width -= 1
height -= 1 height -= 1
@@ -265,7 +271,9 @@ def draw_ascii(vertices: Mapping[str, str], edges: Sequence[LangEdge]) -> str:
# NOTE: first draw edges so that node boxes could overwrite them # NOTE: first draw edges so that node boxes could overwrite them
for edge in sug.g.sE: for edge in sug.g.sE:
assert len(edge.view._pts) > 1 if len(edge.view._pts) <= 1:
msg = "Not enough points to draw an edge"
raise ValueError(msg)
for index in range(1, len(edge.view._pts)): for index in range(1, len(edge.view._pts)):
start = edge.view._pts[index - 1] start = edge.view._pts[index - 1]
end = edge.view._pts[index] end = edge.view._pts[index]
@@ -275,10 +283,15 @@ def draw_ascii(vertices: Mapping[str, str], edges: Sequence[LangEdge]) -> str:
end_x = int(round(end[0] - minx)) end_x = int(round(end[0] - minx))
end_y = int(round(end[1] - miny)) end_y = int(round(end[1] - miny))
assert start_x >= 0 if start_x < 0 or start_y < 0 or end_x < 0 or end_y < 0:
assert start_y >= 0 msg = (
assert end_x >= 0 "Invalid edge coordinates: "
assert end_y >= 0 f"start_x={start_x}, "
f"start_y={start_y}, "
f"end_x={end_x}, "
f"end_y={end_y}"
)
raise ValueError(msg)
canvas.line(start_x, start_y, end_x, end_y, "." if edge.data else "*") canvas.line(start_x, start_y, end_x, end_y, "." if edge.data else "*")

View File

@@ -472,9 +472,9 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
config: RunnableConfig, config: RunnableConfig,
**kwargs: Any, **kwargs: Any,
) -> dict[str, Any]: ) -> dict[str, Any]:
assert isinstance( if not isinstance(input, dict):
input, dict msg = "The input to RunnablePassthrough.assign() must be a dict."
), "The input to RunnablePassthrough.assign() must be a dict." raise ValueError(msg)
return { return {
**input, **input,
@@ -500,9 +500,9 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
config: RunnableConfig, config: RunnableConfig,
**kwargs: Any, **kwargs: Any,
) -> dict[str, Any]: ) -> dict[str, Any]:
assert isinstance( if not isinstance(input, dict):
input, dict msg = "The input to RunnablePassthrough.assign() must be a dict."
), "The input to RunnablePassthrough.assign() must be a dict." raise ValueError(msg)
return { return {
**input, **input,
@@ -553,9 +553,9 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
) )
# consume passthrough stream # consume passthrough stream
for chunk in for_passthrough: for chunk in for_passthrough:
assert isinstance( if not isinstance(chunk, dict):
chunk, dict msg = "The input to RunnablePassthrough.assign() must be a dict."
), "The input to RunnablePassthrough.assign() must be a dict." raise ValueError(msg)
# remove mapper keys from passthrough chunk, to be overwritten by map # remove mapper keys from passthrough chunk, to be overwritten by map
filtered = AddableDict( filtered = AddableDict(
{k: v for k, v in chunk.items() if k not in mapper_keys} {k: v for k, v in chunk.items() if k not in mapper_keys}
@@ -603,9 +603,10 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
) )
# consume passthrough stream # consume passthrough stream
async for chunk in for_passthrough: async for chunk in for_passthrough:
assert isinstance( if not isinstance(chunk, dict):
chunk, dict msg = "The input to RunnablePassthrough.assign() must be a dict."
), "The input to RunnablePassthrough.assign() must be a dict." raise ValueError(msg)
# remove mapper keys from passthrough chunk, to be overwritten by map output # remove mapper keys from passthrough chunk, to be overwritten by map output
filtered = AddableDict( filtered = AddableDict(
{k: v for k, v in chunk.items() if k not in mapper_keys} {k: v for k, v in chunk.items() if k not in mapper_keys}
@@ -705,9 +706,9 @@ class RunnablePick(RunnableSerializable[dict[str, Any], dict[str, Any]]):
return super().get_name(suffix, name=name) return super().get_name(suffix, name=name)
def _pick(self, input: dict[str, Any]) -> Any: def _pick(self, input: dict[str, Any]) -> Any:
assert isinstance( if not isinstance(input, dict):
input, dict msg = "The input to RunnablePassthrough.assign() must be a dict."
), "The input to RunnablePassthrough.assign() must be a dict." raise ValueError(msg)
if isinstance(self.keys, str): if isinstance(self.keys, str):
return input.get(self.keys) return input.get(self.keys)

View File

@@ -45,7 +45,7 @@ python = ">=3.12.4"
[tool.ruff.lint] [tool.ruff.lint]
select = [ "ASYNC", "B", "C4", "COM", "DJ", "E", "EM", "EXE", "F", "FLY", "FURB", "I", "ICN", "INT", "LOG", "N", "NPY", "PD", "PIE", "Q", "RSE", "S", "SIM", "SLOT", "T10", "T201", "TID", "UP", "W", "YTT",] select = [ "ASYNC", "B", "C4", "COM", "DJ", "E", "EM", "EXE", "F", "FLY", "FURB", "I", "ICN", "INT", "LOG", "N", "NPY", "PD", "PIE", "Q", "RSE", "S", "SIM", "SLOT", "T10", "T201", "TID", "UP", "W", "YTT",]
ignore = [ "COM812", "UP007", "S101", "S110", "S112",] ignore = [ "COM812", "UP007", "S110", "S112",]
[tool.coverage.run] [tool.coverage.run]
omit = [ "tests/*",] omit = [ "tests/*",]