Mirror of https://github.com/hwchase17/langchain.git, synced 2025-07-09 14:35:50 +00:00

core: Bump ruff version to 0.12 (#31846)

parent 73552883c3
commit 8aed3b61a9
@@ -130,7 +130,7 @@ class BaseLanguageModel(
     )

     @field_validator("verbose", mode="before")
-    def set_verbose(cls, verbose: Optional[bool]) -> bool:
+    def set_verbose(cls, verbose: Optional[bool]) -> bool:  # noqa: FBT001
         """If verbose is None, set it.

         This allows users to pass in None as verbose to access the global setting.
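
Note on the `# noqa: FBT001` additions throughout this diff: ruff's flake8-boolean-trap rule FBT001 flags boolean positional parameters, since a call like `set_verbose(True)` says nothing about what the flag means. The validator above must keep its positional signature for Pydantic, so the warning is suppressed instead. A minimal sketch of the trap and the usual fix (illustrative names, not from this commit):

    # FBT001 flags `flag`: a bare `configure(True)` is opaque at the call site.
    def configure(flag: bool) -> None:
        ...

    # The usual fix is to force keyword-only arguments with a bare `*`.
    def configure_fixed(*, flag: bool) -> None:
        ...

    configure_fixed(flag=True)  # self-documenting call site
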
@@ -129,7 +129,7 @@ def create_base_retry_decorator(
     )


-def _resolve_cache(cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
+def _resolve_cache(*, cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
     """Resolve the cache."""
     if isinstance(cache, BaseCache):
         llm_cache = cache
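
Where the signature can change, the commit applies the real fix instead of a suppression: the bare `*` makes `cache` keyword-only, which is why every `_resolve_cache(cache)` call below becomes `_resolve_cache(cache=cache)`. A small sketch of the mechanics (hypothetical `resolve` stand-in):

    from typing import Optional, Union

    class BaseCache:
        ...

    def resolve(*, cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
        # The bare `*` makes `cache` keyword-only: resolve(None) now raises
        # TypeError, so call sites must spell out resolve(cache=...).
        return cache if isinstance(cache, BaseCache) else None

    assert resolve(cache=None) is None  # OK
    # resolve(None)  # TypeError: takes 0 positional arguments
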
@@ -155,7 +155,7 @@ def _resolve_cache(cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
 def get_prompts(
     params: dict[str, Any],
     prompts: list[str],
-    cache: Optional[Union[BaseCache, bool, None]] = None,
+    cache: Optional[Union[BaseCache, bool, None]] = None,  # noqa: FBT001
 ) -> tuple[dict[int, list], str, list[int], list[str]]:
     """Get prompts that are already cached.

@@ -176,7 +176,7 @@ def get_prompts(
     missing_prompt_idxs = []
     existing_prompts = {}

-    llm_cache = _resolve_cache(cache)
+    llm_cache = _resolve_cache(cache=cache)
     for i, prompt in enumerate(prompts):
         if llm_cache:
             cache_val = llm_cache.lookup(prompt, llm_string)
@@ -191,7 +191,7 @@ def get_prompts(
 async def aget_prompts(
     params: dict[str, Any],
     prompts: list[str],
-    cache: Optional[Union[BaseCache, bool, None]] = None,
+    cache: Optional[Union[BaseCache, bool, None]] = None,  # noqa: FBT001
 ) -> tuple[dict[int, list], str, list[int], list[str]]:
     """Get prompts that are already cached. Async version.

@@ -211,7 +211,7 @@ async def aget_prompts(
     missing_prompts = []
     missing_prompt_idxs = []
     existing_prompts = {}
-    llm_cache = _resolve_cache(cache)
+    llm_cache = _resolve_cache(cache=cache)
     for i, prompt in enumerate(prompts):
         if llm_cache:
             cache_val = await llm_cache.alookup(prompt, llm_string)
@@ -224,7 +224,7 @@ async def aget_prompts(


 def update_cache(
-    cache: Union[BaseCache, bool, None],
+    cache: Union[BaseCache, bool, None],  # noqa: FBT001
     existing_prompts: dict[int, list],
     llm_string: str,
     missing_prompt_idxs: list[int],
@@ -247,7 +247,7 @@ def update_cache(
     Raises:
         ValueError: If the cache is not set and cache is True.
     """
-    llm_cache = _resolve_cache(cache)
+    llm_cache = _resolve_cache(cache=cache)
     for i, result in enumerate(new_results.generations):
         existing_prompts[missing_prompt_idxs[i]] = result
         prompt = prompts[missing_prompt_idxs[i]]
@@ -257,7 +257,7 @@ def update_cache(


 async def aupdate_cache(
-    cache: Union[BaseCache, bool, None],
+    cache: Union[BaseCache, bool, None],  # noqa: FBT001
     existing_prompts: dict[int, list],
     llm_string: str,
     missing_prompt_idxs: list[int],
@@ -280,7 +280,7 @@ async def aupdate_cache(
     Raises:
         ValueError: If the cache is not set and cache is True.
     """
-    llm_cache = _resolve_cache(cache)
+    llm_cache = _resolve_cache(cache=cache)
     for i, result in enumerate(new_results.generations):
         existing_prompts[missing_prompt_idxs[i]] = result
         prompt = prompts[missing_prompt_idxs[i]]
@@ -1341,8 +1341,8 @@ def _first_max_tokens(
                 excluded.content = list(reversed(excluded.content))
             for _ in range(1, num_block):
                 excluded.content = excluded.content[:-1]
-                if token_counter(messages[:idx] + [excluded]) <= max_tokens:
-                    messages = messages[:idx] + [excluded]
+                if token_counter([*messages[:idx], excluded]) <= max_tokens:
+                    messages = [*messages[:idx], excluded]
                     idx += 1
                     included_partial = True
                     break
@@ -1393,7 +1393,7 @@ def _first_max_tokens(
             if partial_strategy == "last":
                 content_splits = list(reversed(content_splits))
             excluded.content = "".join(content_splits)
-            messages = messages[:idx] + [excluded]
+            messages = [*messages[:idx], excluded]
             idx += 1

     if end_on:
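
The `messages[:idx] + [excluded]` rewrites follow ruff's RUF005 (collection-literal concatenation): `[*seq, item]` builds the same list without an intermediate concatenation. A quick equivalence check:

    messages = ["a", "b", "c"]
    idx, excluded = 2, "x"

    by_concat = messages[:idx] + [excluded]
    by_unpack = [*messages[:idx], excluded]
    assert by_concat == by_unpack == ["a", "b", "x"]
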
@@ -97,3 +97,5 @@ class LLMResult(BaseModel):
             self.generations == other.generations
             and self.llm_output == other.llm_output
         )
+
+    __hash__ = None  # type: ignore[assignment]
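
The `__hash__ = None` assignments added across this diff address ruff's eq-without-hash check (PLW1641): a class body that defines `__eq__` without `__hash__` already gets `__hash__` set to None implicitly, and the explicit assignment documents that the class is deliberately unhashable. A minimal sketch (hypothetical class, not from this commit):

    class Point:
        def __init__(self, x: int) -> None:
            self.x = x

        def __eq__(self, other: object) -> bool:
            return isinstance(other, Point) and self.x == other.x

        # Explicitly mark the class unhashable; defining __eq__ alone
        # would already do this implicitly.
        __hash__ = None  # type: ignore[assignment]

    assert Point(1) == Point(1)
    # {Point(1)}  # would raise TypeError: unhashable type: 'Point'
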
@@ -4205,6 +4205,8 @@ class RunnableGenerator(Runnable[Input, Output]):
             return False
         return False

+    __hash__ = None  # type: ignore[assignment]
+
     @override
     def __repr__(self) -> str:
         return f"RunnableGenerator({self.name})"
@@ -4588,6 +4590,8 @@ class RunnableLambda(Runnable[Input, Output]):
             return False
         return False

+    __hash__ = None  # type: ignore[assignment]
+
     def __repr__(self) -> str:
         """A string representation of this Runnable."""
         if self._repr is None:
@@ -3,7 +3,6 @@

 from __future__ import annotations

 from collections.abc import Mapping
-from itertools import starmap
 from typing import (
     TYPE_CHECKING,
     Any,
@@ -206,7 +205,7 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
         configs = get_config_list(config, len(inputs))
         return await gather_with_concurrency(
             configs[0].get("max_concurrency"),
-            *starmap(ainvoke, zip(runnables, actual_inputs, configs)),
+            *map(ainvoke, runnables, actual_inputs, configs),
         )

     @override
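
The `starmap` removal works because `map` accepts multiple iterables and calls the function with one element drawn from each, so `map(f, a, b, c)` is equivalent to `starmap(f, zip(a, b, c))` and the `itertools` import can go. A small demonstration:

    from itertools import starmap

    def add(x: int, y: int) -> int:
        return x + y

    xs, ys = [1, 2, 3], [10, 20, 30]
    assert list(map(add, xs, ys)) == list(starmap(add, zip(xs, ys))) == [11, 22, 33]
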
@@ -776,7 +776,7 @@ class ChildTool(BaseTool):
     def run(
         self,
         tool_input: Union[str, dict[str, Any]],
-        verbose: Optional[bool] = None,
+        verbose: Optional[bool] = None,  # noqa: FBT001
         start_color: Optional[str] = "green",
         color: Optional[str] = "green",
         callbacks: Callbacks = None,
@@ -888,7 +888,7 @@ class ChildTool(BaseTool):
     async def arun(
         self,
         tool_input: Union[str, dict],
-        verbose: Optional[bool] = None,
+        verbose: Optional[bool] = None,  # noqa: FBT001
         start_color: Optional[str] = "green",
         color: Optional[str] = "green",
         callbacks: Callbacks = None,
@@ -130,6 +130,8 @@ class RunLogPatch:
     def __eq__(self, other: object) -> bool:
         return isinstance(other, RunLogPatch) and self.ops == other.ops

+    __hash__ = None  # type: ignore[assignment]
+

 class RunLog(RunLogPatch):
     """Run log."""
@@ -174,6 +176,8 @@ class RunLog(RunLogPatch):
         # Then compare that the ops are the same
         return super().__eq__(other)

+    __hash__ = None  # type: ignore[assignment]
+

 T = TypeVar("T")
@@ -27,7 +27,7 @@ repository = "https://github.com/langchain-ai/langchain"

 [dependency-groups]
 lint = [
-    "ruff<0.12.0,>=0.11.2",
+    "ruff<0.13.0,>=0.12.2",
 ]
 typing = [
     "mypy<1.16,>=1.15",
@@ -95,12 +95,12 @@ ignore = [
     "TC003",  # Doesn't play well with Pydantic
     "TD002",  # Missing author in TODO
     "TD003",  # Missing issue link in TODO
     "UP007",  # Doesn't play well with Pydantic in Python 3.9

     # TODO rules
     "ANN401",
     "BLE",
     "ERA",
     "PLC0415",
     "PLR2004",
 ]
 flake8-type-checking.runtime-evaluated-base-classes = ["pydantic.BaseModel","langchain_core.load.serializable.Serializable","langchain_core.runnables.base.RunnableSerializable"]
@@ -108,6 +108,7 @@ flake8-annotations.allow-star-arg-any = true
 flake8-annotations.mypy-init-return = true
 pydocstyle.convention = "google"
+pydocstyle.ignore-var-parameters = true
 pyupgrade.keep-runtime-typing = true

 [tool.coverage.run]
 omit = [ "tests/*",]
@@ -329,6 +329,7 @@ class StreamingModel(NoStreamingModel):

 @pytest.mark.parametrize("disable_streaming", [True, False, "tool_calling"])
 def test_disable_streaming(
+    *,
     disable_streaming: Union[bool, Literal["tool_calling"]],
 ) -> None:
     model = StreamingModel(disable_streaming=disable_streaming)
@@ -353,6 +354,7 @@ def test_disable_streaming(

 @pytest.mark.parametrize("disable_streaming", [True, False, "tool_calling"])
 async def test_disable_streaming_async(
+    *,
     disable_streaming: Union[bool, Literal["tool_calling"]],
 ) -> None:
     model = StreamingModel(disable_streaming=disable_streaming)
@@ -379,6 +381,7 @@ async def test_disable_streaming_async(

 @pytest.mark.parametrize("disable_streaming", [True, False, "tool_calling"])
 def test_disable_streaming_no_streaming_model(
+    *,
     disable_streaming: Union[bool, Literal["tool_calling"]],
 ) -> None:
     model = NoStreamingModel(disable_streaming=disable_streaming)
@@ -393,6 +396,7 @@ def test_disable_streaming_no_streaming_model(

 @pytest.mark.parametrize("disable_streaming", [True, False, "tool_calling"])
 async def test_disable_streaming_no_streaming_model_async(
+    *,
     disable_streaming: Union[bool, Literal["tool_calling"]],
 ) -> None:
     model = NoStreamingModel(disable_streaming=disable_streaming)
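
The bare `*` added to these tests is safe because pytest supplies parametrized arguments by keyword, so making the boolean parameter keyword-only silences FBT001 without touching the decorator. A minimal sketch:

    import pytest

    @pytest.mark.parametrize("flag", [True, False])
    def test_flag(*, flag: bool) -> None:
        # pytest passes `flag` by keyword, so the keyword-only marker works.
        assert flag in (True, False)
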
@@ -21,6 +21,8 @@ class NonBoolObj:
     def __repr__(self) -> str:
         return self.__class__.__name__

+    __hash__ = None  # type: ignore[assignment]
+

 def test_simple_serialization() -> None:
     class Foo(Serializable):
@@ -100,6 +102,8 @@ def test__is_field_useful() -> None:
         def __eq__(self, other: object) -> bool:
             return self  # type: ignore[return-value]

+        __hash__ = None  # type: ignore[assignment]
+
     default_x = ArrayObj()
     default_y = NonBoolObj()

@@ -266,7 +266,7 @@ def test_prompt_jinja2_missing_input_variables(
     suffix = "Ending with {{ bar }}"

     # Test when missing in suffix
-    with pytest.warns(UserWarning):
+    with pytest.warns(UserWarning, match="Missing variables: {'bar'}"):
         FewShotPromptTemplate(
             input_variables=[],
             suffix=suffix,
@@ -284,7 +284,7 @@ def test_prompt_jinja2_missing_input_variables(
     ).input_variables == ["bar"]

     # Test when missing in prefix
-    with pytest.warns(UserWarning):
+    with pytest.warns(UserWarning, match="Missing variables: {'foo'}"):
         FewShotPromptTemplate(
             input_variables=["bar"],
             suffix=suffix,
@@ -311,7 +311,7 @@ def test_prompt_jinja2_extra_input_variables(
     """Test error is raised when there are too many input variables."""
     prefix = "Starting with {{ foo }}"
     suffix = "Ending with {{ bar }}"
-    with pytest.warns(UserWarning):
+    with pytest.warns(UserWarning, match="Extra variables:"):
         FewShotPromptTemplate(
             input_variables=["bar", "foo", "extra", "thing"],
             suffix=suffix,
@@ -509,7 +509,7 @@ def test_prompt_jinja2_missing_input_variables() -> None:
     """Test error is raised when input variables are not provided."""
     template = "This is a {{ foo }} test."
     input_variables: list = []
-    with pytest.warns(UserWarning):
+    with pytest.warns(UserWarning, match="Missing variables: {'foo'}"):
         PromptTemplate(
             input_variables=input_variables,
             template=template,
@@ -526,7 +526,7 @@ def test_prompt_jinja2_extra_input_variables() -> None:
     """Test error is raised when there are too many input variables."""
     template = "This is a {{ foo }} test."
     input_variables = ["foo", "bar"]
-    with pytest.warns(UserWarning):
+    with pytest.warns(UserWarning, match="Extra variables: {'bar'}"):
         PromptTemplate(
             input_variables=input_variables,
             template=template,
@@ -543,7 +543,9 @@ def test_prompt_jinja2_wrong_input_variables() -> None:
     """Test error is raised when name of input variable is wrong."""
     template = "This is a {{ foo }} test."
     input_variables = ["bar"]
-    with pytest.warns(UserWarning):
+    with pytest.warns(
+        UserWarning, match="Missing variables: {'foo'} Extra variables: {'bar'}"
+    ):
         PromptTemplate(
             input_variables=input_variables,
             template=template,
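
Tightening `pytest.warns(UserWarning)` to `pytest.warns(UserWarning, match=...)` asserts the warning text, not just its category; `match` is applied with `re.search`, and `{'foo'}` is safe in the pattern because braces only act as a regex quantifier in the `{m,n}` form. A sketch of the pattern (hypothetical `warn_missing` helper):

    import warnings

    import pytest

    def warn_missing() -> None:
        warnings.warn("Missing variables: {'foo'}", UserWarning, stacklevel=2)

    def test_warn_message() -> None:
        with pytest.warns(UserWarning, match="Missing variables: {'foo'}"):
            warn_missing()
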
@@ -10,6 +10,8 @@ class AnyStr(str):
     def __eq__(self, other: object) -> bool:
         return isinstance(other, str)

+    __hash__ = str.__hash__
+

 # The code below creates version of pydantic models
 # that will work in unit tests with AnyStr as id field
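
AnyStr takes the opposite resolution from the `__hash__ = None` cases above: overriding `__eq__` in a subclass implicitly sets `__hash__` to None, so the helper restores `str.__hash__` to stay usable as a dict key or set member. Sketch of the behavior:

    class AnyStr(str):
        def __eq__(self, other: object) -> bool:
            return isinstance(other, str)

        # Without this line, AnyStr would be unhashable despite subclassing str.
        __hash__ = str.__hash__

    assert AnyStr("x") == "anything at all"
    assert hash(AnyStr("x")) == hash("x")
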
@@ -884,6 +884,7 @@ def test_validation_error_handling_callable() -> None:
     ],
 )
 def test_validation_error_handling_non_validation_error(
+    *,
     handler: Union[
         bool, str, Callable[[Union[ValidationError, ValidationErrorV1]], str]
     ],
@@ -949,6 +950,7 @@ async def test_async_validation_error_handling_callable() -> None:
     ],
 )
 async def test_async_validation_error_handling_non_validation_error(
+    *,
     handler: Union[
         bool, str, Callable[[Union[ValidationError, ValidationErrorV1]], str]
     ],
@@ -2331,6 +2333,9 @@ def test_tool_return_output_mixin() -> None:
         def __eq__(self, other: object) -> bool:
             return isinstance(other, self.__class__) and self.x == other.x

+        def __hash__(self) -> int:
+            return hash(self.x)
+
     @tool
     def foo(x: int) -> Bar:
         """Foo."""
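
Here the third variant of the eq/hash fix appears: `Bar` gets a real `__hash__` computed from the same field `__eq__` compares, preserving the invariant that equal objects hash equally, which dicts and sets depend on. A minimal sketch:

    class Bar:
        def __init__(self, x: int) -> None:
            self.x = x

        def __eq__(self, other: object) -> bool:
            return isinstance(other, self.__class__) and self.x == other.x

        def __hash__(self) -> int:
            # Hash the same field __eq__ compares, so a == b implies
            # hash(a) == hash(b).
            return hash(self.x)

    assert Bar(1) == Bar(1)
    assert len({Bar(1), Bar(1)}) == 1
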
libs/core/uv.lock: 3180 lines changed (file diff suppressed because it is too large)