langchain: Add ruff rules FBT (#31885)

* Fixed boolean-trap violations in private functions and in tests by making the boolean parameters keyword-only
* Added noqa escapes for public functions to preserve their signatures
See https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt

Co-authored-by: Mason Daugherty <mason@langchain.dev>
Authored by Christophe Bornet on 2025-07-07 18:35:55 +02:00, committed by GitHub
parent 56bbfd9723
commit ed35372580
28 changed files with 77 additions and 53 deletions
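
Background on the rules: FBT001 flags a boolean-typed positional parameter in a function definition, FBT002 flags a boolean default value on a positional parameter, and FBT003 flags a literal boolean passed positionally at a call site. The sketch below (hypothetical function names, not from this commit) shows the trap and the two remedies applied throughout the diff:

def rotate(angle: float, clockwise: bool = True) -> float:  # FBT001, FBT002
    return angle if clockwise else -angle

rotate(90.0, False)  # FBT003: an unexplained bare boolean at the call site

# Remedy 1, used for private helpers and tests: keyword-only parameters.
def rotate_kw(angle: float, *, clockwise: bool = True) -> float:
    return angle if clockwise else -angle

rotate_kw(90.0, clockwise=False)  # the call now documents itself

# Remedy 2, used for public APIs whose signatures must not break: noqa.
def rotate_compat(angle: float, clockwise: bool = True) -> float:  # noqa: FBT001,FBT002
    return angle if clockwise else -angle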

View File

@@ -27,10 +27,10 @@ def _get_default_system_message() -> SystemMessage:
 def create_conversational_retrieval_agent(
     llm: BaseLanguageModel,
     tools: list[BaseTool],
-    remember_intermediate_steps: bool = True,
+    remember_intermediate_steps: bool = True,  # noqa: FBT001,FBT002
     memory_key: str = "chat_history",
     system_message: Optional[SystemMessage] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
     max_token_limit: int = 2000,
     **kwargs: Any,
 ) -> AgentExecutor:

View File

@@ -35,7 +35,7 @@ def create_vectorstore_agent(
     toolkit: VectorStoreToolkit,
     callback_manager: Optional[BaseCallbackManager] = None,
     prefix: str = PREFIX,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
     agent_executor_kwargs: Optional[dict[str, Any]] = None,
     **kwargs: Any,
 ) -> AgentExecutor:
@@ -128,7 +128,7 @@ def create_vectorstore_router_agent(
     toolkit: VectorStoreRouterToolkit,
     callback_manager: Optional[BaseCallbackManager] = None,
     prefix: str = ROUTER_PREFIX,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
     agent_executor_kwargs: Optional[dict[str, Any]] = None,
     **kwargs: Any,
 ) -> AgentExecutor:

View File

@@ -16,7 +16,7 @@ def create_json_chat_agent(
     llm: BaseLanguageModel,
     tools: Sequence[BaseTool],
     prompt: ChatPromptTemplate,
-    stop_sequence: Union[bool, list[str]] = True,
+    stop_sequence: Union[bool, list[str]] = True,  # noqa: FBT001,FBT002
     tools_renderer: ToolsRenderer = render_text_description,
     template_tool_response: str = TEMPLATE_TOOL_RESPONSE,
 ) -> Runnable:

View File

@@ -97,7 +97,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
         self,
         intermediate_steps: list[tuple[AgentAction, str]],
         callbacks: Callbacks = None,
-        with_functions: bool = True,
+        with_functions: bool = True,  # noqa: FBT001,FBT002
         **kwargs: Any,
     ) -> Union[AgentAction, AgentFinish]:
         """Given input, decided what to do.

View File

@@ -17,7 +17,7 @@ def create_openai_tools_agent(
     llm: BaseLanguageModel,
     tools: Sequence[BaseTool],
     prompt: ChatPromptTemplate,
-    strict: Optional[bool] = None,
+    strict: Optional[bool] = None,  # noqa: FBT001
 ) -> Runnable:
     """Create an agent that uses OpenAI tools.

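Note the strict parameter above carries only FBT001: its annotation is boolean-typed, but its default is None rather than a bool, so FBT002 (boolean default value) does not fire. A minimal illustration (hypothetical function):

from typing import Optional

def configure(
    strict: Optional[bool] = None,  # FBT001 only: the default is None
    verbose: bool = False,  # FBT001 and FBT002: bool annotation and bool default
) -> None:
    ...
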
View File

@@ -254,7 +254,10 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
     @field_validator("verbose", mode="before")
     @classmethod
-    def set_verbose(cls, verbose: Optional[bool]) -> bool:
+    def set_verbose(
+        cls,
+        verbose: Optional[bool],  # noqa: FBT001
+    ) -> bool:
         """Set the chain verbosity.
         Defaults to the global setting if not specified by the user.
@@ -353,7 +356,7 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
     def __call__(
         self,
         inputs: Union[dict[str, Any], Any],
-        return_only_outputs: bool = False,
+        return_only_outputs: bool = False,  # noqa: FBT001,FBT002
         callbacks: Callbacks = None,
         *,
         tags: Optional[list[str]] = None,
@@ -404,7 +407,7 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
     async def acall(
         self,
         inputs: Union[dict[str, Any], Any],
-        return_only_outputs: bool = False,
+        return_only_outputs: bool = False,  # noqa: FBT001,FBT002
         callbacks: Callbacks = None,
         *,
         tags: Optional[list[str]] = None,
@@ -454,7 +457,7 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
         self,
         inputs: dict[str, str],
         outputs: dict[str, str],
-        return_only_outputs: bool = False,
+        return_only_outputs: bool = False,  # noqa: FBT001,FBT002
     ) -> dict[str, str]:
         """Validate and prepare chain outputs, and save info about this run to memory.
@@ -479,7 +482,7 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
         self,
         inputs: dict[str, str],
         outputs: dict[str, str],
-        return_only_outputs: bool = False,
+        return_only_outputs: bool = False,  # noqa: FBT001,FBT002
     ) -> dict[str, str]:
         """Validate and prepare chain outputs, and save info about this run to memory.

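__call__ and acall are long-standing public entry points, so return_only_outputs keeps its positional form under a noqa escape instead of becoming keyword-only, which would break existing callers. Callers can still opt into the readable style, since the parameter remains positional-or-keyword (a sketch with a stand-in function, not the real Chain API):

from typing import Any

def call_chain(
    inputs: dict[str, Any],
    return_only_outputs: bool = False,  # noqa: FBT001,FBT002
) -> dict[str, Any]:
    outputs = {"answer": "stub"}
    return outputs if return_only_outputs else {**inputs, **outputs}

call_chain({"question": "..."}, return_only_outputs=True)  # reads clearly
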
View File

@@ -420,7 +420,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
         retriever: BaseRetriever,
         condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
         chain_type: str = "stuff",
-        verbose: bool = False,
+        verbose: bool = False,  # noqa: FBT001,FBT002
         condense_question_llm: Optional[BaseLanguageModel] = None,
         combine_docs_chain_kwargs: Optional[dict] = None,
         callbacks: Callbacks = None,

View File

@@ -30,6 +30,7 @@ def _load_sequential_chain(
     check_assertions_prompt: PromptTemplate,
     revised_summary_prompt: PromptTemplate,
     are_all_true_prompt: PromptTemplate,
+    *,
     verbose: bool = False,
 ) -> SequentialChain:
     return SequentialChain(
@@ -188,7 +189,7 @@ class LLMSummarizationCheckerChain(Chain):
         check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT,
         revised_summary_prompt: PromptTemplate = REVISED_SUMMARY_PROMPT,
         are_all_true_prompt: PromptTemplate = ARE_ALL_TRUE_PROMPT,
-        verbose: bool = False,
+        verbose: bool = False,  # noqa: FBT001,FBT002
         **kwargs: Any,
     ) -> LLMSummarizationCheckerChain:
         chain = _load_sequential_chain(

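_load_sequential_chain is private, so it gets the real fix rather than a noqa: a bare * in the signature makes every parameter after it keyword-only, which removes the positional boolean entirely. The mechanics in miniature (hypothetical names):

def _load(source: str, *, verbose: bool = False) -> str:
    return f"loaded {source} (verbose={verbose})"

_load("checker", verbose=True)  # OK: flag spelled out
# _load("checker", True)  # TypeError: takes 1 positional argument but 2 were given
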
View File

@@ -84,7 +84,7 @@ def create_extraction_chain(
     llm: BaseLanguageModel,
     prompt: Optional[BasePromptTemplate] = None,
     tags: Optional[list[str]] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
 ) -> Chain:
     """Creates a chain that extracts information from a passage.
@@ -152,7 +152,7 @@ def create_extraction_chain_pydantic(
     pydantic_schema: Any,
     llm: BaseLanguageModel,
     prompt: Optional[BasePromptTemplate] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
 ) -> Chain:
     """Creates a chain that extracts information from a passage using pydantic schema.

View File

@@ -23,14 +23,6 @@ if TYPE_CHECKING:
     from openapi_pydantic import Parameter
 
-def _get_description(o: Any, prefer_short: bool) -> Optional[str]:
-    summary = getattr(o, "summary", None)
-    description = getattr(o, "description", None)
-    if prefer_short:
-        return summary or description
-    return description or summary
 
 def _format_url(url: str, path_params: dict) -> str:
     expected_path_param = re.findall(r"{(.*?)}", url)
     new_params = {}
@@ -260,7 +252,7 @@ def get_openapi_chain(
     prompt: Optional[BasePromptTemplate] = None,
     request_chain: Optional[Chain] = None,
     llm_chain_kwargs: Optional[dict] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
     headers: Optional[dict] = None,
     params: Optional[dict] = None,
     **kwargs: Any,

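The deleted _get_description helper was presumably dead code, so removal, the third remedy in this commit, beats rewriting its prefer_short flag. Had it survived, the keyword-only form would look like this (a sketch reconstructed from the deleted body):

from typing import Any, Optional

def _get_description(o: Any, *, prefer_short: bool) -> Optional[str]:
    summary = getattr(o, "summary", None)
    description = getattr(o, "description", None)
    if prefer_short:
        return summary or description
    return description or summary
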
View File

@@ -40,7 +40,7 @@ def create_qa_with_structure_chain(
     schema: Union[dict, type[BaseModel]],
     output_parser: str = "base",
     prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
 ) -> LLMChain:
     """Create a question answering chain that returns an answer with sources
     based on schema.
@@ -119,7 +119,9 @@ def create_qa_with_structure_chain(
         ),
     )
 def create_qa_with_sources_chain(
-    llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any
+    llm: BaseLanguageModel,
+    verbose: bool = False,  # noqa: FBT001,FBT002
+    **kwargs: Any,
 ) -> LLMChain:
     """Create a question answering chain that returns an answer with sources.

View File

@@ -37,6 +37,7 @@ class LoadingCallable(Protocol):
 def _load_map_rerank_chain(
     llm: BaseLanguageModel,
+    *,
     prompt: BasePromptTemplate = MAP_RERANK_PROMPT,
     verbose: bool = False,
     document_variable_name: str = "context",
@@ -56,6 +57,7 @@ def _load_map_rerank_chain(
 def _load_stuff_chain(
     llm: BaseLanguageModel,
+    *,
     prompt: BasePromptTemplate = stuff_prompt.PROMPT,
     document_prompt: BasePromptTemplate = stuff_prompt.EXAMPLE_PROMPT,
     document_variable_name: str = "summaries",
@@ -74,6 +76,7 @@ def _load_stuff_chain(
 def _load_map_reduce_chain(
     llm: BaseLanguageModel,
+    *,
     question_prompt: BasePromptTemplate = map_reduce_prompt.QUESTION_PROMPT,
     combine_prompt: BasePromptTemplate = map_reduce_prompt.COMBINE_PROMPT,
     document_prompt: BasePromptTemplate = map_reduce_prompt.EXAMPLE_PROMPT,
@@ -131,6 +134,7 @@ def _load_map_reduce_chain(
 def _load_refine_chain(
     llm: BaseLanguageModel,
+    *,
     question_prompt: BasePromptTemplate = refine_prompts.DEFAULT_TEXT_QA_PROMPT,
     refine_prompt: BasePromptTemplate = refine_prompts.DEFAULT_REFINE_PROMPT,
     document_prompt: BasePromptTemplate = refine_prompts.EXAMPLE_PROMPT,
@@ -172,7 +176,7 @@ def _load_refine_chain(
 def load_qa_with_sources_chain(
     llm: BaseLanguageModel,
     chain_type: str = "stuff",
-    verbose: Optional[bool] = None,
+    verbose: Optional[bool] = None,  # noqa: FBT001
     **kwargs: Any,
 ) -> BaseCombineDocumentsChain:
     """Load a question answering with sources chain.
"""Load a question answering with sources chain.

View File

@@ -72,7 +72,7 @@ class StructuredQueryOutputParser(BaseOutputParser[StructuredQuery]):
         allowed_comparators: Optional[Sequence[Comparator]] = None,
         allowed_operators: Optional[Sequence[Operator]] = None,
         allowed_attributes: Optional[Sequence[str]] = None,
-        fix_invalid: bool = False,
+        fix_invalid: bool = False,  # noqa: FBT001,FBT002
     ) -> StructuredQueryOutputParser:
         """
         Create a structured query output parser from components.
@@ -269,7 +269,7 @@ def load_query_constructor_chain(
     examples: Optional[list] = None,
     allowed_comparators: Sequence[Comparator] = tuple(Comparator),
     allowed_operators: Sequence[Operator] = tuple(Operator),
-    enable_limit: bool = False,
+    enable_limit: bool = False,  # noqa: FBT001,FBT002
     schema_prompt: Optional[BasePromptTemplate] = None,
     **kwargs: Any,
 ) -> LLMChain:

View File

@@ -36,6 +36,7 @@ class LoadingCallable(Protocol):
 def _load_map_rerank_chain(
     llm: BaseLanguageModel,
+    *,
     prompt: BasePromptTemplate = MAP_RERANK_PROMPT,
     verbose: bool = False,
     document_variable_name: str = "context",
@@ -65,6 +66,7 @@ def _load_map_rerank_chain(
 def _load_stuff_chain(
     llm: BaseLanguageModel,
+    *,
     prompt: Optional[BasePromptTemplate] = None,
     document_variable_name: str = "context",
     verbose: Optional[bool] = None,
@@ -93,6 +95,7 @@ def _load_stuff_chain(
 def _load_map_reduce_chain(
     llm: BaseLanguageModel,
+    *,
     question_prompt: Optional[BasePromptTemplate] = None,
     combine_prompt: Optional[BasePromptTemplate] = None,
     combine_document_variable_name: str = "summaries",
@@ -176,6 +179,7 @@ def _load_map_reduce_chain(
 def _load_refine_chain(
     llm: BaseLanguageModel,
+    *,
     question_prompt: Optional[BasePromptTemplate] = None,
     refine_prompt: Optional[BasePromptTemplate] = None,
     document_variable_name: str = "context_str",
@@ -236,7 +240,7 @@ def _load_refine_chain(
 def load_qa_chain(
     llm: BaseLanguageModel,
     chain_type: str = "stuff",
-    verbose: Optional[bool] = None,
+    verbose: Optional[bool] = None,  # noqa: FBT001
     callback_manager: Optional[BaseCallbackManager] = None,
     **kwargs: Any,
 ) -> BaseCombineDocumentsChain:

View File

@@ -27,6 +27,7 @@ class LoadingCallable(Protocol):
 def _load_stuff_chain(
     llm: BaseLanguageModel,
+    *,
     prompt: BasePromptTemplate = stuff_prompt.PROMPT,
     document_variable_name: str = "text",
     verbose: Optional[bool] = None,
@@ -44,6 +45,7 @@ def _load_stuff_chain(
 def _load_map_reduce_chain(
     llm: BaseLanguageModel,
+    *,
     map_prompt: BasePromptTemplate = map_reduce_prompt.PROMPT,
     combine_prompt: BasePromptTemplate = map_reduce_prompt.PROMPT,
     combine_document_variable_name: str = "text",
@@ -54,7 +56,6 @@ def _load_map_reduce_chain(
     verbose: Optional[bool] = None,
     token_max: int = 3000,
     callbacks: Callbacks = None,
-    *,
     collapse_max_retries: Optional[int] = None,
     **kwargs: Any,
 ) -> MapReduceDocumentsChain:
@@ -117,6 +118,7 @@ def _load_map_reduce_chain(
 def _load_refine_chain(
     llm: BaseLanguageModel,
+    *,
     question_prompt: BasePromptTemplate = refine_prompts.PROMPT,
     refine_prompt: BasePromptTemplate = refine_prompts.REFINE_PROMPT,
     document_variable_name: str = "text",
@@ -141,7 +143,7 @@ def _load_refine_chain(
 def load_summarize_chain(
     llm: BaseLanguageModel,
     chain_type: str = "stuff",
-    verbose: Optional[bool] = None,
+    verbose: Optional[bool] = None,  # noqa: FBT001
     **kwargs: Any,
 ) -> BaseCombineDocumentsChain:
     """Load summarizing chain.

View File

@@ -106,7 +106,7 @@ class _RapidFuzzChainMixin(Chain):
         return result
 
     @staticmethod
-    def _get_metric(distance: str, normalize_score: bool = False) -> Callable:
+    def _get_metric(distance: str, *, normalize_score: bool = False) -> Callable:
         """
         Get the distance metric function based on the distance type.

View File

@@ -16,7 +16,9 @@ _debug: bool = False
 _llm_cache: Optional["BaseCache"] = None
 
-def set_verbose(value: bool) -> None:
+def set_verbose(
+    value: bool,  # noqa: FBT001
+) -> None:
     """Set a new value for the `verbose` global setting."""
     import langchain
@@ -71,7 +73,9 @@ def get_verbose() -> bool:
     return _verbose or old_verbose
 
-def set_debug(value: bool) -> None:
+def set_debug(
+    value: bool,  # noqa: FBT001
+) -> None:
     """Set a new value for the `debug` global setting."""
     import langchain

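set_verbose and set_debug take a single boolean, so even a positional call is unambiguous; the definitions therefore keep their shape and silence FBT001. Because the parameter stays positional-or-keyword, callers may still name it, which is what the updated tests at the bottom of this diff do (a sketch with a hypothetical setter):

def set_flag(
    value: bool,  # noqa: FBT001
) -> None:
    print(f"flag={value}")

set_flag(value=True)  # keyword call also satisfies FBT003 at the call site
set_flag(True)  # still legal, but FBT003 would flag the bare literal
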
View File

@@ -43,7 +43,10 @@ class StructuredOutputParser(BaseOutputParser[dict[str, Any]]):
     ) -> StructuredOutputParser:
         return cls(response_schemas=response_schemas)
 
-    def get_format_instructions(self, only_json: bool = False) -> str:
+    def get_format_instructions(
+        self,
+        only_json: bool = False,  # noqa: FBT001,FBT002
+    ) -> str:
         """Get format instructions for the output parser.
         example:

View File

@@ -66,7 +66,7 @@ class MultiQueryRetriever(BaseRetriever):
         llm: BaseLanguageModel,
         prompt: BasePromptTemplate = DEFAULT_QUERY_PROMPT,
         parser_key: Optional[str] = None,
-        include_original: bool = False,
+        include_original: bool = False,  # noqa: FBT001,FBT002
     ) -> "MultiQueryRetriever":
         """Initialize from llm using default template.

View File

@@ -74,6 +74,7 @@ class ParentDocumentRetriever(MultiVectorRetriever):
         self,
         documents: list[Document],
         ids: Optional[list[str]] = None,
+        *,
         add_to_docstore: bool = True,
     ) -> tuple[list[Document], list[tuple[str, Document]]]:
         if self.parent_splitter is not None:
@@ -113,7 +114,7 @@ class ParentDocumentRetriever(MultiVectorRetriever):
         self,
         documents: list[Document],
         ids: Optional[list[str]] = None,
-        add_to_docstore: bool = True,
+        add_to_docstore: bool = True,  # noqa: FBT001,FBT002
         **kwargs: Any,
     ) -> None:
         """Adds documents to the docstore and vectorstores.
@@ -130,7 +131,9 @@ class ParentDocumentRetriever(MultiVectorRetriever):
             to set this to False if the documents are already in the docstore
             and you don't want to re-add them.
         """
-        docs, full_docs = self._split_docs_for_adding(documents, ids, add_to_docstore)
+        docs, full_docs = self._split_docs_for_adding(
+            documents, ids, add_to_docstore=add_to_docstore
+        )
         self.vectorstore.add_documents(docs, **kwargs)
         if add_to_docstore:
             self.docstore.mset(full_docs)
@@ -139,10 +142,12 @@ class ParentDocumentRetriever(MultiVectorRetriever):
         self,
         documents: list[Document],
         ids: Optional[list[str]] = None,
-        add_to_docstore: bool = True,
+        add_to_docstore: bool = True,  # noqa: FBT001,FBT002
         **kwargs: Any,
     ) -> None:
-        docs, full_docs = self._split_docs_for_adding(documents, ids, add_to_docstore)
+        docs, full_docs = self._split_docs_for_adding(
+            documents, ids, add_to_docstore=add_to_docstore
+        )
         await self.vectorstore.aadd_documents(docs, **kwargs)
         if add_to_docstore:
             await self.docstore.amset(full_docs)

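Once _split_docs_for_adding makes add_to_docstore keyword-only, its call sites must name the flag; forwarding it positionally would now raise a TypeError, hence the reflowed calls above. The pattern in miniature (hypothetical names):

from typing import Optional

def _split(
    docs: list[str],
    ids: Optional[list[str]] = None,
    *,
    add_to_docstore: bool = True,
) -> tuple[list[str], list[str]]:
    return docs, (docs if add_to_docstore else [])

docs, full_docs = _split(["a", "b"], None, add_to_docstore=False)
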
View File

@@ -342,8 +342,8 @@ class SelfQueryRetriever(BaseRetriever):
         metadata_field_info: Sequence[Union[AttributeInfo, dict]],
         structured_query_translator: Optional[Visitor] = None,
         chain_kwargs: Optional[dict] = None,
-        enable_limit: bool = False,
-        use_original_query: bool = False,
+        enable_limit: bool = False,  # noqa: FBT001,FBT002
+        use_original_query: bool = False,  # noqa: FBT001,FBT002
         **kwargs: Any,
     ) -> "SelfQueryRetriever":
         if structured_query_translator is None:

View File

@@ -1146,7 +1146,11 @@ class _DatasetRunContainer:
             aggregate_metrics=aggregate_feedback,
         )
 
-    def finish(self, batch_results: list, verbose: bool = False) -> TestResult:
+    def finish(
+        self,
+        batch_results: list,
+        verbose: bool = False,  # noqa: FBT001,FBT002
+    ) -> TestResult:
         results = self._collect_test_results(batch_results)
         if verbose:
             try:

View File

@@ -143,7 +143,7 @@ ignore-regex = ".*(Stati Uniti|Tense=Pres).*"
 ignore-words-list = "momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogyny,unsecure,damon,crate,aadd,symbl,precesses,accademia,nin"
 
 [tool.ruff.lint]
-select = ["A", "C4", "D", "E", "EM", "F", "I", "PGH003", "PIE", "RET", "S", "SIM", "T201", "UP", "W"]
+select = ["A", "C4", "D", "E", "EM", "F", "FBT", "I", "PGH003", "PIE", "RET", "S", "SIM", "T201", "UP", "W"]
 pydocstyle.convention = "google"
 pyupgrade.keep-runtime-typing = true

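Adding "FBT" to select turns on the whole family at once: FBT001/FBT002 for definitions and FBT003 for call sites. FBT003 is what drives the test changes further down, where literal booleans passed positionally become keyword arguments (sketch, hypothetical function):

def toggle(value: bool) -> None:  # noqa: FBT001
    ...

toggle(True)  # FBT003: bare boolean literal in a call
toggle(value=True)  # OK
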
View File

@@ -136,7 +136,7 @@ async def get_state(
 @app.get("/ask_for_passphrase", description="Get the robot's pass phrase")
-async def ask_for_passphrase(said_please: bool) -> dict[str, Any]:
+async def ask_for_passphrase(*, said_please: bool) -> dict[str, Any]:
     if said_please:
         return {"passphrase": f"The passphrase is {PASS_PHRASE}"}
     return {"passphrase": "I won't share the passphrase without saying 'please'."}

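Making said_please keyword-only in this test server is behavior-preserving: FastAPI binds query parameters by name, not position, so a keyword-only parameter is filled from ?said_please=true exactly as before. A self-contained sketch (assumes the fastapi package; handler body abridged):

from typing import Any

from fastapi import FastAPI

app = FastAPI()

@app.get("/ask_for_passphrase")
async def ask_for_passphrase(*, said_please: bool) -> dict[str, Any]:
    return {"ok": said_please}
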
View File

@@ -182,7 +182,7 @@ def test_simple_sequential_functionality() -> None:
 @pytest.mark.parametrize("isAsync", [False, True])
-async def test_simple_sequential_functionality_with_callbacks(isAsync: bool) -> None:
+async def test_simple_sequential_functionality_with_callbacks(*, isAsync: bool) -> None:
     """Test simple sequential functionality."""
     handler_1 = FakeCallbackHandler()
     handler_2 = FakeCallbackHandler()

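The same trick is safe in tests: pytest passes parametrized arguments and fixtures by keyword, so a bare * in a test signature changes nothing about how the test runs (sketch, assuming pytest):

import pytest

@pytest.mark.parametrize("is_async", [False, True])
def test_flag(*, is_async: bool) -> None:
    assert is_async in (False, True)
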
View File

@@ -31,7 +31,7 @@ async def test_zero_distance_async(distance: StringDistance) -> None:
 @pytest.mark.parametrize("distance", list(StringDistance))
 @pytest.mark.parametrize("normalize_score", [True, False])
 def test_zero_distance_pairwise(
-    distance: StringDistance, normalize_score: bool
+    *, distance: StringDistance, normalize_score: bool
 ) -> None:
     eval_chain = PairwiseStringDistanceEvalChain(
         distance=distance, normalize_score=normalize_score
@@ -62,7 +62,7 @@ valid_distances = [
 @pytest.mark.requires("rapidfuzz")
 @pytest.mark.parametrize("distance", valid_distances)
 @pytest.mark.parametrize("normalize_score", [True, False])
-def test_non_zero_distance(distance: StringDistance, normalize_score: bool) -> None:
+def test_non_zero_distance(*, distance: StringDistance, normalize_score: bool) -> None:
     eval_chain = StringDistanceEvalChain(
         distance=distance, normalize_score=normalize_score
     )

View File

@@ -40,7 +40,7 @@ class ToyLoader(BaseLoader):
 class InMemoryVectorStore(VectorStore):
     """In-memory implementation of VectorStore using a dictionary."""
 
-    def __init__(self, permit_upserts: bool = False) -> None:
+    def __init__(self, *, permit_upserts: bool = False) -> None:
         """Vector store interface for testing things in memory."""
         self.store: dict[str, Document] = {}
         self.permit_upserts = permit_upserts

View File

@@ -17,9 +17,9 @@ def test_no_warning() -> None:
     get_verbose()
     set_verbose(False)
     core_get_debug()
-    core_set_debug(False)
+    core_set_debug(value=False)
     core_get_verbose()
-    core_set_verbose(False)
+    core_set_verbose(value=False)
 
 
 def test_debug_is_settable_directly() -> None: