core: Add ruff rules for pycodestyle Warning (W) (#26964)

All auto-fixes.
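For context, ruff's pycodestyle warning (W) rules cover things like W291 (trailing whitespace), W293 (whitespace on otherwise-blank lines), and W605 (invalid escape sequences), all of which are auto-fixable with something like `ruff check --select W --fix`. Because the fixes only touch invisible whitespace, most hunks below show a removed and an added line that render identically. A minimal sketch of the kind of change involved, using made-up strings:

```python
# Illustration only: the invisible change W291/W293 make.
# W291 flags whitespace at the end of a line; W293 flags lines that
# contain nothing but whitespace.
before = "formatted   \n   \nas a UUID\n"   # trailing + blank-line whitespace
after = "formatted\n\nas a UUID\n"          # what the auto-fix leaves behind

assert "\n".join(line.rstrip() for line in before.split("\n")) == after
```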
Christophe Bornet 2024-09-30 15:31:43 +02:00 committed by GitHub
parent 9404e7af9d
commit db8845a62a
32 changed files with 119 additions and 119 deletions

View File

@@ -32,9 +32,9 @@ class BaseMedia(Serializable):
id: Optional[str] = None
"""An optional identifier for the document.
Ideally this should be unique across the document collection and formatted
as a UUID, but this will not be enforced.
.. versionadded:: 0.2.11
"""

View File

@@ -465,26 +465,26 @@ class DeleteResponse(TypedDict, total=False):
num_deleted: int
"""The number of items that were successfully deleted.
If returned, this should only include *actual* deletions.
If the ID did not exist to begin with,
it should not be included in this count.
"""
succeeded: Sequence[str]
"""The IDs that were successfully deleted.
If returned, this should only include *actual* deletions.
If the ID did not exist to begin with,
it should not be included in this list.
"""
failed: Sequence[str]
"""The IDs that failed to be deleted.
Please note that deleting an ID that
does not exist is **NOT** considered a failure.
"""

View File

@@ -100,12 +100,12 @@ class BaseLanguageModel(
cache: Union[BaseCache, bool, None] = None
"""Whether to cache the response.
* If true, will use the global cache.
* If false, will not use a cache.
* If None, will use the global cache if it's set, otherwise no cache.
* If instance of BaseCache, will use the provided cache.
Caching is not currently supported for streaming methods of models.
"""
verbose: bool = Field(default_factory=_get_verbosity, exclude=True, repr=False)
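A minimal sketch of how the `cache` field interacts with the global cache (assuming `InMemoryCache` and `set_llm_cache` from `langchain_core`, the `FakeListLLM` test model, and that the cache is keyed on the prompt):

```python
from langchain_core.caches import InMemoryCache
from langchain_core.globals import set_llm_cache
from langchain_core.language_models import FakeListLLM

set_llm_cache(InMemoryCache())  # install a global cache

llm = FakeListLLM(responses=["a", "b"], cache=True)  # True -> use the global cache
first = llm.invoke("same prompt")
second = llm.invoke("same prompt")  # repeated prompt is served from the cache
assert first == second == "a"
```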

View File

@@ -208,8 +208,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
disable_streaming: Union[bool, Literal["tool_calling"]] = False
"""Whether to disable streaming for this model.
If streaming is bypassed, then ``stream()/astream()`` will defer to
``invoke()/ainvoke()``.
- If True, will always bypass streaming case.
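A sketch of the bypass behavior (assuming `FakeListChatModel`, `langchain_core`'s in-memory test chat model, which normally streams character by character):

```python
from langchain_core.language_models import FakeListChatModel

# With disable_streaming=True, stream() defers to invoke(), so the whole
# response arrives as a single chunk instead of token by token.
model = FakeListChatModel(responses=["hello world"], disable_streaming=True)
chunks = list(model.stream("hi"))
print(len(chunks))  # expected: 1 (the full message at once)
```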

View File

@@ -21,12 +21,12 @@ class FakeListLLM(LLM):
# it's only used by sub-classes.
sleep: Optional[float] = None
"""Sleep time in seconds between responses.
Ignored by FakeListLLM, but used by sub-classes.
"""
i: int = 0
"""Internally incremented after every model invocation.
Useful primarily for testing purposes.
"""

View File

@@ -65,7 +65,7 @@ class AIMessage(BaseMessage):
example: bool = False
"""Use to denote that a message is part of an example conversation.
At the moment, this is ignored by most models. Usage is discouraged.
"""
@@ -215,7 +215,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["AIMessageChunk"] = "AIMessageChunk" # type: ignore
"""The type of the message (used for deserialization).
"""The type of the message (used for deserialization).
Defaults to "AIMessageChunk"."""
tool_call_chunks: list[ToolCallChunk] = []

View File

@@ -25,7 +25,7 @@ class BaseMessage(Serializable):
additional_kwargs: dict = Field(default_factory=dict)
"""Reserved for additional payload data associated with the message.
For example, for a message from an AI, this could include tool calls as
encoded by the model provider.
"""
@@ -35,16 +35,16 @@ class BaseMessage(Serializable):
type: str
"""The type of the message. Must be a string that is unique to the message type.
The purpose of this field is to allow for easy identification of the message type
when deserializing messages.
"""
name: Optional[str] = None
"""An optional name for the message.
"""An optional name for the message.
This can be used to provide a human-readable name for the message.
Usage of this field is optional, and whether it's used or not is up to the
model implementation.
"""

View File

@@ -35,7 +35,7 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk):
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore
"""The type of the message (used during serialization).
"""The type of the message (used during serialization).
Defaults to "ChatMessageChunk"."""
@classmethod

View File

@@ -42,7 +42,7 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization).
"""The type of the message (used for serialization).
Defaults to "FunctionMessageChunk"."""
@classmethod

View File

@@ -30,7 +30,7 @@ class HumanMessage(BaseMessage):
example: bool = False
"""Use to denote that a message is part of an example conversation.
At the moment, this is ignored by most models. Usage is discouraged.
Defaults to False.
"""
@@ -66,7 +66,7 @@ class HumanMessageChunk(HumanMessage, BaseMessageChunk):
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["HumanMessageChunk"] = "HumanMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization).
"""The type of the message (used for serialization).
Defaults to "HumanMessageChunk"."""
@classmethod

View File

@@ -60,7 +60,7 @@ class SystemMessageChunk(SystemMessage, BaseMessageChunk):
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization).
"""The type of the message (used for serialization).
Defaults to "SystemMessageChunk"."""
@classmethod

View File

@@ -58,11 +58,11 @@ class ToolMessage(BaseMessage):
artifact: Any = None
"""Artifact of the Tool execution which is not meant to be sent to the model.
Should only be specified if it is different from the message content, e.g. if only
a subset of the full tool output is being passed as message content but the full
output is needed in other parts of the code.
.. versionadded:: 0.2.17
"""
@@ -191,7 +191,7 @@ class ToolCall(TypedDict):
"""The arguments to the tool call."""
id: Optional[str]
"""An identifier associated with the tool call.
An identifier is needed to associate a tool call request with a tool
call result in events when multiple concurrent tool calls are made.
"""

View File

@@ -42,7 +42,7 @@ class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
"""
pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None # type: ignore
"""The Pydantic object to use for validation.
"""The Pydantic object to use for validation.
If None, no validation is performed."""
def _diff(self, prev: Optional[Any], next: Any) -> Any:
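A minimal sketch (assuming the schema is mainly used to drive `get_format_instructions()`; `Joke` is a made-up model):

```python
from pydantic import BaseModel

from langchain_core.output_parsers import JsonOutputParser

class Joke(BaseModel):
    setup: str
    punchline: str

parser = JsonOutputParser(pydantic_object=Joke)
print(parser.get_format_instructions())  # embeds Joke's JSON schema
print(parser.parse('{"setup": "a", "punchline": "b"}'))  # plain dict
```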

View File

@@ -57,9 +57,9 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
strict: bool = False
"""Whether to allow non-JSON-compliant strings.
See: https://docs.python.org/3/library/json.html#encoders-and-decoders
Useful when the parsed output may include unicode characters or new lines.
"""
@@ -226,7 +226,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
pydantic_schema: Union[type[BaseModel], dict[str, type[BaseModel]]]
"""The pydantic schema to parse the output with.
If multiple schemas are provided, then the function name will be used to
determine which schema to use.
"""

View File

@@ -142,12 +142,12 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
first_tool_only: bool = False
"""Whether to return only the first tool call.
If False, the result will be a list of tool calls, or an empty list
if no tool calls are found.
If true, and multiple tool calls are found, only the first one will be returned,
and the other tool calls will be ignored.
If no tool calls are found, None will be returned.
"""
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
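A sketch of the two modes (assuming an `AIMessage` carrying a single tool call; field names follow `ToolCall`):

```python
from langchain_core.messages import AIMessage
from langchain_core.output_parsers.openai_tools import JsonOutputToolsParser
from langchain_core.outputs import ChatGeneration

msg = AIMessage(
    content="",
    tool_calls=[{"name": "add", "args": {"x": 1, "y": 2}, "id": "call_1"}],
)
gen = [ChatGeneration(message=msg)]

print(JsonOutputToolsParser().parse_result(gen))                      # list of dicts
print(JsonOutputToolsParser(first_tool_only=True).parse_result(gen))  # single dict
```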

View File

@@ -12,12 +12,12 @@ from langchain_core.output_parsers.transform import BaseTransformOutputParser
from langchain_core.runnables.utils import AddableDict
XML_FORMAT_INSTRUCTIONS = """The output should be formatted as an XML file.
1. Output should conform to the tags below.
2. If tags are not given, make them on your own.
3. Remember to always open and close all the tags.
As an example, for the tags ["foo", "bar", "baz"]:
1. String "<foo>\n <bar>\n <baz></baz>\n </bar>\n</foo>" is a well-formatted instance of the schema.
2. String "<foo>\n <bar>\n </foo>" is a badly-formatted instance.
3. String "<foo>\n <tag>\n </tag>\n</foo>" is a badly-formatted instance.
@@ -146,23 +146,23 @@ class XMLOutputParser(BaseTransformOutputParser):
)
parser: Literal["defusedxml", "xml"] = "defusedxml"
"""Parser to use for XML parsing. Can be either 'defusedxml' or 'xml'.
* 'defusedxml' is the default parser and is used to prevent XML vulnerabilities
present in some distributions of Python's standard library xml.
`defusedxml` is a wrapper around the standard library parser that
sets up the parser with secure defaults.
* 'xml' is the standard library parser.
Use `xml` only if you are sure that your distribution of the standard library
is not vulnerable to XML vulnerabilities.
Please review the following resources for more information:
* https://docs.python.org/3/library/xml.html#xml-vulnerabilities
* https://github.com/tiran/defusedxml
The standard library relies on libexpat for parsing XML:
https://github.com/libexpat/libexpat
"""
def get_format_instructions(self) -> str:
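A minimal usage sketch (the 'defusedxml' default requires the `defusedxml` package to be installed):

```python
from langchain_core.output_parsers import XMLOutputParser

parser = XMLOutputParser(parser="defusedxml")  # hardened default
print(parser.parse("<foo><bar><baz>ok</baz></bar></foo>"))
# expected shape: {'foo': [{'bar': [{'baz': 'ok'}]}]}
```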

View File

@@ -20,16 +20,16 @@ class ChatResult(BaseModel):
generations: list[ChatGeneration]
"""List of the chat generations.
Generations is a list to allow for multiple candidate generations for a single
input prompt.
"""
llm_output: Optional[dict] = None
"""For arbitrary LLM provider specific output.
This dictionary is a free-form dictionary that can contain any information that the
provider wants to return. It is not standardized and is provider-specific.
Users should generally avoid relying on this field and instead rely on
accessing relevant information from standardized fields present in
AIMessage.

View File

@@ -26,8 +26,8 @@ class Generation(Serializable):
"""Generated text output."""
generation_info: Optional[dict[str, Any]] = None
"""Raw response from the provider.
"""Raw response from the provider.
May include things like the reason for finishing or token log probabilities.
"""
type: Literal["Generation"] = "Generation"

View File

@@ -22,25 +22,25 @@ class LLMResult(BaseModel):
list[Union[Generation, ChatGeneration, GenerationChunk, ChatGenerationChunk]]
]
"""Generated outputs.
The first dimension of the list represents completions for different input
prompts.
The second dimension of the list represents different candidate generations
for a given prompt.
When returned from an LLM the type is List[List[Generation]].
When returned from a chat model the type is List[List[ChatGeneration]].
ChatGeneration is a subclass of Generation that has a field for a structured
chat message.
"""
llm_output: Optional[dict] = None
"""For arbitrary LLM provider specific output.
This dictionary is a free-form dictionary that can contain any information that the
provider wants to return. It is not standardized and is provider-specific.
Users should generally avoid relying on this field and instead rely on
accessing relevant information from standardized fields present in
AIMessage.

View File

@@ -45,11 +45,11 @@ class BasePromptTemplate(
"""Base class for all prompt templates, returning a prompt."""
input_variables: list[str]
"""A list of the names of the variables whose values are required as inputs to the
"""A list of the names of the variables whose values are required as inputs to the
prompt."""
optional_variables: list[str] = Field(default=[])
"""optional_variables: A list of the names of the variables for placeholder
or MessagePlaceholder that are optional. These variables are auto inferred
from the prompt and user need not provide them."""
input_types: typing.Dict[str, Any] = Field(default_factory=dict, exclude=True) # noqa: UP006
"""A dictionary of the types of the variables the prompt template expects.
@@ -58,7 +58,7 @@ class BasePromptTemplate(
"""How to parse the output of calling an LLM on this formatted prompt."""
partial_variables: Mapping[str, Any] = Field(default_factory=dict)
"""A dictionary of the partial variables the prompt template carries.
Partial variables populate the template so that you don't need to
pass them in every time you call the prompt."""
metadata: Optional[typing.Dict[str, Any]] = None # noqa: UP006

View File

@@ -196,12 +196,12 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
"""Name of variable to use as messages."""
optional: bool = False
"""If True format_messages can be called with no arguments and will return an empty
list. If False then a named argument with name `variable_name` must be passed
"""If True format_messages can be called with no arguments and will return an empty
list. If False then a named argument with name `variable_name` must be passed
in, even if the value is an empty list."""
n_messages: Optional[PositiveInt] = None
"""Maximum number of messages to include. If None, then will include all.
"""Maximum number of messages to include. If None, then will include all.
Defaults to None."""
@classmethod
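A sketch of both fields (hypothetical variable names):

```python
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are helpful."),
        MessagesPlaceholder("history", optional=True, n_messages=10),
        ("human", "{question}"),
    ]
)

# optional=True: formatting works without passing "history" at all.
messages = prompt.format_messages(question="hi")
```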

View File

@@ -136,14 +136,14 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
"""Optional list of tags associated with the retriever. Defaults to None.
These tags will be associated with each call to this retriever,
and passed as arguments to the handlers defined in `callbacks`.
You can use these to e.g. identify a specific instance of a retriever with its
use case.
"""
metadata: Optional[dict[str, Any]] = None
"""Optional metadata associated with the retriever. Defaults to None.
This metadata will be associated with each call to this retriever,
and passed as arguments to the handlers defined in `callbacks`.
You can use these to e.g. identify a specific instance of a retriever with its
use case.
"""

View File

@@ -65,7 +65,7 @@ class RunnableConfig(TypedDict, total=False):
max_concurrency: Optional[int]
"""
Maximum number of parallel calls to make. If not provided, defaults to
ThreadPoolExecutor's default.
"""
@@ -78,7 +78,7 @@ class RunnableConfig(TypedDict, total=False):
"""
Runtime values for attributes previously made configurable on this Runnable,
or sub-Runnables, through .configurable_fields() or .configurable_alternatives().
Check .output_schema() for a description of the attributes that have been made
configurable.
"""

View File

@@ -538,7 +538,7 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
prefix_keys: bool
"""Whether to prefix configurable fields of each alternative with a namespace
of the form <which.id>==<alternative_key>, e.g. a key named "temperature" used by
the alternative named "gpt3" becomes "model==gpt3/temperature"."""
@classmethod

View File

@@ -93,13 +93,13 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
"""A sequence of fallbacks to try."""
exceptions_to_handle: tuple[type[BaseException], ...] = (Exception,)
"""The exceptions on which fallbacks should be tried.
Any exception that is not a subclass of these exceptions will be raised immediately.
"""
exception_key: Optional[str] = None
"""If string is specified then handled exceptions will be passed to fallbacks as
"""If string is specified then handled exceptions will be passed to fallbacks as
part of the input under the specified key. If None, exceptions
will not be passed to fallbacks. If used, the base Runnable and its fallbacks
will not be passed to fallbacks. If used, the base Runnable and its fallbacks
must accept a dictionary as input."""
model_config = ConfigDict(
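A minimal sketch of the fallback flow (assuming `RunnableLambda`; built via `.with_fallbacks()`, which constructs a `RunnableWithFallbacks`):

```python
from langchain_core.runnables import RunnableLambda

def flaky(_: str) -> str:
    raise ValueError("primary failed")

chain = RunnableLambda(flaky).with_fallbacks(
    [RunnableLambda(lambda x: f"fallback saw {x}")],
    exceptions_to_handle=(ValueError,),  # anything else is raised immediately
)
assert chain.invoke("hi") == "fallback saw hi"
```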

View File

@@ -96,10 +96,10 @@ class RunnableRetry(RunnableBindingBase[Input, Output]):
retry_exception_types: tuple[type[BaseException], ...] = (Exception,)
"""The exception types to retry on. By default all exceptions are retried.
In general you should only retry on exceptions that are likely to be
transient, such as network errors.
Good exceptions to retry are all server errors (5xx) and selected client
errors (4xx) such as 429 Too Many Requests.
"""

View File

@@ -13,26 +13,26 @@ class EventData(TypedDict, total=False):
input: Any
"""The input passed to the Runnable that generated the event.
Inputs will sometimes be available at the *START* of the Runnable, and
sometimes at the *END* of the Runnable.
If a Runnable is able to stream its inputs, then its input by definition
won't be known until the *END* of the Runnable when it has finished streaming
its inputs.
"""
output: Any
"""The output of the Runnable that generated the event.
Outputs will only be available at the *END* of the Runnable.
For most Runnables, this field can be inferred from the `chunk` field,
though there might be some exceptions for special cased Runnables (e.g., like
chat models), which may return more information.
"""
chunk: Any
"""A streaming chunk from the output that generated the event.
Chunks support addition in general, and adding them up should result
in the output of the Runnable that generated the event.
"""
@@ -85,49 +85,49 @@ class BaseStreamEvent(TypedDict):
event: str
"""Event names are of the format: on_[runnable_type]_(start|stream|end).
Runnable types are one of:
- **llm** - used by non chat models
- **chat_model** - used by chat models
- **prompt** - e.g., ChatPromptTemplate
- **tool** - from tools defined via @tool decorator or inheriting
from Tool/BaseTool
- **chain** - most Runnables are of this type
Further, the events are categorized as one of:
- **start** - when the Runnable starts
- **stream** - when the Runnable is streaming
- **end** - when the Runnable ends
start, stream, and end are associated with slightly different `data` payloads.
Please see the documentation for `EventData` for more details.
"""
run_id: str
"""An randomly generated ID to keep track of the execution of the given Runnable.
Each child Runnable that gets invoked as part of the execution of a parent Runnable
is assigned its own unique ID.
"""
tags: NotRequired[list[str]]
"""Tags associated with the Runnable that generated this event.
Tags are always inherited from parent Runnables.
Tags can either be bound to a Runnable using `.with_config({"tags": ["hello"]})`
or passed at run time using `.astream_events(..., {"tags": ["hello"]})`.
"""
metadata: NotRequired[dict[str, Any]]
"""Metadata associated with the Runnable that generated this event.
Metadata can either be bound to a Runnable using
`.with_config({"metadata": { "foo": "bar" }})`
or passed at run time using
`.astream_events(..., {"metadata": {"foo": "bar"}})`.
"""
@@ -136,11 +136,11 @@ class BaseStreamEvent(TypedDict):
Root Events will have an empty list.
For example, if a Runnable A calls Runnable B, then the event generated by Runnable
B will have Runnable A's ID in the parent_ids field.
The order of the parent IDs is from the root parent to the immediate parent.
Only supported as of v2 of the astream events API. v1 will return an empty list.
"""

View File

@@ -348,7 +348,7 @@ class ChildTool(BaseTool):
"""The unique name of the tool that clearly communicates its purpose."""
description: str
"""Used to tell the model how/when/why to use the tool.
You can provide few-shot examples as a part of the description.
"""
@@ -356,17 +356,17 @@ class ChildTool(BaseTool):
default=None, description="The tool schema."
)
"""Pydantic model class to validate and parse the tool's input arguments.
Args schema should be either:
- A subclass of pydantic.BaseModel.
or
- A subclass of pydantic.v1.BaseModel if accessing v1 namespace in pydantic 2
"""
return_direct: bool = False
"""Whether to return the tool's output directly.
Setting this to True means
"""Whether to return the tool's output directly.
Setting this to True means
that after the tool is called, the AgentExecutor will stop looping.
"""
verbose: bool = False
@@ -410,8 +410,8 @@ class ChildTool(BaseTool):
response_format: Literal["content", "content_and_artifact"] = "content"
"""The tool response format. Defaults to 'content'.
If "content" then the output of the tool is interpreted as the contents of a
ToolMessage. If "content_and_artifact" then the output is expected to be a
If "content" then the output of the tool is interpreted as the contents of a
ToolMessage. If "content_and_artifact" then the output is expected to be a
two-tuple corresponding to the (content, artifact) of a ToolMessage.
"""

View File

@@ -53,8 +53,8 @@ class LogEntry(TypedDict):
inputs: NotRequired[Optional[Any]]
"""Inputs to this run. Not available currently via astream_log."""
final_output: Optional[Any]
"""Final output of this run.
"""Final output of this run.
Only available after the run has finished successfully."""
end_time: Optional[str]
"""ISO-8601 timestamp of when the run ended.

View File

@@ -44,7 +44,7 @@ python = ">=3.12.4"
[tool.poetry.extras]
[tool.ruff.lint]
select = [ "B", "C4", "E", "F", "I", "N", "PIE", "SIM", "T201", "UP",]
select = [ "B", "C4", "E", "F", "I", "N", "PIE", "SIM", "T201", "UP", "W"]
ignore = [ "UP007",]
[tool.coverage.run]

View File

@@ -136,7 +136,7 @@ WITHOUT_END_BRACKET = """Here is a response formatted as schema:
```json
{
"foo": "bar"
"""
@@ -146,7 +146,7 @@ WITH_END_BRACKET = """Here is a response formatted as schema:
{
"foo": "bar"
}
"""
WITH_END_TICK = """Here is a response formatted as schema:
@@ -155,7 +155,7 @@ WITH_END_TICK = """Here is a response formatted as schema:
{
"foo": "bar"
}
```
"""
WITH_END_TEXT = """Here is a response formatted as schema:
@@ -164,8 +164,8 @@ WITH_END_TEXT = """Here is a response formatted as schema:
{
"foo": "bar"
```
This should do the trick
"""
TEST_CASES = [

View File

@@ -219,7 +219,7 @@ def test_mustache_prompt_from_template(snapshot: SnapshotAssertion) -> None:
yo
hello
is a test."""
is a test.""" # noqa: W293
)
assert prompt.input_variables == ["foo"]
if PYDANTIC_VERSION >= (2, 9):
@@ -408,7 +408,7 @@ def test_prompt_from_jinja2_template() -> None:
# Empty input variable.
template = """Hello there
There is no variable here {
Will it get confused{ }?
"""
prompt = PromptTemplate.from_template(template, template_format="jinja2")
expected_prompt = PromptTemplate(