infra: remove unused `# noqa` suppression comments (#22049)

Follow-up to #21137
This commit is contained in:
Bagatur
2024-05-22 15:21:08 -07:00
committed by GitHub
parent 45ed5f3f51
commit 50186da0a1
149 changed files with 212 additions and 214 deletions

View File

@@ -1,4 +1,4 @@
from typing import Any, List, Optional # noqa: E501
from typing import Any, List, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.memory import BaseMemory

View File

@@ -33,8 +33,8 @@ class LoggingCallbackHandler(FunctionCallbackHandler):
text: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None, # noqa: ARG002
**kwargs: Any, # noqa: ARG002
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
try:
crumbs_str = f"[{self.get_breadcrumbs(run=self._get_run(run_id=run_id))}] "

View File

@@ -76,7 +76,7 @@ def _openapi_params_to_json_schema(params: List[Parameter], spec: OpenAPISpec) -
if p.param_schema:
schema = spec.get_schema(p.param_schema)
else:
media_type_schema = list(p.content.values())[0].media_type_schema # type: ignore # noqa: E501
media_type_schema = list(p.content.values())[0].media_type_schema # type: ignore
schema = spec.get_schema(media_type_schema)
if p.description and not schema.description:
schema.description = p.description
@@ -237,7 +237,7 @@ class SimpleRequestChain(Chain):
else:
try:
response = api_response.json()
except Exception: # noqa: E722
except Exception:
response = api_response.text
return {self.output_key: response}
@@ -280,7 +280,7 @@ def get_openapi_chain(
break
except ImportError as e:
raise e
except Exception: # noqa: E722
except Exception:
pass
if isinstance(spec, str):
raise ValueError(f"Unable to parse spec from source {spec}")

View File

@@ -141,7 +141,7 @@ class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain):
)
print(result["score"]) # noqa: T201
# 0
""" # noqa: E501
"""
agent_tools: Optional[List[BaseTool]] = None
"""A list of tools available to the agent."""

View File

@@ -142,7 +142,7 @@ def resolve_criteria(
>>> criterion = "relevance"
>>> CriteriaEvalChain.resolve_criteria(criteria)
{'relevance': 'Is the submission referring to a real quote from the text?'}
""" # noqa: E501
"""
if criteria is None:
return {
"helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS],
@@ -307,7 +307,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
>>> criterion = "relevance"
>>> CriteriaEvalChain.resolve_criteria(criteria)
{'relevance': 'Is the submission referring to a real quote from the text?'}
""" # noqa: E501
"""
return resolve_criteria(criteria)
@classmethod

View File

@@ -56,7 +56,7 @@ def load_dataset(uri: str) -> List[Dict]:
from langchain.evaluation import load_dataset
ds = load_dataset("llm-math")
""" # noqa: E501
"""
try:
from datasets import load_dataset
except ImportError:

View File

@@ -439,7 +439,7 @@ class LabeledScoreStringEvalChain(ScoreStringEvalChain):
Raises:
ValueError: If the input variables are not as expected.
""" # noqa: E501
"""
expected_input_vars = {
"prediction",
"input",

View File

@@ -133,7 +133,7 @@ class RunEvalConfig(BaseModel):
as `EvaluatorType.QA`, the evaluator type string ("qa"), or a configuration for a
given evaluator
(e.g.,
:class:`RunEvalConfig.QA <langchain.smith.evaluation.config.RunEvalConfig.QA>`).""" # noqa: E501
:class:`RunEvalConfig.QA <langchain.smith.evaluation.config.RunEvalConfig.QA>`)."""
custom_evaluators: Optional[List[CUSTOM_EVALUATOR_TYPE]] = None
"""Custom evaluators to apply to the dataset run."""
batch_evaluators: Optional[List[BATCH_EVALUATOR_LIKE]] = None

View File

@@ -225,7 +225,7 @@ def _wrap_in_chain_factory(
return lambda: RunnableLambda(constructor)
else:
# Typical correct case
return constructor # noqa
return constructor
return llm_or_chain_factory