feat(langchain): add ruff rules G (#32029)
https://docs.astral.sh/ruff/rules/#flake8-logging-format-g
commit 953592d4f7 (parent 19fff8cba9)
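The change converts eager f-string interpolation in logging calls to lazy %-style arguments, as flake8-logging-format's G rules (chiefly G004, "logging statement uses f-string") require. A minimal sketch of the before/after with illustrative values — this is not code from the commit itself:

import logging

logging.basicConfig(level=logging.INFO)  # DEBUG deliberately disabled
logger = logging.getLogger(__name__)

iterations, elapsed = 3, 1.234  # stand-in values

# Flagged by G004: the f-string is rendered even though the
# DEBUG record is then discarded.
logger.debug(f"Agent Iterations: {iterations} ({elapsed:.2f}s elapsed)")

# Preferred lazy form: %-formatting runs only if a handler
# actually emits the record.
logger.debug("Agent Iterations: %s (%.2fs elapsed)", iterations, elapsed)

Besides skipping wasted formatting work, the lazy form keeps the unformatted template on the LogRecord, which log aggregators can use to group identical messages.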
@@ -147,7 +147,9 @@ class AgentExecutorIterator:
         self.iterations += 1
         self.time_elapsed = time.time() - self.start_time
         logger.debug(
-            f"Agent Iterations: {self.iterations} ({self.time_elapsed:.2f}s elapsed)",
+            "Agent Iterations: %s (%.2fs elapsed)",
+            self.iterations,
+            self.time_elapsed,
         )

     def make_final_outputs(
@@ -146,7 +146,9 @@ class UpstashRedisEntityStore(BaseEntityStore):
             or default
             or ""
         )
-        logger.debug(f"Upstash Redis MEM get '{self.full_key_prefix}:{key}': '{res}'")
+        logger.debug(
+            "Upstash Redis MEM get '%s:%s': '%s'", self.full_key_prefix, key, res
+        )
         return res

     def set(self, key: str, value: Optional[str]) -> None:
@@ -154,7 +156,11 @@ class UpstashRedisEntityStore(BaseEntityStore):
             return self.delete(key)
         self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
         logger.debug(
-            f"Redis MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}",
+            "Redis MEM set '%s:%s': '%s' EX %s",
+            self.full_key_prefix,
+            key,
+            value,
+            self.ttl,
         )
         return None

@@ -249,7 +255,7 @@ class RedisEntityStore(BaseEntityStore):
             or default
             or ""
         )
-        logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'")
+        logger.debug("REDIS MEM get '%s:%s': '%s'", self.full_key_prefix, key, res)
         return res

     def set(self, key: str, value: Optional[str]) -> None:
@@ -257,7 +263,11 @@ class RedisEntityStore(BaseEntityStore):
             return self.delete(key)
         self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
         logger.debug(
-            f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}",
+            "REDIS MEM set '%s:%s': '%s' EX %s",
+            self.full_key_prefix,
+            key,
+            value,
+            self.ttl,
         )
         return None

@@ -128,7 +128,7 @@ class MultiQueryRetriever(BaseRetriever):
         )
         lines = response["text"] if isinstance(self.llm_chain, LLMChain) else response
         if self.verbose:
-            logger.info(f"Generated queries: {lines}")
+            logger.info("Generated queries: %s", lines)
         return lines

     async def aretrieve_documents(
@@ -194,7 +194,7 @@ class MultiQueryRetriever(BaseRetriever):
         )
         lines = response["text"] if isinstance(self.llm_chain, LLMChain) else response
         if self.verbose:
-            logger.info(f"Generated queries: {lines}")
+            logger.info("Generated queries: %s", lines)
         return lines

     def retrieve_documents(
@@ -74,7 +74,7 @@ class RePhraseQueryRetriever(BaseRetriever):
             query,
             {"callbacks": run_manager.get_child()},
         )
-        logger.info(f"Re-phrased question: {re_phrased_question}")
+        logger.info("Re-phrased question: %s", re_phrased_question)
         return self.retriever.invoke(
             re_phrased_question,
             config={"callbacks": run_manager.get_child()},
@@ -320,7 +320,7 @@ class SelfQueryRetriever(BaseRetriever):
             config={"callbacks": run_manager.get_child()},
         )
         if self.verbose:
-            logger.info(f"Generated Query: {structured_query}")
+            logger.info("Generated Query: %s", structured_query)
         new_query, search_kwargs = self._prepare_query(query, structured_query)
         return self._get_docs_with_query(new_query, search_kwargs)

@@ -343,7 +343,7 @@ class SelfQueryRetriever(BaseRetriever):
             config={"callbacks": run_manager.get_child()},
         )
         if self.verbose:
-            logger.info(f"Generated Query: {structured_query}")
+            logger.info("Generated Query: %s", structured_query)
         new_query, search_kwargs = self._prepare_query(query, structured_query)
         return await self._aget_docs_with_query(new_query, search_kwargs)

@@ -207,7 +207,7 @@ def _wrap_in_chain_factory(
         # It's an arbitrary function, wrap it in a RunnableLambda
         user_func = cast(Callable, llm_or_chain_factory)
         sig = inspect.signature(user_func)
-        logger.info(f"Wrapping function {sig} as RunnableLambda.")
+        logger.info("Wrapping function %s as RunnableLambda.", sig)
         wrapped = RunnableLambda(user_func)
         return lambda: wrapped
     constructor = cast(Callable, llm_or_chain_factory)
@@ -416,7 +416,7 @@ def _validate_example_inputs(
         # Otherwise it's a runnable
         _validate_example_inputs_for_chain(example, chain, input_mapper)
     elif isinstance(chain, Runnable):
-        logger.debug(f"Skipping input validation for {chain}")
+        logger.debug("Skipping input validation for %s", chain)


 ## Shared Evaluator Setup Utilities
@@ -461,16 +461,19 @@ def _determine_input_key(
         input_key = config.input_key
         if run_inputs and input_key not in run_inputs:
             logger.warning(
-                f"Input key {input_key} not in chain's specified"
-                f" input keys {run_inputs}. Evaluation behavior may be undefined.",
+                "Input key %s not in chain's specified input keys %s. "
+                "Evaluation behavior may be undefined.",
+                input_key,
+                run_inputs,
             )
     elif run_inputs and len(run_inputs) == 1:
         input_key = run_inputs[0]
     elif run_inputs is not None and len(run_inputs) > 1:
         logger.warning(
-            f"Chain expects multiple input keys: {run_inputs},"
-            f" Evaluator is likely to fail. Evaluation behavior may be undefined."
+            "Chain expects multiple input keys: %s,"
+            " Evaluator is likely to fail. Evaluation behavior may be undefined."
             " Specify an input_key in the RunEvalConfig to avoid this warning.",
+            run_inputs,
         )

     return input_key
@@ -485,16 +488,19 @@ def _determine_prediction_key(
         prediction_key = config.prediction_key
         if run_outputs and prediction_key not in run_outputs:
             logger.warning(
-                f"Prediction key {prediction_key} not in chain's specified"
-                f" output keys {run_outputs}. Evaluation behavior may be undefined.",
+                "Prediction key %s not in chain's specified output keys %s. "
+                "Evaluation behavior may be undefined.",
+                prediction_key,
+                run_outputs,
             )
     elif run_outputs and len(run_outputs) == 1:
         prediction_key = run_outputs[0]
     elif run_outputs is not None and len(run_outputs) > 1:
         logger.warning(
-            f"Chain expects multiple output keys: {run_outputs},"
-            f" Evaluation behavior may be undefined. Specify a prediction_key"
+            "Chain expects multiple output keys: %s,"
+            " Evaluation behavior may be undefined. Specify a prediction_key"
             " in the RunEvalConfig to avoid this warning.",
+            run_outputs,
         )
     return prediction_key

@@ -820,9 +826,11 @@ async def _arun_llm_or_chain(
         result = output
     except Exception as e:
         logger.warning(
-            f"{chain_or_llm} failed for example {example.id} "
-            f"with inputs {example.inputs}"
-            f"\n{e!r}",
+            "%s failed for example %s with inputs %s\n%s",
+            chain_or_llm,
+            example.id,
+            example.inputs,
+            e,
         )
         result = EvalError(Error=e)
     return result
@@ -981,9 +989,12 @@ def _run_llm_or_chain(
     except Exception as e:
         error_type = type(e).__name__
         logger.warning(
-            f"{chain_or_llm} failed for example {example.id} "
-            f"with inputs {example.inputs}"
-            f"\nError Type: {error_type}, Message: {e}",
+            "%s failed for example %s with inputs %s\nError Type: %s, Message: %s",
+            chain_or_llm,
+            example.id,
+            example.inputs,
+            error_type,
+            e,
         )
         result = EvalError(Error=e)
     return result
@@ -1113,7 +1124,9 @@ class _DatasetRunContainer:
                     project_id=self.project.id,
                 )
             except Exception as e:
-                logger.error(f"Error running batch evaluator {evaluator!r}: {e}")
+                logger.exception(
+                    "Error running batch evaluator %s: %s", repr(evaluator), e
+                )
         return aggregate_feedback

     def _collect_metrics(self) -> tuple[dict[str, _RowResult], dict[str, Run]]:
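The hunk above also upgrades logger.error to logger.exception, which attaches the active traceback to the record automatically. A small sketch of the difference, using a hypothetical failing evaluator:

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def evaluate_batch() -> None:  # hypothetical stand-in for the batch evaluator
    raise ValueError("boom")

try:
    evaluate_batch()
except Exception as e:
    # Logs at ERROR level and appends the current traceback,
    # unlike a bare logger.error(...) call.
    logger.exception("Error running batch evaluator %s: %s", repr(evaluate_batch), e)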
@@ -1174,7 +1187,7 @@ class _DatasetRunContainer:
             agg_feedback = results.get_aggregate_feedback()
             _display_aggregate_results(agg_feedback)
         except Exception as e:
-            logger.debug(f"Failed to print aggregate feedback: {e!r}")
+            logger.debug("Failed to print aggregate feedback: %s", e, exc_info=True)
         try:
             # Closing the project permits name changing and metric optimizations
             self.client.update_project(
@@ -1182,7 +1195,7 @@
                 end_time=datetime.now(timezone.utc),
             )
         except Exception as e:
-            logger.debug(f"Failed to close project: {e!r}")
+            logger.debug("Failed to close project: %s", e, exc_info=True)
         return results

     @classmethod
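The two logger.debug hunks above also add exc_info=True, so the record carries the full traceback that the old {e!r} interpolation reduced to a one-line repr. A minimal sketch:

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

try:
    raise RuntimeError("project already closed")  # illustrative failure
except Exception as e:
    # Message only:
    logger.debug("Failed to close project: %s", e)
    # Message plus the active exception's traceback:
    logger.debug("Failed to close project: %s", e, exc_info=True)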
@@ -159,6 +159,7 @@ select = [
     "FA", # flake8-future-annotations
     "FBT", # flake8-boolean-trap
     "FLY", # flake8-flynt
+    "G", # flake8-logging-format
     "I", # isort
     "ICN", # flake8-import-conventions
     "INT", # flake8-gettext
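With "G" added to the select list, the new checks run as part of the normal lint pass; they can also be tried in isolation with something like ruff check --select G . (invocation and path illustrative).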
@@ -2882,7 +2882,7 @@ wheels = [

 [[package]]
 name = "langchain-openai"
-version = "0.3.27"
+version = "0.3.28"
 source = { editable = "../partners/openai" }
 dependencies = [
     { name = "langchain-core" },