diff --git a/libs/langchain/langchain/agents/agent_iterator.py b/libs/langchain/langchain/agents/agent_iterator.py
index bb3d8b5b08c..12ef5a198cd 100644
--- a/libs/langchain/langchain/agents/agent_iterator.py
+++ b/libs/langchain/langchain/agents/agent_iterator.py
@@ -190,14 +190,14 @@ class AgentExecutorIterator:
             name=self.run_name,
         )
         try:
-            while self.agent_executor._should_continue(
+            while self.agent_executor._should_continue(  # noqa: SLF001
                 self.iterations,
                 self.time_elapsed,
             ):
                 # take the next step: this plans next action, executes it,
                 # yielding action and observation as they are generated
                 next_step_seq: NextStepOutput = []
-                for chunk in self.agent_executor._iter_next_step(
+                for chunk in self.agent_executor._iter_next_step(  # noqa: SLF001
                     self.name_to_tool_map,
                     self.color_mapping,
                     self.inputs,
@@ -214,7 +214,7 @@ class AgentExecutorIterator:
                             yield AddableDict(steps=[chunk], messages=chunk.messages)

                 # convert iterator output to format handled by _process_next_step_output
-                next_step = self.agent_executor._consume_next_step(next_step_seq)
+                next_step = self.agent_executor._consume_next_step(next_step_seq)  # noqa: SLF001
                 # update iterations and time elapsed
                 self.update_iterations()
                 # decide if this is the final output
@@ -258,14 +258,14 @@ class AgentExecutorIterator:
         )
         try:
             async with asyncio_timeout(self.agent_executor.max_execution_time):
-                while self.agent_executor._should_continue(
+                while self.agent_executor._should_continue(  # noqa: SLF001
                     self.iterations,
                     self.time_elapsed,
                 ):
                     # take the next step: this plans next action, executes it,
                     # yielding action and observation as they are generated
                     next_step_seq: NextStepOutput = []
-                    async for chunk in self.agent_executor._aiter_next_step(
+                    async for chunk in self.agent_executor._aiter_next_step(  # noqa: SLF001
                         self.name_to_tool_map,
                         self.color_mapping,
                         self.inputs,
@@ -288,7 +288,7 @@ class AgentExecutorIterator:
                                 )

                     # convert iterator output to format handled by _process_next_step
-                    next_step = self.agent_executor._consume_next_step(next_step_seq)
+                    next_step = self.agent_executor._consume_next_step(next_step_seq)  # noqa: SLF001
                     # update iterations and time elapsed
                     self.update_iterations()
                     # decide if this is the final output
@@ -336,7 +336,7 @@ class AgentExecutorIterator:
         # Check for tool return
         if len(next_step_output) == 1:
             next_step_action = next_step_output[0]
-            tool_return = self.agent_executor._get_tool_return(next_step_action)
+            tool_return = self.agent_executor._get_tool_return(next_step_action)  # noqa: SLF001
             if tool_return is not None:
                 return self._return(tool_return, run_manager=run_manager)

@@ -364,7 +364,7 @@ class AgentExecutorIterator:
         # Check for tool return
         if len(next_step_output) == 1:
             next_step_action = next_step_output[0]
-            tool_return = self.agent_executor._get_tool_return(next_step_action)
+            tool_return = self.agent_executor._get_tool_return(next_step_action)  # noqa: SLF001
             if tool_return is not None:
                 return await self._areturn(tool_return, run_manager=run_manager)

@@ -376,7 +376,7 @@ class AgentExecutorIterator:
         """
         logger.warning("Stopping agent prematurely due to triggering stop condition")
         # this manually constructs agent finish with output key
-        output = self.agent_executor._action_agent.return_stopped_response(
+        output = self.agent_executor._action_agent.return_stopped_response(  # noqa: SLF001
             self.agent_executor.early_stopping_method,
             self.intermediate_steps,
             **self.inputs,
@@ -389,7 +389,7 @@ class AgentExecutorIterator:
         the stopped response.
         """
         logger.warning("Stopping agent prematurely due to triggering stop condition")
-        output = self.agent_executor._action_agent.return_stopped_response(
+        output = self.agent_executor._action_agent.return_stopped_response(  # noqa: SLF001
             self.agent_executor.early_stopping_method,
             self.intermediate_steps,
             **self.inputs,
@@ -404,7 +404,7 @@ class AgentExecutorIterator:
         """
         Return the final output of the iterator.
         """
-        returned_output = self.agent_executor._return(
+        returned_output = self.agent_executor._return(  # noqa: SLF001
             output,
             self.intermediate_steps,
             run_manager=run_manager,
@@ -421,7 +421,7 @@ class AgentExecutorIterator:
         """
         Return the final output of the async iterator.
         """
-        returned_output = await self.agent_executor._areturn(
+        returned_output = await self.agent_executor._areturn(  # noqa: SLF001
             output,
             self.intermediate_steps,
             run_manager=run_manager,
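The AgentExecutorIterator edited above is normally obtained via AgentExecutor.iter(), which yields intermediate agent steps one at a time instead of running the loop to completion. A minimal usage sketch, not part of the diff; the input key and printing are illustrative:

from langchain.agents import AgentExecutor


def run_stepwise(agent_executor: AgentExecutor, question: str) -> None:
    # Each yielded item is an AddableDict carrying "actions", "steps",
    # "intermediate_step", or the final "output", depending on loop progress.
    for step in agent_executor.iter({"input": question}):
        print(step)
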
""" logger.warning("Stopping agent prematurely due to triggering stop condition") - output = self.agent_executor._action_agent.return_stopped_response( + output = self.agent_executor._action_agent.return_stopped_response( # noqa: SLF001 self.agent_executor.early_stopping_method, self.intermediate_steps, **self.inputs, @@ -404,7 +404,7 @@ class AgentExecutorIterator: """ Return the final output of the iterator. """ - returned_output = self.agent_executor._return( + returned_output = self.agent_executor._return( # noqa: SLF001 output, self.intermediate_steps, run_manager=run_manager, @@ -421,7 +421,7 @@ class AgentExecutorIterator: """ Return the final output of the async iterator. """ - returned_output = await self.agent_executor._areturn( + returned_output = await self.agent_executor._areturn( # noqa: SLF001 output, self.intermediate_steps, run_manager=run_manager, diff --git a/libs/langchain/langchain/agents/initialize.py b/libs/langchain/langchain/agents/initialize.py index 38717e89c63..3d8164475ae 100644 --- a/libs/langchain/langchain/agents/initialize.py +++ b/libs/langchain/langchain/agents/initialize.py @@ -90,7 +90,7 @@ def initialize_agent( ) with contextlib.suppress(NotImplementedError): # TODO: Add tags from the serialized object directly. - tags_.append(agent_obj._agent_type) + tags_.append(agent_obj._agent_type) # noqa: SLF001 else: msg = ( "Somehow both `agent` and `agent_path` are None, this should never happen." diff --git a/libs/langchain/langchain/agents/openai_functions_agent/base.py b/libs/langchain/langchain/agents/openai_functions_agent/base.py index b28ec23f635..04d31dd614a 100644 --- a/libs/langchain/langchain/agents/openai_functions_agent/base.py +++ b/libs/langchain/langchain/agents/openai_functions_agent/base.py @@ -134,7 +134,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent): messages, callbacks=callbacks, ) - return self.output_parser._parse_ai_message(predicted_message) + return self.output_parser.parse_ai_message(predicted_message) async def aplan( self, @@ -167,7 +167,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent): functions=self.functions, callbacks=callbacks, ) - return self.output_parser._parse_ai_message(predicted_message) + return self.output_parser.parse_ai_message(predicted_message) def return_stopped_response( self, diff --git a/libs/langchain/langchain/agents/output_parsers/openai_functions.py b/libs/langchain/langchain/agents/output_parsers/openai_functions.py index 0dfb6405913..586cf980466 100644 --- a/libs/langchain/langchain/agents/output_parsers/openai_functions.py +++ b/libs/langchain/langchain/agents/output_parsers/openai_functions.py @@ -30,7 +30,7 @@ class OpenAIFunctionsAgentOutputParser(AgentOutputParser): return "openai-functions-agent" @staticmethod - def _parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]: + def parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]: """Parse an AI message.""" if not isinstance(message, AIMessage): msg = f"Expected an AI message got {type(message)}" @@ -89,7 +89,7 @@ class OpenAIFunctionsAgentOutputParser(AgentOutputParser): msg = "This output parser only works on ChatGeneration output" raise ValueError(msg) # noqa: TRY004 message = result[0].message - return self._parse_ai_message(message) + return self.parse_ai_message(message) def parse(self, text: str) -> Union[AgentAction, AgentFinish]: msg = "Can only parse messages" diff --git a/libs/langchain/langchain/chains/combine_documents/stuff.py 
diff --git a/libs/langchain/langchain/chains/combine_documents/stuff.py b/libs/langchain/langchain/chains/combine_documents/stuff.py
index 0b380dd8bf6..a4cefc8998a 100644
--- a/libs/langchain/langchain/chains/combine_documents/stuff.py
+++ b/libs/langchain/langchain/chains/combine_documents/stuff.py
@@ -239,7 +239,7 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
         """
         inputs = self._get_inputs(docs, **kwargs)
         prompt = self.llm_chain.prompt.format(**inputs)
-        return self.llm_chain._get_num_tokens(prompt)
+        return self.llm_chain._get_num_tokens(prompt)  # noqa: SLF001

     def combine_docs(
         self,
diff --git a/libs/langchain/langchain/chains/conversational_retrieval/base.py b/libs/langchain/langchain/chains/conversational_retrieval/base.py
index 9a8bcdfe758..8a7934472af 100644
--- a/libs/langchain/langchain/chains/conversational_retrieval/base.py
+++ b/libs/langchain/langchain/chains/conversational_retrieval/base.py
@@ -388,7 +388,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
             StuffDocumentsChain,
         ):
             tokens = [
-                self.combine_docs_chain.llm_chain._get_num_tokens(doc.page_content)
+                self.combine_docs_chain.llm_chain._get_num_tokens(doc.page_content)  # noqa: SLF001
                 for doc in docs
             ]
             token_count = sum(tokens[:num_docs])
diff --git a/libs/langchain/langchain/chains/qa_with_sources/retrieval.py b/libs/langchain/langchain/chains/qa_with_sources/retrieval.py
index c27dda97192..9f0e4f4ec64 100644
--- a/libs/langchain/langchain/chains/qa_with_sources/retrieval.py
+++ b/libs/langchain/langchain/chains/qa_with_sources/retrieval.py
@@ -33,7 +33,7 @@ class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain):
             StuffDocumentsChain,
         ):
             tokens = [
-                self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
+                self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)  # noqa: SLF001
                 for doc in docs
             ]
             token_count = sum(tokens[:num_docs])
diff --git a/libs/langchain/langchain/chains/qa_with_sources/vector_db.py b/libs/langchain/langchain/chains/qa_with_sources/vector_db.py
index 566309bde41..26091871e42 100644
--- a/libs/langchain/langchain/chains/qa_with_sources/vector_db.py
+++ b/libs/langchain/langchain/chains/qa_with_sources/vector_db.py
@@ -38,7 +38,7 @@ class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
             StuffDocumentsChain,
         ):
             tokens = [
-                self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
+                self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)  # noqa: SLF001
                 for doc in docs
            ]
             token_count = sum(tokens[:num_docs])
diff --git a/libs/langchain/langchain/output_parsers/combining.py b/libs/langchain/langchain/output_parsers/combining.py
index bf112818184..2db7628be1b 100644
--- a/libs/langchain/langchain/output_parsers/combining.py
+++ b/libs/langchain/langchain/output_parsers/combining.py
@@ -25,10 +25,10 @@ class CombiningOutputParser(BaseOutputParser[dict[str, Any]]):
             msg = "Must have at least two parsers"
             raise ValueError(msg)
         for parser in parsers:
-            if parser._type == "combining":
+            if parser._type == "combining":  # noqa: SLF001
                 msg = "Cannot nest combining parsers"
                 raise ValueError(msg)
-            if parser._type == "list":
+            if parser._type == "list":  # noqa: SLF001
                 msg = "Cannot combine list parsers"
                 raise ValueError(msg)
         return values
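The validator touched above requires a CombiningOutputParser to wrap at least two sub-parsers and rejects nesting another combining parser. A minimal sketch of that behaviour, not part of the diff; the regex parsers are illustrative stand-ins:

from langchain.output_parsers import CombiningOutputParser, RegexParser

sub_parsers = [
    RegexParser(regex=r"Answer:\s*(.*)", output_keys=["answer"]),
    RegexParser(regex=r"Confidence:\s*(\d+)", output_keys=["confidence"]),
]
combined = CombiningOutputParser(parsers=sub_parsers)  # passes validation

# Nesting a combining parser inside another one is rejected by the validator.
try:
    CombiningOutputParser(parsers=[combined, sub_parsers[0]])
except ValueError as err:
    print(err)  # message includes "Cannot nest combining parsers"
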
diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml
index 04a06d44351..135c1b15bf3 100644
--- a/libs/langchain/pyproject.toml
+++ b/libs/langchain/pyproject.toml
@@ -200,7 +200,6 @@ ignore = [
     "PLR09", # Too many something (args, statements, etc)
     "S112", # Rarely useful
     "RUF012", # Doesn't play well with Pydantic
-    "SLF001", # Private member access
     "UP007", # pyupgrade: non-pep604-annotation-union

     # TODO rules
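With "SLF001" removed from the project-wide ignore list, Ruff now flags private-member access everywhere, which is why the call sites in this diff gain per-line suppressions instead. A minimal illustrative sketch of that pattern (hypothetical module, not from the diff):

class _Counter:
    """Toy class with a private helper."""

    def __init__(self) -> None:
        self._value = 0

    def _bump(self) -> int:
        self._value += 1
        return self._value


counter = _Counter()
# Accessing a private member from outside the class triggers SLF001;
# the inline noqa suppresses only this occurrence.
total = counter._bump()  # noqa: SLF001
print(total)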