diff --git a/libs/langchain/langchain/agents/agent.py b/libs/langchain/langchain/agents/agent.py
index 70ae7f3abf5..f13f668bfce 100644
--- a/libs/langchain/langchain/agents/agent.py
+++ b/libs/langchain/langchain/agents/agent.py
@@ -1013,7 +1013,7 @@ class Agent(BaseSingleActionAgent):
         }


-class ExceptionTool(BaseTool):  # type: ignore[override]
+class ExceptionTool(BaseTool):
     """Tool that just returns the query."""

     name: str = "_Exception"
@@ -1129,7 +1129,7 @@ class AgentExecutor(Chain):
         """
         agent = self.agent
         tools = self.tools
-        allowed_tools = agent.get_allowed_tools()  # type: ignore
+        allowed_tools = agent.get_allowed_tools()  # type: ignore[union-attr]
         if allowed_tools is not None:
             if set(allowed_tools) != set([tool.name for tool in tools]):
                 raise ValueError(
diff --git a/libs/langchain/langchain/agents/chat/base.py b/libs/langchain/langchain/agents/chat/base.py
index 7e84962240a..19488b3b552 100644
--- a/libs/langchain/langchain/agents/chat/base.py
+++ b/libs/langchain/langchain/agents/chat/base.py
@@ -120,7 +120,7 @@ class ChatAgent(Agent):
         ]
         if input_variables is None:
             input_variables = ["input", "agent_scratchpad"]
-        return ChatPromptTemplate(input_variables=input_variables, messages=messages)  # type: ignore[arg-type]
+        return ChatPromptTemplate(input_variables=input_variables, messages=messages)

     @classmethod
     def from_llm_and_tools(
diff --git a/libs/langchain/langchain/agents/conversational/base.py b/libs/langchain/langchain/agents/conversational/base.py
index 76217968bde..32846b0c85f 100644
--- a/libs/langchain/langchain/agents/conversational/base.py
+++ b/libs/langchain/langchain/agents/conversational/base.py
@@ -154,7 +154,7 @@ class ConversationalAgent(Agent):
             format_instructions=format_instructions,
             input_variables=input_variables,
         )
-        llm_chain = LLMChain(  # type: ignore[misc]
+        llm_chain = LLMChain(
             llm=llm,
             prompt=prompt,
             callback_manager=callback_manager,
diff --git a/libs/langchain/langchain/agents/conversational_chat/base.py b/libs/langchain/langchain/agents/conversational_chat/base.py
index a03a461d0c5..35bf4bffbd9 100644
--- a/libs/langchain/langchain/agents/conversational_chat/base.py
+++ b/libs/langchain/langchain/agents/conversational_chat/base.py
@@ -114,7 +114,7 @@ class ConversationalChatAgent(Agent):
             HumanMessagePromptTemplate.from_template(final_prompt),
             MessagesPlaceholder(variable_name="agent_scratchpad"),
         ]
-        return ChatPromptTemplate(input_variables=input_variables, messages=messages)  # type: ignore[arg-type]
+        return ChatPromptTemplate(input_variables=input_variables, messages=messages)

     def _construct_scratchpad(
         self, intermediate_steps: list[tuple[AgentAction, str]]
@@ -165,7 +165,7 @@ class ConversationalChatAgent(Agent):
             input_variables=input_variables,
             output_parser=_output_parser,
         )
-        llm_chain = LLMChain(  # type: ignore[misc]
+        llm_chain = LLMChain(
             llm=llm,
             prompt=prompt,
             callback_manager=callback_manager,
diff --git a/libs/langchain/langchain/agents/loading.py b/libs/langchain/langchain/agents/loading.py
index 9b4263fff2e..08fc6fa8782 100644
--- a/libs/langchain/langchain/agents/loading.py
+++ b/libs/langchain/langchain/agents/loading.py
@@ -86,7 +86,7 @@ def load_agent_from_config(
         del config["output_parser"]

     combined_config = {**config, **kwargs}
-    return agent_cls(**combined_config)  # type: ignore
+    return agent_cls(**combined_config)


 @deprecated("0.1.0", removal="1.0")
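The change repeated throughout this patch: blanket `# type: ignore` comments are either deleted or narrowed to a specific error code. A minimal standalone sketch (not taken from the patch) of why the narrowed form is safer once mypy's `warn_unused_ignores` is on:

```python
# Hypothetical example: a function typed like Agent.get_allowed_tools().
from typing import Optional


def get_allowed_tools() -> Optional[list[str]]:
    return None


# A bare ignore silences every current and future error on this line:
joined = ", ".join(get_allowed_tools())  # type: ignore

# A qualified ignore silences only the named code; other mistakes on the
# same line still get reported, and mypy flags the comment itself as
# unused once the underlying error goes away:
joined_again = ", ".join(get_allowed_tools())  # type: ignore[arg-type]
```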
diff --git a/libs/langchain/langchain/agents/mrkl/base.py b/libs/langchain/langchain/agents/mrkl/base.py
index bc63080c0c7..727ca66b390 100644
--- a/libs/langchain/langchain/agents/mrkl/base.py
+++ b/libs/langchain/langchain/agents/mrkl/base.py
@@ -144,7 +144,7 @@ class ZeroShotAgent(Agent):
             format_instructions=format_instructions,
             input_variables=input_variables,
         )
-        llm_chain = LLMChain(  # type: ignore[misc]
+        llm_chain = LLMChain(
             llm=llm,
             prompt=prompt,
             callback_manager=callback_manager,
diff --git a/libs/langchain/langchain/agents/openai_assistant/base.py b/libs/langchain/langchain/agents/openai_assistant/base.py
index acc681f68f9..363a6da86e0 100644
--- a/libs/langchain/langchain/agents/openai_assistant/base.py
+++ b/libs/langchain/langchain/agents/openai_assistant/base.py
@@ -127,7 +127,7 @@ def _get_assistants_tool(
     such as "code_interpreter" and "file_search".
     """
     if _is_assistants_builtin_tool(tool):
-        return tool  # type: ignore
+        return tool  # type: ignore[return-value]
     else:
         return convert_to_openai_tool(tool)

@@ -267,7 +267,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
         assistant = client.beta.assistants.create(
             name=name,
             instructions=instructions,
-            tools=[_get_assistants_tool(tool) for tool in tools],  # type: ignore
+            tools=[_get_assistants_tool(tool) for tool in tools],  # type: ignore[misc]
             model=model,
         )
         return cls(assistant_id=assistant.id, client=client, **kwargs)
@@ -394,7 +394,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
         assistant = await async_client.beta.assistants.create(
             name=name,
             instructions=instructions,
-            tools=openai_tools,  # type: ignore
+            tools=openai_tools,  # type: ignore[arg-type]
             model=model,
         )
         return cls(assistant_id=assistant.id, async_client=async_client, **kwargs)
diff --git a/libs/langchain/langchain/agents/openai_functions_agent/agent_token_buffer_memory.py b/libs/langchain/langchain/agents/openai_functions_agent/agent_token_buffer_memory.py
index 57370651da2..284e0e72c93 100644
--- a/libs/langchain/langchain/agents/openai_functions_agent/agent_token_buffer_memory.py
+++ b/libs/langchain/langchain/agents/openai_functions_agent/agent_token_buffer_memory.py
@@ -12,7 +12,7 @@ from langchain.agents.format_scratchpad import (
 )
 from langchain.memory.chat_memory import BaseChatMemory


-class AgentTokenBufferMemory(BaseChatMemory):  # type: ignore[override]
+class AgentTokenBufferMemory(BaseChatMemory):
     """Memory used to save agent output AND intermediate steps.

     Parameters:
diff --git a/libs/langchain/langchain/agents/openai_functions_agent/base.py b/libs/langchain/langchain/agents/openai_functions_agent/base.py
index c1a3e6e9d6c..8442f226f51 100644
--- a/libs/langchain/langchain/agents/openai_functions_agent/base.py
+++ b/libs/langchain/langchain/agents/openai_functions_agent/base.py
@@ -241,7 +241,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
                 MessagesPlaceholder(variable_name="agent_scratchpad"),
             ]
         )
-        return ChatPromptTemplate(messages=messages)  # type: ignore[arg-type, call-arg]
+        return ChatPromptTemplate(messages=messages)

     @classmethod
     def from_llm_and_tools(
diff --git a/libs/langchain/langchain/agents/openai_functions_multi_agent/base.py b/libs/langchain/langchain/agents/openai_functions_multi_agent/base.py
index b4412d2c964..2931f7e7519 100644
--- a/libs/langchain/langchain/agents/openai_functions_multi_agent/base.py
+++ b/libs/langchain/langchain/agents/openai_functions_multi_agent/base.py
@@ -286,7 +286,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
                 MessagesPlaceholder(variable_name="agent_scratchpad"),
             ]
         )
-        return ChatPromptTemplate(messages=messages)  # type: ignore[arg-type, call-arg]
+        return ChatPromptTemplate(messages=messages)

     @classmethod
     def from_llm_and_tools(
diff --git a/libs/langchain/langchain/agents/output_parsers/tools.py b/libs/langchain/langchain/agents/output_parsers/tools.py
index a8fef36fa6b..b7d28e69bd0 100644
--- a/libs/langchain/langchain/agents/output_parsers/tools.py
+++ b/libs/langchain/langchain/agents/output_parsers/tools.py
@@ -14,7 +14,7 @@ from langchain_core.outputs import ChatGeneration, Generation
 from langchain.agents.agent import MultiActionAgentOutputParser


-class ToolAgentAction(AgentActionMessageLog):  # type: ignore[override]
+class ToolAgentAction(AgentActionMessageLog):
     tool_call_id: str
     """Tool call that this message is responding to."""
diff --git a/libs/langchain/langchain/agents/tools.py b/libs/langchain/langchain/agents/tools.py
index a71140fdb27..9591fffb710 100644
--- a/libs/langchain/langchain/agents/tools.py
+++ b/libs/langchain/langchain/agents/tools.py
@@ -9,7 +9,7 @@ from langchain_core.callbacks import (
 )
 from langchain_core.tools import BaseTool, tool


-class InvalidTool(BaseTool):  # type: ignore[override]
+class InvalidTool(BaseTool):
     """Tool that is run when invalid tool name is encountered by agent."""

     name: str = "invalid_tool"
diff --git a/libs/langchain/langchain/chains/api/base.py b/libs/langchain/langchain/chains/api/base.py
index 8521b61e3f4..34dcb6c4072 100644
--- a/libs/langchain/langchain/chains/api/base.py
+++ b/libs/langchain/langchain/chains/api/base.py
@@ -199,9 +199,7 @@ try:
         api_docs: str
         question_key: str = "question"  #: :meta private:
         output_key: str = "output"  #: :meta private:
-        limit_to_domains: Optional[Sequence[str]] = Field(
-            default_factory=list  # type: ignore
-        )
+        limit_to_domains: Optional[Sequence[str]] = Field(default_factory=list)  # type: ignore[arg-type]
         """Use to limit the domains that can be accessed by the API chain.

         * For example, to limit to just the domain `https://www.example.com`, set
diff --git a/libs/langchain/langchain/chains/base.py b/libs/langchain/langchain/chains/base.py
index 313702d4c1b..2bae805f070 100644
--- a/libs/langchain/langchain/chains/base.py
+++ b/libs/langchain/langchain/chains/base.py
@@ -110,17 +110,13 @@ class Chain(RunnableSerializable[dict[str, Any], dict[str, Any]], ABC):
         self, config: Optional[RunnableConfig] = None
     ) -> type[BaseModel]:
         # This is correct, but pydantic typings/mypy don't think so.
-        return create_model(  # type: ignore[call-overload]
-            "ChainInput", **{k: (Any, None) for k in self.input_keys}
-        )
+        return create_model("ChainInput", **{k: (Any, None) for k in self.input_keys})

     def get_output_schema(
         self, config: Optional[RunnableConfig] = None
     ) -> type[BaseModel]:
         # This is correct, but pydantic typings/mypy don't think so.
-        return create_model(  # type: ignore[call-overload]
-            "ChainOutput", **{k: (Any, None) for k in self.output_keys}
-        )
+        return create_model("ChainOutput", **{k: (Any, None) for k in self.output_keys})

     def invoke(
         self,
diff --git a/libs/langchain/langchain/chains/combine_documents/base.py b/libs/langchain/langchain/chains/combine_documents/base.py
index 7ca1995ad60..ba2afb7c226 100644
--- a/libs/langchain/langchain/chains/combine_documents/base.py
+++ b/libs/langchain/langchain/chains/combine_documents/base.py
@@ -50,7 +50,7 @@ class BaseCombineDocumentsChain(Chain, ABC):
     ) -> type[BaseModel]:
         return create_model(
             "CombineDocumentsInput",
-            **{self.input_key: (list[Document], None)},  # type: ignore[call-overload]
+            **{self.input_key: (list[Document], None)},
         )

     def get_output_schema(
@@ -58,7 +58,7 @@ class BaseCombineDocumentsChain(Chain, ABC):
     ) -> type[BaseModel]:
         return create_model(
             "CombineDocumentsOutput",
-            **{self.output_key: (str, None)},  # type: ignore[call-overload]
+            **{self.output_key: (str, None)},
         )

     @property
@@ -249,7 +249,7 @@ class AnalyzeDocumentChain(Chain):
     ) -> type[BaseModel]:
         return create_model(
             "AnalyzeDocumentChain",
-            **{self.input_key: (str, None)},  # type: ignore[call-overload]
+            **{self.input_key: (str, None)},
         )

     def get_output_schema(
diff --git a/libs/langchain/langchain/chains/combine_documents/map_reduce.py b/libs/langchain/langchain/chains/combine_documents/map_reduce.py
index f36f760de13..b042ed72422 100644
--- a/libs/langchain/langchain/chains/combine_documents/map_reduce.py
+++ b/libs/langchain/langchain/chains/combine_documents/map_reduce.py
@@ -120,7 +120,7 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain):
                 **{
                     self.output_key: (str, None),
                     "intermediate_steps": (list[str], None),
-                },  # type: ignore[call-overload]
+                },
             )
         return super().get_output_schema(config)
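The `create_model` hunks above drop `call-overload` ignores that newer mypy no longer needs. For reference, a sketch (assuming the pydantic v2 API) of the dynamic-schema pattern `Chain.get_input_schema` relies on: `create_model` builds a `BaseModel` subclass from keyword arguments whose values are `(annotation, default)` tuples.

```python
from typing import Any

from pydantic import create_model

# Mirrors the pattern in Chain.get_input_schema, with made-up keys.
input_keys = ["question", "context"]
ChainInput = create_model("ChainInput", **{k: (Any, None) for k in input_keys})

print(ChainInput(question="hi").model_dump())  # {'question': 'hi', 'context': None}
```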
diff --git a/libs/langchain/langchain/chains/conversation/base.py b/libs/langchain/langchain/chains/conversation/base.py
index 640da04c775..8f4e295cf05 100644
--- a/libs/langchain/langchain/chains/conversation/base.py
+++ b/libs/langchain/langchain/chains/conversation/base.py
@@ -19,7 +19,7 @@ from langchain.memory.buffer import ConversationBufferMemory
     ),
     removal="1.0",
 )
-class ConversationChain(LLMChain):  # type: ignore[override, override]
+class ConversationChain(LLMChain):
     """Chain to have a conversation and load context from memory.

     This class is deprecated in favor of ``RunnableWithMessageHistory``. Please refer
diff --git a/libs/langchain/langchain/chains/elasticsearch_database/base.py b/libs/langchain/langchain/chains/elasticsearch_database/base.py
index 5a03e51eee8..acf1e8f88ab 100644
--- a/libs/langchain/langchain/chains/elasticsearch_database/base.py
+++ b/libs/langchain/langchain/chains/elasticsearch_database/base.py
@@ -170,7 +170,7 @@ class ElasticsearchDatabaseChain(Chain):
         except Exception as exc:
             # Append intermediate steps to exception, to aid in logging and later
             # improvement of few shot prompt seeds
-            exc.intermediate_steps = intermediate_steps  # type: ignore
+            exc.intermediate_steps = intermediate_steps  # type: ignore[attr-defined]
             raise exc

     @property
diff --git a/libs/langchain/langchain/chains/loading.py b/libs/langchain/langchain/chains/loading.py
index 2bba3b82d1b..3ce5f4b4379 100644
--- a/libs/langchain/langchain/chains/loading.py
+++ b/libs/langchain/langchain/chains/loading.py
@@ -39,16 +39,14 @@ try:
     from langchain_community.llms.loading import load_llm, load_llm_from_config
 except ImportError:

-    def load_llm(*args: Any, **kwargs: Any) -> None:  # type: ignore
+    def load_llm(*args: Any, **kwargs: Any) -> None:
         raise ImportError(
             "To use this load_llm functionality you must install the "
             "langchain_community package. "
             "You can install it with `pip install langchain_community`"
         )

-    def load_llm_from_config(  # type: ignore
-        *args: Any, **kwargs: Any
-    ) -> None:
+    def load_llm_from_config(*args: Any, **kwargs: Any) -> None:
         raise ImportError(
             "To use this load_llm_from_config functionality you must install the "
             "langchain_community package. "
@@ -95,9 +93,9 @@ def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedde
     else:
         raise ValueError("`embeddings` must be present.")
     return HypotheticalDocumentEmbedder(
-        llm_chain=llm_chain,  # type: ignore[arg-type]
+        llm_chain=llm_chain,
         base_embeddings=embeddings,
-        **config,  # type: ignore[arg-type]
+        **config,
     )


@@ -160,7 +158,7 @@ def _load_map_reduce_documents_chain(
     )


-def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocumentsChain:  # type: ignore[valid-type]
+def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocumentsChain:
     combine_documents_chain = None
     collapse_documents_chain = None

@@ -213,7 +211,7 @@ def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocuments
             config.pop("collapse_document_chain_path"), **kwargs
         )

-    return ReduceDocumentsChain(  # type: ignore[misc]
+    return ReduceDocumentsChain(
         combine_documents_chain=combine_documents_chain,
         collapse_documents_chain=collapse_documents_chain,
         **config,
@@ -245,7 +243,7 @@ def _load_llm_bash_chain(config: dict, **kwargs: Any) -> Any:
     elif "prompt_path" in config:
         prompt = load_prompt(config.pop("prompt_path"))
     if llm_chain:
-        return LLMBashChain(llm_chain=llm_chain, prompt=prompt, **config)  # type: ignore[arg-type]
+        return LLMBashChain(llm_chain=llm_chain, prompt=prompt, **config)
     else:
         return LLMBashChain(llm=llm, prompt=prompt, **config)

@@ -347,7 +345,7 @@ def _load_pal_chain(config: dict, **kwargs: Any) -> Any:
         llm_chain = load_chain(config.pop("llm_chain_path"), **kwargs)
     else:
         raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
-    return PALChain(llm_chain=llm_chain, **config)  # type: ignore[arg-type]
+    return PALChain(llm_chain=llm_chain, **config)


 def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
@@ -410,7 +408,7 @@ def _load_sql_database_chain(config: dict, **kwargs: Any) -> Any:
     if "llm_chain" in config:
         llm_chain_config = config.pop("llm_chain")
         chain = load_chain_from_config(llm_chain_config, **kwargs)
-        return SQLDatabaseChain(llm_chain=chain, database=database, **config)  # type: ignore[arg-type]
+        return SQLDatabaseChain(llm_chain=chain, database=database, **config)
     if "llm" in config:
         llm_config = config.pop("llm")
         llm = load_llm_from_config(llm_config, **kwargs)
@@ -563,8 +561,8 @@ def _load_graph_cypher_chain(config: dict, **kwargs: Any) -> GraphCypherQAChain:
     )
     return GraphCypherQAChain(
         graph=graph,
-        cypher_generation_chain=cypher_generation_chain,  # type: ignore[arg-type]
-        qa_chain=qa_chain,  # type: ignore[arg-type]
+        cypher_generation_chain=cypher_generation_chain,
+        qa_chain=qa_chain,
         **config,
     )
diff --git a/libs/langchain/langchain/chains/openai_functions/base.py b/libs/langchain/langchain/chains/openai_functions/base.py
index 2aaa56f3bd8..ca191a3514f 100644
--- a/libs/langchain/langchain/chains/openai_functions/base.py
+++ b/libs/langchain/langchain/chains/openai_functions/base.py
@@ -51,7 +51,7 @@ def create_openai_fn_chain(
     output_key: str = "function",
     output_parser: Optional[BaseLLMOutputParser] = None,
     **kwargs: Any,
-) -> LLMChain:  # type: ignore[valid-type]
+) -> LLMChain:
     """[Legacy] Create an LLM chain that uses OpenAI functions.

     Args:
@@ -131,7 +131,7 @@ def create_openai_fn_chain(
     }
     if len(openai_functions) == 1 and enforce_single_function_usage:
         llm_kwargs["function_call"] = {"name": openai_functions[0]["name"]}
-    llm_chain = LLMChain(  # type: ignore[misc]
+    llm_chain = LLMChain(
         llm=llm,
         prompt=prompt,
         output_parser=output_parser,
@@ -153,7 +153,7 @@ def create_structured_output_chain(
     output_key: str = "function",
     output_parser: Optional[BaseLLMOutputParser] = None,
     **kwargs: Any,
-) -> LLMChain:  # type: ignore[valid-type]
+) -> LLMChain:
     """[Legacy] Create an LLMChain that uses an OpenAI function to get a structured output.

     Args:
@@ -216,7 +216,7 @@ def create_structured_output_chain(
     class _OutputFormatter(BaseModel):
         """Output formatter.

         Should always be used to format your response to the user."""  # noqa: E501

-        output: output_schema  # type: ignore
+        output: output_schema  # type: ignore[valid-type]

     function = _OutputFormatter
     output_parser = output_parser or PydanticAttrOutputFunctionsParser(
diff --git a/libs/langchain/langchain/chains/openai_functions/citation_fuzzy_match.py b/libs/langchain/langchain/chains/openai_functions/citation_fuzzy_match.py
index 33b58f846df..674a0809cb0 100644
--- a/libs/langchain/langchain/chains/openai_functions/citation_fuzzy_match.py
+++ b/libs/langchain/langchain/chains/openai_functions/citation_fuzzy_match.py
@@ -148,7 +148,7 @@ def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
             )
         ),
     ]
-    prompt = ChatPromptTemplate(messages=messages)  # type: ignore[arg-type, call-arg]
+    prompt = ChatPromptTemplate(messages=messages)  # type: ignore[arg-type]

     chain = LLMChain(
         llm=llm,
diff --git a/libs/langchain/langchain/chains/openai_functions/extraction.py b/libs/langchain/langchain/chains/openai_functions/extraction.py
index b62caefb905..a549688c64d 100644
--- a/libs/langchain/langchain/chains/openai_functions/extraction.py
+++ b/libs/langchain/langchain/chains/openai_functions/extraction.py
@@ -170,7 +170,7 @@ def create_extraction_chain_pydantic(
     """

     class PydanticSchema(BaseModel):
-        info: list[pydantic_schema]  # type: ignore
+        info: list[pydantic_schema]

     if hasattr(pydantic_schema, "model_json_schema"):
         openai_schema = pydantic_schema.model_json_schema()
diff --git a/libs/langchain/langchain/chains/openai_functions/openapi.py b/libs/langchain/langchain/chains/openai_functions/openapi.py
index 36e5a93c162..d177c011739 100644
--- a/libs/langchain/langchain/chains/openai_functions/openapi.py
+++ b/libs/langchain/langchain/chains/openai_functions/openapi.py
@@ -77,7 +77,7 @@ def _openapi_params_to_json_schema(params: list[Parameter], spec: OpenAPISpec) -
         if p.param_schema:
             schema = spec.get_schema(p.param_schema)
         else:
-            media_type_schema = list(p.content.values())[0].media_type_schema  # type: ignore
+            media_type_schema = list(p.content.values())[0].media_type_schema
             schema = spec.get_schema(media_type_schema)
         if p.description and not schema.description:
             schema.description = p.description
@@ -363,7 +363,7 @@ def get_openapi_chain(
         OpenAPISpec.from_text,
     ):
         try:
-            spec = conversion(spec)  # type: ignore[arg-type]
+            spec = conversion(spec)
             break
         except ImportError as e:
             raise e
diff --git a/libs/langchain/langchain/chains/openai_functions/qa_with_structure.py b/libs/langchain/langchain/chains/openai_functions/qa_with_structure.py
index 435964510ef..f76a2813416 100644
--- a/libs/langchain/langchain/chains/openai_functions/qa_with_structure.py
+++ b/libs/langchain/langchain/chains/openai_functions/qa_with_structure.py
@@ -96,7 +96,7 @@ def create_qa_with_structure_chain(
         HumanMessagePromptTemplate.from_template("Question: {question}"),
         HumanMessage(content="Tips: Make sure to answer in the correct format"),
     ]
-    prompt = prompt or ChatPromptTemplate(messages=messages)  # type: ignore[arg-type, call-arg]
+    prompt = prompt or ChatPromptTemplate(messages=messages)  # type: ignore[arg-type]

     chain = LLMChain(
         llm=llm,
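One ignore in this area is kept but narrowed: `output: output_schema` annotates a field with a *variable* that holds a class, which pydantic resolves at runtime but mypy rejects with the `valid-type` code. A standalone sketch of that situation:

```python
from pydantic import BaseModel


def make_formatter(output_schema: type[BaseModel]) -> type[BaseModel]:
    """Illustration only: dynamic annotations work at runtime, not statically."""

    class _OutputFormatter(BaseModel):
        # mypy: Variable "output_schema" is not valid as a type  [valid-type]
        output: output_schema  # type: ignore[valid-type]

    return _OutputFormatter
```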
diff --git a/libs/langchain/langchain/chains/qa_with_sources/base.py b/libs/langchain/langchain/chains/qa_with_sources/base.py
index e44b77a35bc..7b2b371ee00 100644
--- a/libs/langchain/langchain/chains/qa_with_sources/base.py
+++ b/libs/langchain/langchain/chains/qa_with_sources/base.py
@@ -69,7 +69,7 @@ class BaseQAWithSourcesChain(Chain, ABC):
             document_prompt=document_prompt,
             document_variable_name="summaries",
         )
-        reduce_documents_chain = ReduceDocumentsChain(  # type: ignore[misc]
+        reduce_documents_chain = ReduceDocumentsChain(
             combine_documents_chain=combine_results_chain
         )
         combine_documents_chain = MapReduceDocumentsChain(
diff --git a/libs/langchain/langchain/chains/query_constructor/parser.py b/libs/langchain/langchain/chains/query_constructor/parser.py
index 948107bc01d..8e39fd1dc39 100644
--- a/libs/langchain/langchain/chains/query_constructor/parser.py
+++ b/libs/langchain/langchain/chains/query_constructor/parser.py
@@ -11,12 +11,12 @@ try:
     from lark import Lark, Transformer, v_args
 except ImportError:

-    def v_args(*args: Any, **kwargs: Any) -> Any:  # type: ignore
+    def v_args(*args: Any, **kwargs: Any) -> Any:  # type: ignore[misc]
        """Dummy decorator for when lark is not installed."""
        return lambda _: None

-    Transformer = object  # type: ignore
-    Lark = object  # type: ignore
+    Transformer = object  # type: ignore[assignment,misc]
+    Lark = object  # type: ignore[assignment,misc]

 from langchain_core.structured_query import (
     Comparator,
diff --git a/libs/langchain/langchain/chains/question_answering/chain.py b/libs/langchain/langchain/chains/question_answering/chain.py
index e2fd1790183..8dbf6934f4b 100644
--- a/libs/langchain/langchain/chains/question_answering/chain.py
+++ b/libs/langchain/langchain/chains/question_answering/chain.py
@@ -156,7 +156,7 @@ def _load_map_reduce_chain(
         verbose=verbose,  # type: ignore[arg-type]
         callback_manager=callback_manager,
     )
-    reduce_documents_chain = ReduceDocumentsChain(  # type: ignore[misc]
+    reduce_documents_chain = ReduceDocumentsChain(
         combine_documents_chain=combine_documents_chain,
         collapse_documents_chain=collapse_chain,
         token_max=token_max,
diff --git a/libs/langchain/langchain/chains/router/multi_retrieval_qa.py b/libs/langchain/langchain/chains/router/multi_retrieval_qa.py
index ea9906f08cb..fe900d86be4 100644
--- a/libs/langchain/langchain/chains/router/multi_retrieval_qa.py
+++ b/libs/langchain/langchain/chains/router/multi_retrieval_qa.py
@@ -20,7 +20,7 @@ from langchain.chains.router.multi_retrieval_prompt import (
 )


-class MultiRetrievalQAChain(MultiRouteChain):  # type: ignore[override]
+class MultiRetrievalQAChain(MultiRouteChain):
     """A multi-route chain that uses an LLM router chain to choose amongst retrieval
     qa chains."""
diff --git a/libs/langchain/langchain/chains/sql_database/query.py b/libs/langchain/langchain/chains/sql_database/query.py
index b422206d40d..a9123538257 100644
--- a/libs/langchain/langchain/chains/sql_database/query.py
+++ b/libs/langchain/langchain/chains/sql_database/query.py
@@ -134,7 +134,7 @@ def create_sql_query_chain(
         ),
     }
     return (
-        RunnablePassthrough.assign(**inputs)  # type: ignore
+        RunnablePassthrough.assign(**inputs)  # type: ignore[return-value]
         | (
             lambda x: {
                 k: v
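The `query_constructor/parser.py` hunk above shows why some ignores list several codes: redefining an imported class in an `ImportError` fallback trips two distinct mypy checks at once. A standalone sketch of the pattern:

```python
try:
    from lark import Lark, Transformer
except ImportError:
    # Assigning `object` to names mypy knows as classes raises both
    # "Cannot assign to a type" [misc] and an incompatible-assignment
    # error [assignment]; listing both codes keeps everything else checked.
    Transformer = object  # type: ignore[assignment,misc]
    Lark = object  # type: ignore[assignment,misc]
```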
diff --git a/libs/langchain/langchain/chains/structured_output/base.py b/libs/langchain/langchain/chains/structured_output/base.py
index fc5d5fe301b..557e81884bb 100644
--- a/libs/langchain/langchain/chains/structured_output/base.py
+++ b/libs/langchain/langchain/chains/structured_output/base.py
@@ -520,7 +520,7 @@ def _create_openai_json_runnable(
     """"""
     if isinstance(output_schema, type) and is_basemodel_subclass(output_schema):
         output_parser = output_parser or PydanticOutputParser(
-            pydantic_object=output_schema,  # type: ignore
+            pydantic_object=output_schema,
         )
         schema_as_dict = convert_to_openai_function(output_schema)["parameters"]
     else:
@@ -559,7 +559,7 @@ def _create_openai_functions_structured_output_runnable(
     class _OutputFormatter(BaseModel):
         """Output formatter.

         Should always be used to format your response to the user."""  # noqa: E501

-        output: output_schema  # type: ignore
+        output: output_schema  # type: ignore[valid-type]

     function = _OutputFormatter
     output_parser = output_parser or PydanticAttrOutputFunctionsParser(
diff --git a/libs/langchain/langchain/chains/summarize/chain.py b/libs/langchain/langchain/chains/summarize/chain.py
index b486981c2fa..b5bccbd2d10 100644
--- a/libs/langchain/langchain/chains/summarize/chain.py
+++ b/libs/langchain/langchain/chains/summarize/chain.py
@@ -62,14 +62,14 @@ def _load_map_reduce_chain(
         llm=llm,
         prompt=map_prompt,
         verbose=verbose,  # type: ignore[arg-type]
-        callbacks=callbacks,  # type: ignore[arg-type]
+        callbacks=callbacks,
     )
     _reduce_llm = reduce_llm or llm
     reduce_chain = LLMChain(
         llm=_reduce_llm,
         prompt=combine_prompt,
         verbose=verbose,  # type: ignore[arg-type]
-        callbacks=callbacks,  # type: ignore[arg-type]
+        callbacks=callbacks,
     )
     # TODO: document prompt
     combine_documents_chain = StuffDocumentsChain(
diff --git a/libs/langchain/langchain/chat_models/base.py b/libs/langchain/langchain/chat_models/base.py
index bc51a656243..bb8b899a58b 100644
--- a/libs/langchain/langchain/chat_models/base.py
+++ b/libs/langchain/langchain/chat_models/base.py
@@ -41,7 +41,7 @@ __all__ = [


 @overload
-def init_chat_model(  # type: ignore[overload-overlap]
+def init_chat_model(
     model: str,
     *,
     model_provider: Optional[str] = None,
@@ -347,7 +347,7 @@ def _init_chat_model_helper(
         _check_pkg("langchain_anthropic")
         from langchain_anthropic import ChatAnthropic

-        return ChatAnthropic(model=model, **kwargs)  # type: ignore[call-arg]
+        return ChatAnthropic(model=model, **kwargs)  # type: ignore[call-arg,unused-ignore]
     elif model_provider == "azure_openai":
         _check_pkg("langchain_openai")
         from langchain_openai import AzureChatOpenAI
@@ -402,7 +402,7 @@ def _init_chat_model_helper(
         _check_pkg("langchain_mistralai")
         from langchain_mistralai import ChatMistralAI

-        return ChatMistralAI(model=model, **kwargs)  # type: ignore[call-arg]
+        return ChatMistralAI(model=model, **kwargs)  # type: ignore[call-arg,unused-ignore]
     elif model_provider == "huggingface":
         _check_pkg("langchain_huggingface")
         from langchain_huggingface import ChatHuggingFace
diff --git a/libs/langchain/langchain/embeddings/__init__.py b/libs/langchain/langchain/embeddings/__init__.py
index 08bb679a552..8c8ef661c53 100644
--- a/libs/langchain/langchain/embeddings/__init__.py
+++ b/libs/langchain/langchain/embeddings/__init__.py
@@ -87,7 +87,7 @@ class HypotheticalDocumentEmbedder:
         )
         from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H

-        return H(*args, **kwargs)  # type: ignore
+        return H(*args, **kwargs)  # type: ignore[return-value]

     @classmethod
     def from_llm(cls, *args: Any, **kwargs: Any) -> Any:
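The two `init_chat_model` hunks above add `unused-ignore` alongside `call-arg`. That combination matters once `warn_unused_ignores` is on: whether the `call-arg` error fires depends on which optional provider package is installed, so the ignore is only sometimes needed. Listing `unused-ignore` tells mypy not to report the comment as stale in environments where it is unnecessary. A standalone sketch:

```python
def greet(name: str) -> str:
    return "hello " + name


# The call is fine, so [arg-type] is unused here. With warn_unused_ignores,
# a plain `# type: ignore[arg-type]` would itself be flagged; adding
# `unused-ignore` suppresses that report without widening the ignore:
message = greet("world")  # type: ignore[arg-type, unused-ignore]
```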
diff --git a/libs/langchain/langchain/evaluation/comparison/eval_chain.py b/libs/langchain/langchain/evaluation/comparison/eval_chain.py
index d3a1221e25c..022f335d534 100644
--- a/libs/langchain/langchain/evaluation/comparison/eval_chain.py
+++ b/libs/langchain/langchain/evaluation/comparison/eval_chain.py
@@ -95,7 +95,7 @@ def resolve_pairwise_criteria(
     return criteria_


-class PairwiseStringResultOutputParser(BaseOutputParser[dict]):  # type: ignore[override]
+class PairwiseStringResultOutputParser(BaseOutputParser[dict]):
     """A parser for the output of the PairwiseStringEvalChain.

     Attributes:
@@ -151,7 +151,7 @@ class PairwiseStringResultOutputParser(BaseOutputParser[dict]):  # type: ignore[override]
         }


-class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain):  # type: ignore[override]
+class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain):
     """A chain for comparing two outputs, such as the outputs
     of two models, prompts, or outputs of a single model on similar inputs.
@@ -391,7 +391,7 @@ Performance may be significantly worse with other models."
         return self._prepare_output(result)


-class LabeledPairwiseStringEvalChain(PairwiseStringEvalChain):  # type: ignore[override]
+class LabeledPairwiseStringEvalChain(PairwiseStringEvalChain):
     """A chain for comparing two outputs, such as the outputs
     of two models, prompts, or outputs of a single model on similar inputs,
     with labeled preferences.
diff --git a/libs/langchain/langchain/evaluation/criteria/eval_chain.py b/libs/langchain/langchain/evaluation/criteria/eval_chain.py
index 1189850cf75..064bb82747e 100644
--- a/libs/langchain/langchain/evaluation/criteria/eval_chain.py
+++ b/libs/langchain/langchain/evaluation/criteria/eval_chain.py
@@ -165,7 +165,7 @@ def resolve_criteria(
     return criteria_


-class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):  # type: ignore[override]
+class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
     """LLM Chain for evaluating runs against criteria.

     Parameters
@@ -509,7 +509,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):  # type: ignore[override]
         return self._prepare_output(result)


-class LabeledCriteriaEvalChain(CriteriaEvalChain):  # type: ignore[override]
+class LabeledCriteriaEvalChain(CriteriaEvalChain):
     """Criteria evaluation chain that requires references."""

     @classmethod
diff --git a/libs/langchain/langchain/evaluation/exact_match/base.py b/libs/langchain/langchain/evaluation/exact_match/base.py
index df4c4f845b9..818f5661cbc 100644
--- a/libs/langchain/langchain/evaluation/exact_match/base.py
+++ b/libs/langchain/langchain/evaluation/exact_match/base.py
@@ -68,7 +68,7 @@ class ExactMatchStringEvaluator(StringEvaluator):
         """
         return "exact_match"

-    def _evaluate_strings(  # type: ignore[arg-type,override]
+    def _evaluate_strings(  # type: ignore[override]
         self,
         *,
         prediction: str,
diff --git a/libs/langchain/langchain/evaluation/loading.py b/libs/langchain/langchain/evaluation/loading.py
index bf12408e12f..81eb421668a 100644
--- a/libs/langchain/langchain/evaluation/loading.py
+++ b/libs/langchain/langchain/evaluation/loading.py
@@ -148,9 +148,7 @@ def load_evaluator(
                 "specify a language model explicitly."
             )
-        llm = llm or ChatOpenAI(  # type: ignore[call-arg]
-            model="gpt-4", seed=42, temperature=0
-        )
+        llm = llm or ChatOpenAI(model="gpt-4", seed=42, temperature=0)
     except Exception as e:
         raise ValueError(
             f"Evaluation with the {evaluator_cls} requires a "
diff --git a/libs/langchain/langchain/evaluation/regex_match/base.py b/libs/langchain/langchain/evaluation/regex_match/base.py
index 2b9f6a60b24..71b9c9e5745 100644
--- a/libs/langchain/langchain/evaluation/regex_match/base.py
+++ b/libs/langchain/langchain/evaluation/regex_match/base.py
@@ -65,7 +65,7 @@ class RegexMatchStringEvaluator(StringEvaluator):
         """
         return "regex_match"

-    def _evaluate_strings(  # type: ignore[arg-type,override]
+    def _evaluate_strings(  # type: ignore[override]
         self,
         *,
         prediction: str,
diff --git a/libs/langchain/langchain/evaluation/scoring/eval_chain.py b/libs/langchain/langchain/evaluation/scoring/eval_chain.py
index 9c713d324bd..a9d9e5c5c5d 100644
--- a/libs/langchain/langchain/evaluation/scoring/eval_chain.py
+++ b/libs/langchain/langchain/evaluation/scoring/eval_chain.py
@@ -144,7 +144,7 @@ class ScoreStringResultOutputParser(BaseOutputParser[dict]):
         }


-class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain):  # type: ignore[override]
+class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
     """A chain for scoring on a scale of 1-10 the output of a model.

     Attributes:
@@ -396,7 +396,7 @@ Performance may be significantly worse with other models."
         return self._prepare_output(result)


-class LabeledScoreStringEvalChain(ScoreStringEvalChain):  # type: ignore[override]
+class LabeledScoreStringEvalChain(ScoreStringEvalChain):
     """A chain for scoring the output of a model on a scale of 1-10.

     Attributes:
diff --git a/libs/langchain/langchain/indexes/_sql_record_manager.py b/libs/langchain/langchain/indexes/_sql_record_manager.py
index c36cbe737a5..d18546f081c 100644
--- a/libs/langchain/langchain/indexes/_sql_record_manager.py
+++ b/libs/langchain/langchain/indexes/_sql_record_manager.py
@@ -45,7 +45,7 @@ try:
     from sqlalchemy.ext.asyncio import async_sessionmaker
 except ImportError:
     # dummy for sqlalchemy < 2
-    async_sessionmaker = type("async_sessionmaker", (type,), {})  # type: ignore
+    async_sessionmaker = type("async_sessionmaker", (type,), {})  # type: ignore[assignment,misc]

 Base = declarative_base()
diff --git a/libs/langchain/langchain/memory/vectorstore_token_buffer_memory.py b/libs/langchain/langchain/memory/vectorstore_token_buffer_memory.py
index 2faf27e7ca7..e0c04f7ac4d 100644
--- a/libs/langchain/langchain/memory/vectorstore_token_buffer_memory.py
+++ b/libs/langchain/langchain/memory/vectorstore_token_buffer_memory.py
@@ -109,7 +109,7 @@ class ConversationVectorStoreTokenBufferMemory(ConversationTokenBufferMemory):
     previous_history_template: str = DEFAULT_HISTORY_TEMPLATE
     split_chunk_size: int = 1000

-    _memory_retriever: VectorStoreRetrieverMemory = PrivateAttr(default=None)  # type: ignore
+    _memory_retriever: VectorStoreRetrieverMemory = PrivateAttr(default=None)  # type: ignore[assignment]
     _timestamps: list[datetime] = PrivateAttr(default_factory=list)

     @property
diff --git a/libs/langchain/langchain/pydantic_v1/__init__.py b/libs/langchain/langchain/pydantic_v1/__init__.py
index c329193ffd3..2cabc6622be 100644
--- a/libs/langchain/langchain/pydantic_v1/__init__.py
+++ b/libs/langchain/langchain/pydantic_v1/__init__.py
@@ -12,12 +12,7 @@ from langchain_core._api import warn_deprecated
 # * Creating namespaces for pydantic v1 and v2 should allow us to write code that
 #   unambiguously uses either v1 or v2 API.
 # * This change is easier to roll out and roll back.
-
-try:
-    from pydantic.v1 import *  # noqa: F403
-except ImportError:
-    from pydantic import *  # type: ignore # noqa: F403
-
+from pydantic.v1 import *  # noqa: F403

 try:
     _PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0])
diff --git a/libs/langchain/langchain/pydantic_v1/dataclasses.py b/libs/langchain/langchain/pydantic_v1/dataclasses.py
index b057c5bb6cc..cdadd11d63d 100644
--- a/libs/langchain/langchain/pydantic_v1/dataclasses.py
+++ b/libs/langchain/langchain/pydantic_v1/dataclasses.py
@@ -1,9 +1,5 @@
 from langchain_core._api import warn_deprecated
-
-try:
-    from pydantic.v1.dataclasses import *  # noqa: F403
-except ImportError:
-    from pydantic.dataclasses import *  # type: ignore # noqa: F403
+from pydantic.v1.dataclasses import *  # noqa: F403

 warn_deprecated(
     "0.3.0",
diff --git a/libs/langchain/langchain/pydantic_v1/main.py b/libs/langchain/langchain/pydantic_v1/main.py
index d366b5a7ea4..d8630658b2a 100644
--- a/libs/langchain/langchain/pydantic_v1/main.py
+++ b/libs/langchain/langchain/pydantic_v1/main.py
@@ -1,9 +1,5 @@
 from langchain_core._api import warn_deprecated
-
-try:
-    from pydantic.v1.main import *  # noqa: F403
-except ImportError:
-    from pydantic.main import *  # type: ignore # noqa: F403
+from pydantic.v1.main import *  # noqa: F403

 warn_deprecated(
     "0.3.0",
diff --git a/libs/langchain/langchain/retrievers/document_compressors/chain_extract.py b/libs/langchain/langchain/retrievers/document_compressors/chain_extract.py
index 6bdf86572d5..a0892504d0a 100644
--- a/libs/langchain/langchain/retrievers/document_compressors/chain_extract.py
+++ b/libs/langchain/langchain/retrievers/document_compressors/chain_extract.py
@@ -99,7 +99,7 @@ class LLMChainExtractor(BaseDocumentCompressor):
             if len(outputs[i]) == 0:
                 continue
             compressed_docs.append(
-                Document(page_content=outputs[i], metadata=doc.metadata)  # type: ignore[arg-type]
+                Document(page_content=outputs[i], metadata=doc.metadata)
             )
         return compressed_docs
diff --git a/libs/langchain/langchain/retrievers/ensemble.py b/libs/langchain/langchain/retrievers/ensemble.py
index 3c3722654b9..1fe344c0e41 100644
--- a/libs/langchain/langchain/retrievers/ensemble.py
+++ b/libs/langchain/langchain/retrievers/ensemble.py
@@ -272,7 +272,7 @@ class EnsembleRetriever(BaseRetriever):
         # Enforce that retrieved docs are Documents for each list in retriever_docs
         for i in range(len(retriever_docs)):
             retriever_docs[i] = [
-                Document(page_content=doc) if not isinstance(doc, Document) else doc  # type: ignore[arg-type]
+                Document(page_content=doc) if not isinstance(doc, Document) else doc
                 for doc in retriever_docs[i]
             ]
diff --git a/libs/langchain/langchain/retrievers/self_query/base.py b/libs/langchain/langchain/retrievers/self_query/base.py
index adfb1550477..801990d1859 100644
--- a/libs/langchain/langchain/retrievers/self_query/base.py
+++ b/libs/langchain/langchain/retrievers/self_query/base.py
@@ -180,7 +180,7 @@ def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
         return ChromaTranslator()

     try:
-        from langchain_postgres import PGVector  # type: ignore[no-redef]
+        from langchain_postgres import PGVector
         from langchain_postgres import PGVectorTranslator as NewPGVectorTranslator
     except ImportError:
         pass
diff --git a/libs/langchain/langchain/smith/evaluation/string_run_evaluator.py b/libs/langchain/langchain/smith/evaluation/string_run_evaluator.py
index b7ee1232fa4..f84ba37e583 100644
--- a/libs/langchain/langchain/smith/evaluation/string_run_evaluator.py
+++ b/libs/langchain/langchain/smith/evaluation/string_run_evaluator.py
@@ -239,7 +239,7 @@ class StringExampleMapper(Serializable):
         return self.map(example)


-class StringRunEvaluatorChain(Chain, RunEvaluator):  # type: ignore[override, override]
+class StringRunEvaluatorChain(Chain, RunEvaluator):
     """Evaluate Run and optional examples."""

     run_mapper: StringRunMapper
diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml
index a50d73f3560..17652111fde 100644
--- a/libs/langchain/pyproject.toml
+++ b/libs/langchain/pyproject.toml
@@ -91,7 +91,7 @@ lint = [
     "cffi; python_version >= \"3.10\"",
 ]
 typing = [
-    "mypy<2.0,>=1.10",
+    "mypy<2.0,>=1.15",
     "types-pyyaml<7.0.0.0,>=6.0.12.2",
     "types-requests<3.0.0.0,>=2.28.11.5",
     "types-toml<1.0.0.0,>=0.10.8.1",
@@ -126,7 +126,7 @@ exclude = ["tests/integration_tests/examples/non-utf8-encoding.py"]
 [tool.mypy]
 ignore_missing_imports = "True"
 disallow_untyped_defs = "True"
-exclude = ["notebooks", "examples", "example_data"]
+warn_unused_ignores = "True"

 [tool.codespell]
 skip = ".git,*.pdf,*.svg,*.pdf,*.yaml,*.ipynb,poetry.lock,*.min.js,*.css,package-lock.json,example_data,_dist,examples,*.trig"
@@ -134,7 +134,7 @@ ignore-regex = ".*(Stati Uniti|Tense=Pres).*"
 ignore-words-list = "momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogyny,unsecure,damon,crate,aadd,symbl,precesses,accademia,nin"

 [tool.ruff.lint]
-select = ["E", "F", "I", "T201", "D", "UP"]
+select = ["E", "F", "I", "PGH003", "T201", "D", "UP"]
 ignore = ["UP007", ]
 pydocstyle = { convention = "google" }
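The two `pyproject.toml` edits work as a pair: ruff's `PGH003` (blanket-type-ignore) rejects unqualified `# type: ignore` comments at lint time, while mypy's `warn_unused_ignores` reports qualified ignores that no longer suppress anything, so stale comments get cleaned up instead of accumulating. A sketch of what each tool would flag:

```python
x: int = "oops"  # type: ignore              # ruff PGH003: specify a code
y: int = "oops"  # type: ignore[assignment]  # accepted by both tools
z: int = 1  # type: ignore[assignment]       # mypy: unused "type: ignore"
```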
diff --git a/libs/langchain/tests/mock_servers/robot/server.py b/libs/langchain/tests/mock_servers/robot/server.py
index 137e1134b8b..58fbb4d3085 100644
--- a/libs/langchain/tests/mock_servers/robot/server.py
+++ b/libs/langchain/tests/mock_servers/robot/server.py
@@ -200,6 +200,6 @@ def custom_openapi() -> dict[str, Any]:
 # This lets us prevent the "servers" configuration from being overwritten in
 # the auto-generated OpenAPI schema
-app.openapi = custom_openapi  # type: ignore
+app.openapi = custom_openapi

 if __name__ == "__main__":
     uvicorn.run(app, host="0.0.0.0", port=PORT)
diff --git a/libs/langchain/tests/unit_tests/agents/test_agent.py b/libs/langchain/tests/unit_tests/agents/test_agent.py
index acf95c23e2e..4706d1470f9 100644
--- a/libs/langchain/tests/unit_tests/agents/test_agent.py
+++ b/libs/langchain/tests/unit_tests/agents/test_agent.py
@@ -392,8 +392,8 @@ def test_agent_with_new_prefix_suffix() -> None:
     )

     # avoids "BasePromptTemplate" has no attribute "template" error
-    assert hasattr(agent.agent.llm_chain.prompt, "template")  # type: ignore
-    prompt_str = agent.agent.llm_chain.prompt.template  # type: ignore
+    assert hasattr(agent.agent.llm_chain.prompt, "template")  # type: ignore[union-attr]
+    prompt_str = agent.agent.llm_chain.prompt.template  # type: ignore[union-attr]
     assert prompt_str.startswith(prefix), "Prompt does not start with prefix"
     assert prompt_str.endswith(suffix), "Prompt does not end with suffix"

@@ -463,7 +463,7 @@ async def test_runnable_agent() -> None:
         return AgentFinish(return_values={"foo": "meow"}, log="hard-coded-message")

     agent = template | model | fake_parse
-    executor = AgentExecutor(agent=agent, tools=[])  # type: ignore[arg-type]
+    executor = AgentExecutor(agent=agent, tools=[])

     # Invoke
     result: Any = await asyncio.to_thread(executor.invoke, {"question": "hello"})
@@ -527,7 +527,7 @@ async def test_runnable_agent() -> None:
             run_log = result
         else:
             # `+` is defined for RunLogPatch
-            run_log = run_log + result  # type: ignore[union-attr]
+            run_log = run_log + result

     assert isinstance(run_log, RunLog)

@@ -583,7 +583,7 @@ async def test_runnable_agent_with_function_calls() -> None:
         return "Spying from under the bed."

     agent = template | model | fake_parse
-    executor = AgentExecutor(agent=agent, tools=[find_pet])  # type: ignore[arg-type, list-item]
+    executor = AgentExecutor(agent=agent, tools=[find_pet])

     # Invoke
     result = await asyncio.to_thread(executor.invoke, {"question": "hello"})
@@ -701,7 +701,7 @@ async def test_runnable_with_multi_action_per_step() -> None:
         return "purrrr"

     agent = template | model | fake_parse
-    executor = AgentExecutor(agent=agent, tools=[find_pet])  # type: ignore[arg-type, list-item]
+    executor = AgentExecutor(agent=agent, tools=[find_pet])

     # Invoke
     result = await asyncio.to_thread(executor.invoke, {"question": "hello"})
@@ -852,10 +852,10 @@ async def test_openai_agent_with_streaming() -> None:
     #    decorator.
     agent = create_openai_functions_agent(
         model,
-        [find_pet],  # type: ignore[list-item]
+        [find_pet],
         template,
     )
-    executor = AgentExecutor(agent=agent, tools=[find_pet])  # type: ignore[arg-type, list-item]
+    executor = AgentExecutor(agent=agent, tools=[find_pet])

     # Invoke
     result = await asyncio.to_thread(executor.invoke, {"question": "hello"})
@@ -1006,7 +1006,7 @@ def _make_tools_invocation(name_to_arguments: dict[str, dict[str, Any]]) -> AIMe
         additional_kwargs={
             "tool_calls": raw_tool_calls,
         },
-        tool_calls=tool_calls,  # type: ignore[arg-type]
+        tool_calls=tool_calls,
     )


@@ -1024,7 +1024,7 @@ async def test_openai_agent_tools_agent() -> None:
         ]
     )

-    GenericFakeChatModel.bind_tools = lambda self, x: self  # type: ignore
+    GenericFakeChatModel.bind_tools = lambda self, x: self  # type: ignore[assignment,misc]
     model = GenericFakeChatModel(messages=infinite_cycle)

     @tool
@@ -1053,16 +1053,16 @@ async def test_openai_agent_tools_agent() -> None:
     #    decorator.
     openai_agent = create_openai_tools_agent(
         model,
-        [find_pet],  # type: ignore[list-item]
+        [find_pet],
         template,
     )
     tool_calling_agent = create_tool_calling_agent(
         model,
-        [find_pet],  # type: ignore[list-item]
+        [find_pet],
         template,
     )
     for agent in [openai_agent, tool_calling_agent]:
-        executor = AgentExecutor(agent=agent, tools=[find_pet])  # type: ignore[arg-type, list-item]
+        executor = AgentExecutor(agent=agent, tools=[find_pet])

         # Invoke
         result = await asyncio.to_thread(executor.invoke, {"question": "hello"})
diff --git a/libs/langchain/tests/unit_tests/agents/test_agent_iterator.py b/libs/langchain/tests/unit_tests/agents/test_agent_iterator.py
index dd6881e8acd..ebd52ff92c5 100644
--- a/libs/langchain/tests/unit_tests/agents/test_agent_iterator.py
+++ b/libs/langchain/tests/unit_tests/agents/test_agent_iterator.py
@@ -238,7 +238,7 @@ def test_agent_iterator_properties_and_setters() -> None:
     assert isinstance(agent_iter.tags, type(None))
     assert isinstance(agent_iter.agent_executor, AgentExecutor)

-    agent_iter.inputs = "New input"  # type: ignore
+    agent_iter.inputs = "New input"  # type: ignore[assignment]
     assert isinstance(agent_iter.inputs, dict)

     agent_iter.callbacks = [FakeCallbackHandler()]
diff --git a/libs/langchain/tests/unit_tests/agents/test_initialize.py b/libs/langchain/tests/unit_tests/agents/test_initialize.py
index f898208a292..39473af4ad0 100644
--- a/libs/langchain/tests/unit_tests/agents/test_initialize.py
+++ b/libs/langchain/tests/unit_tests/agents/test_initialize.py
@@ -17,7 +17,7 @@ def test_initialize_agent_with_str_agent_type() -> None:
     """Test initialize_agent with a string."""
     fake_llm = FakeLLM()
     agent_executor = initialize_agent(
-        [my_tool],  # type: ignore[list-item]
+        [my_tool],
         fake_llm,
         "zero-shot-react-description",  # type: ignore[arg-type]
     )
diff --git a/libs/langchain/tests/unit_tests/agents/test_mrkl_output_parser.py b/libs/langchain/tests/unit_tests/agents/test_mrkl_output_parser.py
index f4ac56009c1..1482e665910 100644
--- a/libs/langchain/tests/unit_tests/agents/test_mrkl_output_parser.py
+++ b/libs/langchain/tests/unit_tests/agents/test_mrkl_output_parser.py
@@ -16,7 +16,7 @@ def test_valid_action_and_action_input_parse() -> None:
     Action: foo
     Action Input: bar"""

-    agent_action: AgentAction = mrkl_output_parser.parse(llm_output)  # type: ignore
+    agent_action: AgentAction = mrkl_output_parser.parse(llm_output)  # type: ignore[assignment]
     assert agent_action.tool == "foo"
     assert agent_action.tool_input == "bar"

@@ -24,7 +24,7 @@ def test_valid_final_answer_parse() -> None:
     llm_output = """Final Answer: The best pizza to eat is margaritta """

-    agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output)  # type: ignore
+    agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output)  # type: ignore[assignment]
     assert (
         agent_finish.return_values.get("output")
         == "The best pizza to eat is margaritta"
@@ -59,7 +59,7 @@ def test_final_answer_before_parsable_action() -> None:
     Action: foo
     Action Input: bar
     """
-    agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output)  # type: ignore
+    agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output)  # type: ignore[assignment]
     assert (
         agent_finish.return_values.get("output")
         == "The best pizza to eat is margaritta"
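The `test_mrkl_output_parser.py` hunks keep their ignores (narrowed to `assignment`) because `parse()` returns `Union[AgentAction, AgentFinish]` while each test pins one branch. An ignore-free alternative, sketched here for comparison, is to narrow with an `isinstance` assertion:

```python
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain_core.agents import AgentAction

result = MRKLOutputParser().parse("Action: foo\nAction Input: bar")
assert isinstance(result, AgentAction)  # narrows Union[AgentAction, AgentFinish]
assert result.tool == "foo"
```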
diff --git a/libs/langchain/tests/unit_tests/agents/test_openai_assistant.py b/libs/langchain/tests/unit_tests/agents/test_openai_assistant.py
index 45fcea4ad2a..f29674672e8 100644
--- a/libs/langchain/tests/unit_tests/agents/test_openai_assistant.py
+++ b/libs/langchain/tests/unit_tests/agents/test_openai_assistant.py
@@ -11,7 +11,7 @@ def _create_mock_client(*args: Any, use_async: bool = False, **kwargs: Any) -> A
     client = AsyncMock() if use_async else MagicMock()
     mock_assistant = MagicMock()
     mock_assistant.id = "abc123"
-    client.beta.assistants.create.return_value = mock_assistant  # type: ignore
+    client.beta.assistants.create.return_value = mock_assistant
     return client
diff --git a/libs/langchain/tests/unit_tests/callbacks/fake_callback_handler.py b/libs/langchain/tests/unit_tests/callbacks/fake_callback_handler.py
index fe3baa94634..16733d25cb3 100644
--- a/libs/langchain/tests/unit_tests/callbacks/fake_callback_handler.py
+++ b/libs/langchain/tests/unit_tests/callbacks/fake_callback_handler.py
@@ -254,7 +254,7 @@ class FakeCallbackHandler(BaseCallbackHandler, BaseFakeCallbackHandlerMixin):
     ) -> Any:
         self.on_retriever_error_common()

-    def __deepcopy__(self, memo: dict) -> "FakeCallbackHandler":  # type: ignore
+    def __deepcopy__(self, memo: dict) -> "FakeCallbackHandler":  # type: ignore[override]
         return self


@@ -388,5 +388,5 @@ class FakeAsyncCallbackHandler(AsyncCallbackHandler, BaseFakeCallbackHandlerMixi
     ) -> None:
         self.on_text_common()

-    def __deepcopy__(self, memo: dict) -> "FakeAsyncCallbackHandler":  # type: ignore
+    def __deepcopy__(self, memo: dict) -> "FakeAsyncCallbackHandler":  # type: ignore[override]
         return self
diff --git a/libs/langchain/tests/unit_tests/chat_models/test_base.py b/libs/langchain/tests/unit_tests/chat_models/test_base.py
index 2e6fc4f4521..3ed6bc47176 100644
--- a/libs/langchain/tests/unit_tests/chat_models/test_base.py
+++ b/libs/langchain/tests/unit_tests/chat_models/test_base.py
@@ -173,7 +173,7 @@ def test_configurable_with_default() -> None:
     for method in ("get_num_tokens", "get_num_tokens_from_messages", "dict"):
         assert hasattr(model, method)

-    assert model.model_name == "gpt-4o"  # type: ignore[attr-defined]
+    assert model.model_name == "gpt-4o"

     model_with_tools = model.bind_tools(
         [{"name": "foo", "description": "foo", "parameters": {}}]
diff --git a/libs/langchain/tests/unit_tests/evaluation/agents/test_eval_chain.py b/libs/langchain/tests/unit_tests/evaluation/agents/test_eval_chain.py
index feeb18efea9..7fd727e8115 100644
--- a/libs/langchain/tests/unit_tests/evaluation/agents/test_eval_chain.py
+++ b/libs/langchain/tests/unit_tests/evaluation/agents/test_eval_chain.py
@@ -123,7 +123,7 @@ def test_trajectory_eval_chain(
         },
         sequential_responses=True,
     )
-    chain = TrajectoryEvalChain.from_llm(llm=llm, agent_tools=[foo])  # type: ignore
+    chain = TrajectoryEvalChain.from_llm(llm=llm, agent_tools=[foo])
     # Test when ref is not provided
     res = chain.evaluate_agent_trajectory(
         input="What is your favorite food?",
@@ -151,7 +151,7 @@ def test_trajectory_eval_chain_no_tools(
         },
         sequential_responses=True,
     )
-    chain = TrajectoryEvalChain.from_llm(llm=llm)  # type: ignore
+    chain = TrajectoryEvalChain.from_llm(llm=llm)
     res = chain.evaluate_agent_trajectory(
         input="What is your favorite food?",
         agent_trajectory=intermediate_steps,
@@ -175,7 +175,7 @@ def test_old_api_works(intermediate_steps: list[tuple[AgentAction, str]]) -> Non
         },
         sequential_responses=True,
     )
-    chain = TrajectoryEvalChain.from_llm(llm=llm)  # type: ignore
+    chain = TrajectoryEvalChain.from_llm(llm=llm)
     res = chain(
         {
             "question": "What is your favorite food?",
diff --git a/libs/langchain/tests/unit_tests/evaluation/criteria/test_eval_chain.py b/libs/langchain/tests/unit_tests/evaluation/criteria/test_eval_chain.py
index a96120b3480..a58e53d97df 100644
--- a/libs/langchain/tests/unit_tests/evaluation/criteria/test_eval_chain.py
+++ b/libs/langchain/tests/unit_tests/evaluation/criteria/test_eval_chain.py
@@ -14,7 +14,6 @@ from tests.unit_tests.llms.fake_llm import FakeLLM


 def test_resolve_criteria_str() -> None:
-    # type: ignore
     assert CriteriaEvalChain.resolve_criteria("helpfulness") == {
         "helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS]
     }
diff --git a/libs/langchain/tests/unit_tests/evaluation/qa/test_eval_chain.py b/libs/langchain/tests/unit_tests/evaluation/qa/test_eval_chain.py
index 75cba6d704d..c2db3b2be59 100644
--- a/libs/langchain/tests/unit_tests/evaluation/qa/test_eval_chain.py
+++ b/libs/langchain/tests/unit_tests/evaluation/qa/test_eval_chain.py
@@ -61,7 +61,7 @@ def test_load_criteria_evaluator() -> None:
     # Patch the env with an openai-api-key
     with patch.dict(os.environ, {"OPENAI_API_KEY": "foo"}):
         # Check it can load using a string arg (even if that's not how it's typed)
-        load_evaluator("criteria")  # type: ignore
+        load_evaluator("criteria")  # type: ignore[arg-type]


 @pytest.mark.parametrize("chain_cls", [QAEvalChain, ContextQAEvalChain, CotQAEvalChain])
@@ -78,7 +78,7 @@ def test_returns_expected_results(
     fake_llm = FakeLLM(
         queries={"text": "The meaning of life\nCORRECT"}, sequential_responses=True
     )
-    chain = chain_cls.from_llm(fake_llm)  # type: ignore
+    chain = chain_cls.from_llm(fake_llm)  # type: ignore[attr-defined]
     results = chain.evaluate_strings(
         prediction="my prediction", reference="my reference", input="my input"
     )
diff --git a/libs/langchain/tests/unit_tests/indexes/test_hashed_document.py b/libs/langchain/tests/unit_tests/indexes/test_hashed_document.py
index 6c79fe6624a..27ffb32b306 100644
--- a/libs/langchain/tests/unit_tests/indexes/test_hashed_document.py
+++ b/libs/langchain/tests/unit_tests/indexes/test_hashed_document.py
@@ -14,9 +14,9 @@ def test_hashed_document_hashing() -> None:
 def test_hashing_with_missing_content() -> None:
     """Check that ValueError is raised if page_content is missing."""
     with pytest.raises(TypeError):
-        _HashedDocument(
+        _HashedDocument(  # type: ignore[call-arg]
             metadata={"key": "value"},
-        )  # type: ignore
+        )


 def test_uid_auto_assigned_to_hash() -> None:
diff --git a/libs/langchain/tests/unit_tests/indexes/test_indexing.py b/libs/langchain/tests/unit_tests/indexes/test_indexing.py
index 5428f4b37d5..1e9df94e698 100644
--- a/libs/langchain/tests/unit_tests/indexes/test_indexing.py
+++ b/libs/langchain/tests/unit_tests/indexes/test_indexing.py
@@ -57,7 +57,7 @@ class InMemoryVectorStore(VectorStore):
         for _id in ids:
             self.store.pop(_id, None)

-    def add_documents(  # type: ignore
+    def add_documents(
         self,
         documents: Sequence[Document],
         *,
@@ -140,7 +140,7 @@ def record_manager() -> SQLRecordManager:
     return record_manager


-@pytest_asyncio.fixture  # type: ignore
+@pytest_asyncio.fixture
 @pytest.mark.requires("aiosqlite")
 async def arecord_manager() -> SQLRecordManager:
     """Timestamped set fixture."""
@@ -292,7 +292,7 @@ def test_index_simple_delete_full(
     doc_texts = set(
         # Ignoring type since doc should be in the store and not a None
-        vector_store.store.get(uid).page_content  # type: ignore
+        vector_store.store.get(uid).page_content  # type: ignore[union-attr]
         for uid in vector_store.store
     )
     assert doc_texts == {"mutated document 1", "This is another document."}
@@ -368,7 +368,7 @@ async def test_aindex_simple_delete_full(
     doc_texts = set(
         # Ignoring type since doc should be in the store and not a None
-        vector_store.store.get(uid).page_content  # type: ignore
+        vector_store.store.get(uid).page_content  # type: ignore[union-attr]
         for uid in vector_store.store
     )
     assert doc_texts == {"mutated document 1", "This is another document."}
@@ -661,7 +661,7 @@ def test_incremental_delete(
     doc_texts = set(
         # Ignoring type since doc should be in the store and not a None
-        vector_store.store.get(uid).page_content  # type: ignore
+        vector_store.store.get(uid).page_content  # type: ignore[union-attr]
         for uid in vector_store.store
     )
     assert doc_texts == {"This is another document.", "This is a test document."}
@@ -720,7 +720,7 @@ def test_incremental_delete(
     doc_texts = set(
         # Ignoring type since doc should be in the store and not a None
-        vector_store.store.get(uid).page_content  # type: ignore
+        vector_store.store.get(uid).page_content  # type: ignore[union-attr]
         for uid in vector_store.store
     )
     assert doc_texts == {
@@ -788,7 +788,7 @@ def test_incremental_indexing_with_batch_size(
     doc_texts = set(
         # Ignoring type since doc should be in the store and not a None
-        vector_store.store.get(uid).page_content  # type: ignore
+        vector_store.store.get(uid).page_content  # type: ignore[union-attr]
         for uid in vector_store.store
     )
     assert doc_texts == {"1", "2", "3", "4"}
@@ -838,7 +838,7 @@ def test_incremental_delete_with_batch_size(
     doc_texts = set(
         # Ignoring type since doc should be in the store and not a None
-        vector_store.store.get(uid).page_content  # type: ignore
+        vector_store.store.get(uid).page_content  # type: ignore[union-attr]
         for uid in vector_store.store
     )
     assert doc_texts == {"1", "2", "3", "4"}
@@ -984,7 +984,7 @@ async def test_aincremental_delete(
     doc_texts = set(
         # Ignoring type since doc should be in the store and not a None
-        vector_store.store.get(uid).page_content  # type: ignore
+        vector_store.store.get(uid).page_content  # type: ignore[union-attr]
         for uid in vector_store.store
     )
     assert doc_texts == {"This is another document.", "This is a test document."}
@@ -1043,7 +1043,7 @@ async def test_aincremental_delete(
     doc_texts = set(
         # Ignoring type since doc should be in the store and not a None
-        vector_store.store.get(uid).page_content  # type: ignore
+        vector_store.store.get(uid).page_content  # type: ignore[union-attr]
         for uid in vector_store.store
     )
     assert doc_texts == {
diff --git a/libs/langchain/tests/unit_tests/load/test_dump.py b/libs/langchain/tests/unit_tests/load/test_dump.py
index 6a4984ea21f..8b96ee78c95 100644
--- a/libs/langchain/tests/unit_tests/load/test_dump.py
+++ b/libs/langchain/tests/unit_tests/load/test_dump.py
@@ -139,7 +139,7 @@ def test_aliases_hidden() -> None:
     dumped = json.loads(dumps(test_class, pretty=True))

     # Check by alias
-    test_class = TestClass(my_favorite_secret_alias="hello", my_other_secret="world")  # type: ignore[call-arg]
+    test_class = TestClass(my_favorite_secret_alias="hello", my_other_secret="world")
     dumped = json.loads(dumps(test_class, pretty=True))
     expected_dump = {
         "lc": 1,
diff --git a/libs/langchain/tests/unit_tests/load/test_load.py b/libs/langchain/tests/unit_tests/load/test_load.py
index 5e95b024016..f36a2d24bac 100644
--- a/libs/langchain/tests/unit_tests/load/test_load.py
+++ b/libs/langchain/tests/unit_tests/load/test_load.py
@@ -25,7 +25,7 @@ def test_loads_openai_llm() -> None:
     llm = CommunityOpenAI(
         model="davinci", temperature=0.5, openai_api_key="hello", top_p=0.8
-    )  # type: ignore[call-arg]
+    )
     llm_string = dumps(llm)
     llm2 = loads(llm_string, secrets_map={"OPENAI_API_KEY": "hello"})
@@ -41,7 +41,7 @@ def test_loads_llmchain() -> None:
     llm = CommunityOpenAI(
         model="davinci", temperature=0.5, openai_api_key="hello", top_p=0.8
-    )  # type: ignore[call-arg]
+    )
     prompt = PromptTemplate.from_template("hello {name}!")
     chain = LLMChain(llm=llm, prompt=prompt)
     chain_string = dumps(chain)
@@ -64,7 +64,7 @@ def test_loads_llmchain_env() -> None:
     if not has_env:
         os.environ["OPENAI_API_KEY"] = "env_variable"

-    llm = OpenAI(model="davinci", temperature=0.5, top_p=0.8)  # type: ignore[call-arg]
+    llm = OpenAI(model="davinci", temperature=0.5, top_p=0.8)
     prompt = PromptTemplate.from_template("hello {name}!")
     chain = LLMChain(llm=llm, prompt=prompt)
     chain_string = dumps(chain)
@@ -82,7 +82,7 @@ def test_loads_llmchain_env() -> None:

 @pytest.mark.requires("openai")
 def test_loads_llmchain_with_non_serializable_arg() -> None:
-    llm = CommunityOpenAI(  # type: ignore[call-arg]
+    llm = CommunityOpenAI(
         model="davinci",
         temperature=0.5,
         openai_api_key="hello",
@@ -99,7 +99,7 @@ def test_loads_llmchain_with_non_serializable_arg() -> None:
 def test_load_openai_llm() -> None:
     from langchain_openai import OpenAI

-    llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello")  # type: ignore[call-arg]
+    llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
     llm_obj = dumpd(llm)
     llm2 = load(llm_obj, secrets_map={"OPENAI_API_KEY": "hello"})
@@ -112,7 +112,7 @@ def test_load_openai_llm() -> None:
 def test_load_llmchain() -> None:
     from langchain_openai import OpenAI

-    llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello")  # type: ignore[call-arg]
+    llm = CommunityOpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
     prompt = PromptTemplate.from_template("hello {name}!")
     chain = LLMChain(llm=llm, prompt=prompt)
     chain_obj = dumpd(chain)
@@ -135,7 +135,7 @@ def test_load_llmchain_env() -> None:
     if not has_env:
         os.environ["OPENAI_API_KEY"] = "env_variable"

-    llm = CommunityOpenAI(model="davinci", temperature=0.5)  # type: ignore[call-arg]
+    llm = CommunityOpenAI(model="davinci", temperature=0.5)
     prompt = PromptTemplate.from_template("hello {name}!")
     chain = LLMChain(llm=llm, prompt=prompt)
     chain_obj = dumpd(chain)
diff --git a/libs/langchain/tests/unit_tests/test_imports.py b/libs/langchain/tests/unit_tests/test_imports.py
index 131bec4c4b6..8c17cf72510 100644
--- a/libs/langchain/tests/unit_tests/test_imports.py
+++ b/libs/langchain/tests/unit_tests/test_imports.py
@@ -141,7 +141,7 @@ def _dict_from_ast(node: ast.Dict) -> dict[str, str]:
     """
     result: dict[str, str] = {}
     for key, value in zip(node.keys, node.values):
-        py_key = _literal_eval_str(key)  # type: ignore
+        py_key = _literal_eval_str(key)  # type: ignore[arg-type]
         py_value = _literal_eval_str(value)
         result[py_key] = py_value
     return result
diff --git a/libs/langchain/tests/unit_tests/tools/test_render.py b/libs/langchain/tests/unit_tests/tools/test_render.py
index 5b61c3ca6a8..66df360c19d 100644
--- a/libs/langchain/tests/unit_tests/tools/test_render.py
+++ b/libs/langchain/tests/unit_tests/tools/test_render.py
@@ -21,7 +21,7 @@ def calculator(expression: str) -> str:

 @pytest.fixture
 def tools() -> list[BaseTool]:
-    return [search, calculator]  # type: ignore
+    return [search, calculator]


 def test_render_text_description(tools: list[BaseTool]) -> None:
diff --git a/libs/langchain/uv.lock b/libs/langchain/uv.lock
index 75427b3d5af..8394efee3aa 100644
--- a/libs/langchain/uv.lock
+++ b/libs/langchain/uv.lock
@@ -2568,7 +2568,7 @@ test-integration = [
 typing = [
     { name = "langchain-core", editable = "../core" },
editable = "../core" }, { name = "langchain-text-splitters", editable = "../text-splitters" }, - { name = "mypy", specifier = ">=1.10,<2.0" }, + { name = "mypy", specifier = ">=1.15,<2.0" }, { name = "mypy-protobuf", specifier = ">=3.0.0,<4.0.0" }, { name = "numpy", marker = "python_full_version < '3.13'", specifier = ">=1.26.4" }, { name = "numpy", marker = "python_full_version >= '3.13'", specifier = ">=2.1.0" }, @@ -2705,7 +2705,7 @@ wheels = [ [[package]] name = "langchain-core" -version = "0.3.51" +version = "0.3.52" source = { editable = "../core" } dependencies = [ { name = "jsonpatch" }, @@ -2745,6 +2745,8 @@ test = [ { name = "numpy", marker = "python_full_version >= '3.13'", specifier = ">=2.1.0" }, { name = "pytest", specifier = ">=8,<9" }, { name = "pytest-asyncio", specifier = ">=0.21.1,<1.0.0" }, + { name = "pytest-benchmark" }, + { name = "pytest-codspeed" }, { name = "pytest-mock", specifier = ">=3.10.0,<4.0.0" }, { name = "pytest-socket", specifier = ">=0.7.0,<1.0.0" }, { name = "pytest-watcher", specifier = ">=0.3.4,<1.0.0" }, @@ -2755,8 +2757,7 @@ test = [ test-integration = [] typing = [ { name = "langchain-text-splitters", directory = "../text-splitters" }, - { name = "mypy", specifier = ">=1.10,<1.11" }, - { name = "types-jinja2", specifier = ">=2.11.9,<3.0.0" }, + { name = "mypy", specifier = ">=1.15,<1.16" }, { name = "types-pyyaml", specifier = ">=6.0.12.2,<7.0.0.0" }, { name = "types-requests", specifier = ">=2.28.11.5,<3.0.0.0" }, ] @@ -2882,7 +2883,7 @@ wheels = [ [[package]] name = "langchain-openai" -version = "0.3.12" +version = "0.3.13" source = { editable = "../partners/openai" } dependencies = [ { name = "langchain-core" }, @@ -2944,7 +2945,7 @@ wheels = [ [[package]] name = "langchain-tests" -version = "0.3.17" +version = "0.3.18" source = { editable = "../standard-tests" } dependencies = [ { name = "httpx" },