diff --git a/libs/community/langchain_community/agent_toolkits/sql/base.py b/libs/community/langchain_community/agent_toolkits/sql/base.py index 5cacc0f9f0a..1f3ea6cbd93 100644 --- a/libs/community/langchain_community/agent_toolkits/sql/base.py +++ b/libs/community/langchain_community/agent_toolkits/sql/base.py @@ -196,7 +196,7 @@ def create_sql_agent( ] prompt = ChatPromptTemplate.from_messages(messages) agent = RunnableAgent( - runnable=create_openai_functions_agent(llm, tools, prompt), # type: ignore + runnable=create_openai_functions_agent(llm, tools, prompt), # type: ignore[arg-type] input_keys_arg=["input"], return_keys_arg=["output"], **kwargs, @@ -211,9 +211,9 @@ def create_sql_agent( ] prompt = ChatPromptTemplate.from_messages(messages) if agent_type == "openai-tools": - runnable = create_openai_tools_agent(llm, tools, prompt) # type: ignore + runnable = create_openai_tools_agent(llm, tools, prompt) # type: ignore[arg-type] else: - runnable = create_tool_calling_agent(llm, tools, prompt) # type: ignore + runnable = create_tool_calling_agent(llm, tools, prompt) # type: ignore[arg-type] agent = RunnableMultiActionAgent( # type: ignore[assignment] runnable=runnable, input_keys_arg=["input"], diff --git a/libs/community/langchain_community/agents/openai_assistant/base.py b/libs/community/langchain_community/agents/openai_assistant/base.py index f9006f66ca5..25ad90f17fe 100644 --- a/libs/community/langchain_community/agents/openai_assistant/base.py +++ b/libs/community/langchain_community/agents/openai_assistant/base.py @@ -135,7 +135,7 @@ def _get_assistants_tool( Dict[str, Any]: A dictionary of tools that are converted into OpenAI tools. """ if _is_assistants_builtin_tool(tool): - return tool # type: ignore + return tool # type: ignore[return-value] else: return convert_to_openai_tool(tool) @@ -288,7 +288,7 @@ class OpenAIAssistantV2Runnable(OpenAIAssistantRunnable): assistant = client.beta.assistants.create( name=name, instructions=instructions, - tools=[_get_assistants_tool(tool) for tool in tools], # type: ignore + tools=[_get_assistants_tool(tool) for tool in tools], tool_resources=tool_resources, # type: ignore[arg-type] model=model, extra_body=extra_body, @@ -430,7 +430,7 @@ class OpenAIAssistantV2Runnable(OpenAIAssistantRunnable): assistant = await async_client.beta.assistants.create( name=name, instructions=instructions, - tools=openai_tools, # type: ignore + tools=openai_tools, tool_resources=tool_resources, # type: ignore[arg-type] model=model, ) diff --git a/libs/community/langchain_community/cache.py b/libs/community/langchain_community/cache.py index 1e1c46f6b3f..f685d0ce262 100644 --- a/libs/community/langchain_community/cache.py +++ b/libs/community/langchain_community/cache.py @@ -238,7 +238,7 @@ class InMemoryCache(BaseCache): Base = declarative_base() -class FullLLMCache(Base): # type: ignore +class FullLLMCache(Base): # type: ignore[misc,valid-type] """SQLite table for full LLM Cache (all generations).""" __tablename__ = "full_llm_cache" @@ -261,7 +261,7 @@ class SQLAlchemyCache(BaseCache): """Look up based on prompt and llm_string.""" stmt = ( select(self.cache_schema.response) - .where(self.cache_schema.prompt == prompt) # type: ignore + .where(self.cache_schema.prompt == prompt) .where(self.cache_schema.llm == llm_string) .order_by(self.cache_schema.idx) ) @@ -1531,7 +1531,7 @@ class CassandraSemanticCache(BaseCache): await self.table.aclear() -class FullMd5LLMCache(Base): # type: ignore +class FullMd5LLMCache(Base): # type: ignore[misc,valid-type] """SQLite 
table for full LLM Cache (all generations).""" __tablename__ = "full_md5_llm_cache" @@ -1583,7 +1583,7 @@ class SQLAlchemyMd5Cache(BaseCache): def _delete_previous(self, session: Session, prompt: str, llm_string: str) -> None: stmt = ( delete(self.cache_schema) - .where(self.cache_schema.prompt_md5 == self.get_md5(prompt)) # type: ignore + .where(self.cache_schema.prompt_md5 == self.get_md5(prompt)) .where(self.cache_schema.llm == llm_string) .where(self.cache_schema.prompt == prompt) ) @@ -1593,7 +1593,7 @@ class SQLAlchemyMd5Cache(BaseCache): prompt_pd5 = self.get_md5(prompt) stmt = ( select(self.cache_schema.response) - .where(self.cache_schema.prompt_md5 == prompt_pd5) # type: ignore + .where(self.cache_schema.prompt_md5 == prompt_pd5) .where(self.cache_schema.llm == llm_string) .where(self.cache_schema.prompt == prompt) .order_by(self.cache_schema.idx) @@ -1796,7 +1796,7 @@ class _CachedAwaitable: def __await__(self) -> Generator: if self.result is _unset: self.result = yield from self.awaitable.__await__() - return self.result # type: ignore + return self.result # type: ignore[return-value] def _reawaitable(func: Callable) -> Callable: diff --git a/libs/community/langchain_community/callbacks/comet_ml_callback.py b/libs/community/langchain_community/callbacks/comet_ml_callback.py index a05ba48a00e..341df4bcf0f 100644 --- a/libs/community/langchain_community/callbacks/comet_ml_callback.py +++ b/libs/community/langchain_community/callbacks/comet_ml_callback.py @@ -584,7 +584,7 @@ class CometCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler): ) _custom_metrics = custom_metrics if custom_metrics else self.custom_metrics - self.__init__( # type: ignore + self.__init__( # type: ignore[misc] task_type=_task_type, workspace=_workspace, project_name=_project_name, diff --git a/libs/community/langchain_community/callbacks/wandb_callback.py b/libs/community/langchain_community/callbacks/wandb_callback.py index 8fbaf93e8aa..68f704dd57a 100644 --- a/libs/community/langchain_community/callbacks/wandb_callback.py +++ b/libs/community/langchain_community/callbacks/wandb_callback.py @@ -580,7 +580,7 @@ class WandbCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler): self.temp_dir.cleanup() self.reset_callback_meta() if reset: - self.__init__( # type: ignore + self.__init__( # type: ignore[misc] job_type=job_type if job_type else self.job_type, project=project if project else self.project, entity=entity if entity else self.entity, diff --git a/libs/community/langchain_community/chains/ernie_functions/base.py b/libs/community/langchain_community/chains/ernie_functions/base.py index 3959a838d52..e2a58548f30 100644 --- a/libs/community/langchain_community/chains/ernie_functions/base.py +++ b/libs/community/langchain_community/chains/ernie_functions/base.py @@ -352,7 +352,7 @@ def create_structured_output_runnable( class _OutputFormatter(BaseModel): """Output formatter. Should always be used to format your response to the user.""" # noqa: E501 - output: output_schema # type: ignore + output: output_schema # type: ignore[valid-type] function = _OutputFormatter output_parser = output_parser or PydanticAttrOutputFunctionsParser( @@ -537,7 +537,7 @@ def create_structured_output_chain( class _OutputFormatter(BaseModel): """Output formatter. 
Should always be used to format your response to the user.""" # noqa: E501 - output: output_schema # type: ignore + output: output_schema # type: ignore[valid-type] function = _OutputFormatter output_parser = output_parser or PydanticAttrOutputFunctionsParser( diff --git a/libs/community/langchain_community/chains/graph_qa/cypher.py b/libs/community/langchain_community/chains/graph_qa/cypher.py index 760ce667312..dbfde8d289b 100644 --- a/libs/community/langchain_community/chains/graph_qa/cypher.py +++ b/libs/community/langchain_community/chains/graph_qa/cypher.py @@ -316,7 +316,7 @@ class GraphCypherQAChain(Chain): MessagesPlaceholder(variable_name="function_response"), ] ) - qa_chain = response_prompt | qa_llm | StrOutputParser() # type: ignore + qa_chain = response_prompt | qa_llm | StrOutputParser() # type: ignore[operator] except (NotImplementedError, AttributeError): raise ValueError("Provided LLM does not support native tools/functions") else: @@ -404,15 +404,15 @@ class GraphCypherQAChain(Chain): intermediate_steps.append({"context": context}) if self.use_function_response: function_response = get_function_response(question, context) - final_result = self.qa_chain.invoke( # type: ignore + final_result = self.qa_chain.invoke( # type: ignore[assignment] {"question": question, "function_response": function_response}, ) else: - result = self.qa_chain.invoke( # type: ignore + result = self.qa_chain.invoke( {"question": question, "context": context}, callbacks=callbacks, ) - final_result = result[self.qa_chain.output_key] # type: ignore + final_result = result[self.qa_chain.output_key] # type: ignore[union-attr] chain_result: Dict[str, Any] = {self.output_key: final_result} if self.return_intermediate_steps: diff --git a/libs/community/langchain_community/chains/graph_qa/memgraph.py b/libs/community/langchain_community/chains/graph_qa/memgraph.py index 02fa66992a2..e82c0b7c5d5 100644 --- a/libs/community/langchain_community/chains/graph_qa/memgraph.py +++ b/libs/community/langchain_community/chains/graph_qa/memgraph.py @@ -225,11 +225,11 @@ class MemgraphQAChain(Chain): MessagesPlaceholder(variable_name="function_response"), ] ) - qa_chain = response_prompt | qa_llm | StrOutputParser() # type: ignore + qa_chain = response_prompt | qa_llm | StrOutputParser() # type: ignore[operator] except (NotImplementedError, AttributeError): raise ValueError("Provided LLM does not support native tools/functions") else: - qa_chain = use_qa_llm_kwargs["prompt"] | qa_llm | StrOutputParser() # type: ignore + qa_chain = use_qa_llm_kwargs["prompt"] | qa_llm | StrOutputParser() prompt = use_cypher_llm_kwargs["prompt"] llm_to_use = cypher_llm if cypher_llm is not None else llm @@ -300,11 +300,11 @@ class MemgraphQAChain(Chain): intermediate_steps.append({"context": context}) if self.use_function_response: function_response = get_function_response(question, context) - result = self.qa_chain.invoke( # type: ignore + result = self.qa_chain.invoke( {"question": question, "function_response": function_response}, ) else: - result = self.qa_chain.invoke( # type: ignore + result = self.qa_chain.invoke( {"question": question, "context": context}, callbacks=callbacks, ) diff --git a/libs/community/langchain_community/chains/graph_qa/neptune_cypher.py b/libs/community/langchain_community/chains/graph_qa/neptune_cypher.py index e0f21ff477b..8a44315f070 100644 --- a/libs/community/langchain_community/chains/graph_qa/neptune_cypher.py +++ b/libs/community/langchain_community/chains/graph_qa/neptune_cypher.py @@ -67,11 +67,11 
@@ def extract_cypher(text: str) -> str: def use_simple_prompt(llm: BaseLanguageModel) -> bool: """Decides whether to use the simple prompt""" - if llm._llm_type and "anthropic" in llm._llm_type: # type: ignore + if llm._llm_type and "anthropic" in llm._llm_type: # type: ignore[attr-defined] return True # Bedrock anthropic - if hasattr(llm, "model_id") and "anthropic" in llm.model_id: # type: ignore + if hasattr(llm, "model_id") and "anthropic" in llm.model_id: return True return False diff --git a/libs/community/langchain_community/chains/pebblo_retrieval/base.py b/libs/community/langchain_community/chains/pebblo_retrieval/base.py index 2d4b550f1a9..80c45329bc1 100644 --- a/libs/community/langchain_community/chains/pebblo_retrieval/base.py +++ b/libs/community/langchain_community/chains/pebblo_retrieval/base.py @@ -313,8 +313,12 @@ class PebbloRetrievalQA(Chain): ) @staticmethod - def _get_app_details( # type: ignore - app_name: str, owner: str, description: str, llm: BaseLanguageModel, **kwargs + def _get_app_details( + app_name: str, + owner: str, + description: str, + llm: BaseLanguageModel, + **kwargs: Any, ) -> App: """Fetch app details. Internal method. Returns: diff --git a/libs/community/langchain_community/chat_message_histories/cassandra.py b/libs/community/langchain_community/chat_message_histories/cassandra.py index 4ffd1ac8cee..34ccf215d57 100644 --- a/libs/community/langchain_community/chat_message_histories/cassandra.py +++ b/libs/community/langchain_community/chat_message_histories/cassandra.py @@ -81,7 +81,7 @@ class CassandraChatMessageHistory(BaseChatMessageHistory): ) @property - def messages(self) -> List[BaseMessage]: # type: ignore + def messages(self) -> List[BaseMessage]: # type: ignore[override] """Retrieve all session messages from DB""" # The latest are returned, in chronological order rows = self.table.get_partition( diff --git a/libs/community/langchain_community/chat_message_histories/file.py b/libs/community/langchain_community/chat_message_histories/file.py index d428ad66bbe..2c99144fcff 100644 --- a/libs/community/langchain_community/chat_message_histories/file.py +++ b/libs/community/langchain_community/chat_message_histories/file.py @@ -35,7 +35,7 @@ class FileChatMessageHistory(BaseChatMessageHistory): ) @property - def messages(self) -> List[BaseMessage]: # type: ignore + def messages(self) -> List[BaseMessage]: # type: ignore[override] """Retrieve the messages from the local file""" items = json.loads(self.file_path.read_text(encoding=self.encoding)) messages = messages_from_dict(items) diff --git a/libs/community/langchain_community/chat_message_histories/kafka.py b/libs/community/langchain_community/chat_message_histories/kafka.py index f4bb61f050a..0f90f096c61 100644 --- a/libs/community/langchain_community/chat_message_histories/kafka.py +++ b/libs/community/langchain_community/chat_message_histories/kafka.py @@ -334,7 +334,7 @@ class KafkaChatMessageHistory(BaseChatMessageHistory): ) @property - def messages(self) -> List[BaseMessage]: # type: ignore + def messages(self) -> List[BaseMessage]: # type: ignore[override] """ Retrieve the messages for the session, from Kafka topic continuously from last consumed message. 
This method is stateful and maintains diff --git a/libs/community/langchain_community/chat_message_histories/mongodb.py b/libs/community/langchain_community/chat_message_histories/mongodb.py index b6d4a78a046..4ab6887657f 100644 --- a/libs/community/langchain_community/chat_message_histories/mongodb.py +++ b/libs/community/langchain_community/chat_message_histories/mongodb.py @@ -60,7 +60,7 @@ class MongoDBChatMessageHistory(BaseChatMessageHistory): self.collection.create_index("SessionId") @property - def messages(self) -> List[BaseMessage]: # type: ignore + def messages(self) -> List[BaseMessage]: # type: ignore[override] """Retrieve the messages from MongoDB""" from pymongo import errors diff --git a/libs/community/langchain_community/chat_message_histories/postgres.py b/libs/community/langchain_community/chat_message_histories/postgres.py index 7e01fc9a1c5..c7fd0f85b02 100644 --- a/libs/community/langchain_community/chat_message_histories/postgres.py +++ b/libs/community/langchain_community/chat_message_histories/postgres.py @@ -65,7 +65,7 @@ class PostgresChatMessageHistory(BaseChatMessageHistory): self.connection.commit() @property - def messages(self) -> List[BaseMessage]: # type: ignore + def messages(self) -> List[BaseMessage]: # type: ignore[override] """Retrieve the messages from PostgreSQL""" query = ( f"SELECT message FROM {self.table_name} WHERE session_id = %s ORDER BY id;" diff --git a/libs/community/langchain_community/chat_message_histories/rocksetdb.py b/libs/community/langchain_community/chat_message_histories/rocksetdb.py index 3e48de373ee..54016d8e153 100644 --- a/libs/community/langchain_community/chat_message_histories/rocksetdb.py +++ b/libs/community/langchain_community/chat_message_histories/rocksetdb.py @@ -215,7 +215,7 @@ class RocksetChatMessageHistory(BaseChatMessageHistory): self._create_empty_doc() @property - def messages(self) -> List[BaseMessage]: # type: ignore + def messages(self) -> List[BaseMessage]: # type: ignore[override] """Messages in this chat history.""" return messages_from_dict( self._query( diff --git a/libs/community/langchain_community/chat_message_histories/singlestoredb.py b/libs/community/langchain_community/chat_message_histories/singlestoredb.py index 7dc1e5285c5..df18759e0b7 100644 --- a/libs/community/langchain_community/chat_message_histories/singlestoredb.py +++ b/libs/community/langchain_community/chat_message_histories/singlestoredb.py @@ -212,7 +212,7 @@ class SingleStoreDBChatMessageHistory(BaseChatMessageHistory): conn.close() @property - def messages(self) -> List[BaseMessage]: # type: ignore + def messages(self) -> List[BaseMessage]: # type: ignore[override] """Retrieve the messages from SingleStoreDB""" self._create_table_if_not_exists() conn = self.connection_pool.connect() diff --git a/libs/community/langchain_community/chat_message_histories/sql.py b/libs/community/langchain_community/chat_message_histories/sql.py index 8c0706b7ee0..08ba11d1fe9 100644 --- a/libs/community/langchain_community/chat_message_histories/sql.py +++ b/libs/community/langchain_community/chat_message_histories/sql.py @@ -47,7 +47,7 @@ try: from sqlalchemy.ext.asyncio import async_sessionmaker except ImportError: # dummy for sqlalchemy < 2 - async_sessionmaker = type("async_sessionmaker", (type,), {}) # type: ignore + async_sessionmaker = type("async_sessionmaker", (type,), {}) # type: ignore[assignment,misc] logger = logging.getLogger(__name__) @@ -242,7 +242,7 @@ class SQLChatMessageHistory(BaseChatMessageHistory): self._table_created = True 
@property - def messages(self) -> List[BaseMessage]: # type: ignore + def messages(self) -> List[BaseMessage]: # type: ignore[override] """Retrieve all messages from db""" with self._make_sync_session() as session: result = ( diff --git a/libs/community/langchain_community/chat_message_histories/upstash_redis.py b/libs/community/langchain_community/chat_message_histories/upstash_redis.py index de1e7c37822..dd443812cde 100644 --- a/libs/community/langchain_community/chat_message_histories/upstash_redis.py +++ b/libs/community/langchain_community/chat_message_histories/upstash_redis.py @@ -51,7 +51,7 @@ class UpstashRedisChatMessageHistory(BaseChatMessageHistory): return self.key_prefix + self.session_id @property - def messages(self) -> List[BaseMessage]: # type: ignore + def messages(self) -> List[BaseMessage]: # type: ignore[override] """Retrieve the messages from Upstash Redis""" _items = self.redis_client.lrange(self.key, 0, -1) items = [json.loads(m) for m in _items[::-1]] diff --git a/libs/community/langchain_community/chat_message_histories/xata.py b/libs/community/langchain_community/chat_message_histories/xata.py index 7c8f9f3430c..c9430913f43 100644 --- a/libs/community/langchain_community/chat_message_histories/xata.py +++ b/libs/community/langchain_community/chat_message_histories/xata.py @@ -83,7 +83,7 @@ class XataChatMessageHistory(BaseChatMessageHistory): raise Exception(f"Error adding message to Xata: {r.status_code} {r}") @property - def messages(self) -> List[BaseMessage]: # type: ignore + def messages(self) -> List[BaseMessage]: # type: ignore[override] r = self._client.data().query( self._table_name, payload={ diff --git a/libs/community/langchain_community/chat_message_histories/zep.py b/libs/community/langchain_community/chat_message_histories/zep.py index cd2e056af44..7e7ff47c513 100644 --- a/libs/community/langchain_community/chat_message_histories/zep.py +++ b/libs/community/langchain_community/chat_message_histories/zep.py @@ -87,7 +87,7 @@ class ZepChatMessageHistory(BaseChatMessageHistory): self.session_id = session_id @property - def messages(self) -> List[BaseMessage]: # type: ignore + def messages(self) -> List[BaseMessage]: # type: ignore[override] """Retrieve messages from Zep memory""" zep_memory: Optional[Memory] = self._get_memory() if not zep_memory: diff --git a/libs/community/langchain_community/chat_message_histories/zep_cloud.py b/libs/community/langchain_community/chat_message_histories/zep_cloud.py index 0fc36b737dd..c0c787482a2 100644 --- a/libs/community/langchain_community/chat_message_histories/zep_cloud.py +++ b/libs/community/langchain_community/chat_message_histories/zep_cloud.py @@ -134,7 +134,7 @@ class ZepCloudChatMessageHistory(BaseChatMessageHistory): self.summary_instruction = summary_instruction @property - def messages(self) -> List[BaseMessage]: # type: ignore + def messages(self) -> List[BaseMessage]: # type: ignore[override] """Retrieve messages from Zep memory""" zep_memory: Optional[Memory] = self._get_memory() if not zep_memory: diff --git a/libs/community/langchain_community/chat_models/litellm_router.py b/libs/community/langchain_community/chat_models/litellm_router.py index ee3dee32e1b..78d078b5807 100644 --- a/libs/community/langchain_community/chat_models/litellm_router.py +++ b/libs/community/langchain_community/chat_models/litellm_router.py @@ -42,7 +42,7 @@ class ChatLiteLLMRouter(ChatLiteLLM): def __init__(self, *, router: Any, **kwargs: Any) -> None: """Construct Chat LiteLLM Router.""" - 
super().__init__(router=router, **kwargs) # type: ignore + super().__init__(router=router, **kwargs) # type: ignore[call-arg] self.router = router @property diff --git a/libs/community/langchain_community/chat_models/llamacpp.py b/libs/community/langchain_community/chat_models/llamacpp.py index ea9b5975d11..9714e766250 100644 --- a/libs/community/langchain_community/chat_models/llamacpp.py +++ b/libs/community/langchain_community/chat_models/llamacpp.py @@ -815,4 +815,4 @@ def _convert_delta_to_message_chunk( elif role or default_class == ChatMessageChunk: return ChatMessageChunk(content=content, role=role, id=id_) else: - return default_class(content=content, id=id_) # type: ignore + return default_class(content=content, id=id_) # type: ignore[call-arg] diff --git a/libs/community/langchain_community/chat_models/oci_generative_ai.py b/libs/community/langchain_community/chat_models/oci_generative_ai.py index 716766654d7..71ccb600ce6 100644 --- a/libs/community/langchain_community/chat_models/oci_generative_ai.py +++ b/libs/community/langchain_community/chat_models/oci_generative_ai.py @@ -716,7 +716,7 @@ class ChatOCIGenAI(BaseChatModel, OCIGenAIBase): if is_pydantic_schema: output_parser: OutputParserLike = PydanticToolsParser( tools=[schema], # type: ignore[list-item] - first_tool_only=True, # type: ignore[list-item] + first_tool_only=True, ) else: output_parser = JsonOutputKeyToolsParser( diff --git a/libs/community/langchain_community/chat_models/premai.py b/libs/community/langchain_community/chat_models/premai.py index bec0d7ec57f..8f13a4f44d8 100644 --- a/libs/community/langchain_community/chat_models/premai.py +++ b/libs/community/langchain_community/chat_models/premai.py @@ -158,9 +158,9 @@ def _convert_delta_response_to_message_chunk( Optional[str], ]: """Converts delta response to message chunk""" - _delta = response.choices[0].delta # type: ignore - role = _delta.get("role", "") # type: ignore - content = _delta.get("content", "") # type: ignore + _delta = response.choices[0].delta + role = _delta.get("role", "") + content = _delta.get("content", "") additional_kwargs: Dict = {} finish_reasons: Optional[str] = response.choices[0].finish_reason @@ -398,7 +398,7 @@ class ChatPremAI(BaseChatModel, BaseModel): messages, template_id=kwargs["template_id"] ) else: - system_prompt, messages_to_pass = _messages_to_prompt_dict(messages) # type: ignore + system_prompt, messages_to_pass = _messages_to_prompt_dict(messages) if system_prompt is not None and system_prompt != "": kwargs["system_prompt"] = system_prompt @@ -425,9 +425,9 @@ class ChatPremAI(BaseChatModel, BaseModel): if "template_id" in kwargs: system_prompt, messages_to_pass = _messages_to_prompt_dict( messages, template_id=kwargs["template_id"] - ) # type: ignore + ) else: - system_prompt, messages_to_pass = _messages_to_prompt_dict(messages) # type: ignore + system_prompt, messages_to_pass = _messages_to_prompt_dict(messages) if stop is not None: logger.warning("stop is not supported in langchain streaming") diff --git a/libs/community/langchain_community/document_loaders/blackboard.py b/libs/community/langchain_community/document_loaders/blackboard.py index d2e6b1e0a4d..de54e674058 100644 --- a/libs/community/langchain_community/document_loaders/blackboard.py +++ b/libs/community/langchain_community/document_loaders/blackboard.py @@ -218,7 +218,7 @@ class BlackboardLoader(WebBaseLoader): loader = DirectoryLoader( path=self.folder_path, glob="*.pdf", - loader_cls=PyPDFLoader, # type: ignore + loader_cls=PyPDFLoader, # type: 
ignore[arg-type] ) # Load the documents documents = loader.load() diff --git a/libs/community/langchain_community/document_loaders/blob_loaders/cloud_blob_loader.py b/libs/community/langchain_community/document_loaders/blob_loaders/cloud_blob_loader.py index 0a5efbab54c..65621a67afe 100644 --- a/libs/community/langchain_community/document_loaders/blob_loaders/cloud_blob_loader.py +++ b/libs/community/langchain_community/document_loaders/blob_loaders/cloud_blob_loader.py @@ -35,7 +35,7 @@ class _CloudBlob(Blob): from cloudpathlib import AnyPath if self.data is None and self.path: - return AnyPath(self.path).read_text(encoding=self.encoding) # type: ignore + return AnyPath(self.path).read_text(encoding=self.encoding) elif isinstance(self.data, bytes): return self.data.decode(self.encoding) elif isinstance(self.data, str): @@ -52,7 +52,7 @@ class _CloudBlob(Blob): elif isinstance(self.data, str): return self.data.encode(self.encoding) elif self.data is None and self.path: - return AnyPath(self.path).read_bytes() # type: ignore + return AnyPath(self.path).read_bytes() else: raise ValueError(f"Unable to get bytes for blob {self}") @@ -64,7 +64,7 @@ class _CloudBlob(Blob): if isinstance(self.data, bytes): yield BytesIO(self.data) elif self.data is None and self.path: - return AnyPath(self.path).read_bytes() # type: ignore + return AnyPath(self.path).read_bytes() else: raise NotImplementedError(f"Unable to convert blob {self}") @@ -79,7 +79,7 @@ def _url_to_filename(url: str) -> str: url_parsed = urlparse(url) suffix = Path(url_parsed.path).suffix if url_parsed.scheme in ["s3", "az", "gs"]: - with AnyPath(url).open("rb") as f: # type: ignore + with AnyPath(url).open("rb") as f: temp_file = tempfile.NamedTemporaryFile(suffix=suffix, delete=False) while True: buf = f.read() @@ -116,7 +116,7 @@ def _make_iterator( iterator = _with_tqdm else: - iterator = iter # type: ignore + iterator = iter # type: ignore[assignment] return iterator @@ -220,7 +220,7 @@ class CloudBlobLoader(BlobLoader): def _yield_paths(self) -> Iterable["AnyPath"]: """Yield paths that match the requested pattern.""" - if self.path.is_file(): # type: ignore + if self.path.is_file(): yield self.path return @@ -269,7 +269,7 @@ class CloudBlobLoader(BlobLoader): Blob instance """ if mime_type is None and guess_type: - _mimetype = mimetypes.guess_type(path)[0] if guess_type else None # type: ignore + _mimetype = mimetypes.guess_type(path)[0] if guess_type else None else: _mimetype = mime_type diff --git a/libs/community/langchain_community/document_loaders/googledrive.py b/libs/community/langchain_community/document_loaders/googledrive.py index ea525c10f17..d6b86628850 100644 --- a/libs/community/langchain_community/document_loaders/googledrive.py +++ b/libs/community/langchain_community/document_loaders/googledrive.py @@ -252,7 +252,7 @@ class GoogleDriveLoader(BaseLoader, BaseModel): files = self._fetch_files_recursive(service, folder_id) # If file types filter is provided, we'll filter by the file type. 
if file_types: - _files = [f for f in files if f["mimeType"] in file_types] # type: ignore + _files = [f for f in files if f["mimeType"] in file_types] else: _files = files @@ -261,14 +261,14 @@ class GoogleDriveLoader(BaseLoader, BaseModel): if file["trashed"] and not self.load_trashed_files: continue elif file["mimeType"] == "application/vnd.google-apps.document": - returns.append(self._load_document_from_id(file["id"])) # type: ignore + returns.append(self._load_document_from_id(file["id"])) # type: ignore[arg-type] elif file["mimeType"] == "application/vnd.google-apps.spreadsheet": - returns.extend(self._load_sheet_from_id(file["id"])) # type: ignore + returns.extend(self._load_sheet_from_id(file["id"])) # type: ignore[arg-type] elif ( file["mimeType"] == "application/pdf" or self.file_loader_cls is not None ): - returns.extend(self._load_file_from_id(file["id"])) # type: ignore + returns.extend(self._load_file_from_id(file["id"])) # type: ignore[arg-type] else: pass return returns diff --git a/libs/community/langchain_community/document_loaders/parsers/docai.py b/libs/community/langchain_community/document_loaders/parsers/docai.py index 74b80f6af35..b17b52ebcb0 100644 --- a/libs/community/langchain_community/document_loaders/parsers/docai.py +++ b/libs/community/langchain_community/document_loaders/parsers/docai.py @@ -267,7 +267,7 @@ class DocAIParser(BaseBlobParser): """Initializes Long-Running Operations from their names.""" try: from google.longrunning.operations_pb2 import ( - GetOperationRequest, # type: ignore + GetOperationRequest, ) except ImportError as exc: raise ImportError( diff --git a/libs/community/langchain_community/document_loaders/parsers/documentloader_adapter.py b/libs/community/langchain_community/document_loaders/parsers/documentloader_adapter.py index 38be56d9a24..65b0ff037ca 100644 --- a/libs/community/langchain_community/document_loaders/parsers/documentloader_adapter.py +++ b/libs/community/langchain_community/document_loaders/parsers/documentloader_adapter.py @@ -59,9 +59,9 @@ class DocumentLoaderAsParser(BaseBlobParser): """ Use underlying DocumentLoader to lazily parse the blob. 
""" - doc_loader = self.DocumentLoaderClass( + doc_loader = self.DocumentLoaderClass( # type: ignore[call-arg] file_path=blob.path, **self.document_loader_kwargs - ) # type: ignore + ) for document in doc_loader.lazy_load(): document.metadata.update(blob.metadata) yield document diff --git a/libs/community/langchain_community/document_loaders/parsers/images.py b/libs/community/langchain_community/document_loaders/parsers/images.py index 3d977aae973..1b4e1474af5 100644 --- a/libs/community/langchain_community/document_loaders/parsers/images.py +++ b/libs/community/langchain_community/document_loaders/parsers/images.py @@ -107,7 +107,7 @@ class RapidOCRBlobParser(BaseImageBlobParser): "`rapidocr-onnxruntime` package not found, please install it with " "`pip install rapidocr-onnxruntime`" ) - ocr_result, _ = self.ocr(np.array(img)) # type: ignore + ocr_result, _ = self.ocr(np.array(img)) # type: ignore[misc] content = "" if ocr_result: content = ("\n".join([text[1] for text in ocr_result])).strip() diff --git a/libs/community/langchain_community/document_loaders/parsers/language/tree_sitter_segmenter.py b/libs/community/langchain_community/document_loaders/parsers/language/tree_sitter_segmenter.py index 7187cd6b5f5..a467c269f6e 100644 --- a/libs/community/langchain_community/document_loaders/parsers/language/tree_sitter_segmenter.py +++ b/libs/community/langchain_community/document_loaders/parsers/language/tree_sitter_segmenter.py @@ -82,7 +82,7 @@ class TreeSitterSegmenter(CodeSegmenter): ) for line_num in range(start_line + 1, end_line + 1): - simplified_lines[line_num] = None # type: ignore + simplified_lines[line_num] = None # type: ignore[call-overload] processed_lines.update(lines) diff --git a/libs/community/langchain_community/document_loaders/sharepoint.py b/libs/community/langchain_community/document_loaders/sharepoint.py index 6d5a820248e..ace58fd3d12 100644 --- a/libs/community/langchain_community/document_loaders/sharepoint.py +++ b/libs/community/langchain_community/document_loaders/sharepoint.py @@ -6,7 +6,7 @@ import json from pathlib import Path from typing import Any, Dict, Iterator, List, Optional -import requests # type: ignore +import requests from langchain_core.document_loaders import BaseLoader from langchain_core.documents import Document from pydantic import Field diff --git a/libs/community/langchain_community/document_loaders/trello.py b/libs/community/langchain_community/document_loaders/trello.py index a66c0726743..f3db98cd93f 100644 --- a/libs/community/langchain_community/document_loaders/trello.py +++ b/libs/community/langchain_community/document_loaders/trello.py @@ -78,7 +78,7 @@ class TrelloLoader(BaseLoader): """ try: - from trello import TrelloClient # type: ignore + from trello import TrelloClient except ImportError as ex: raise ImportError( "Could not import trello python package. 
" @@ -124,7 +124,7 @@ class TrelloLoader(BaseLoader): return board def _card_to_doc(self, card: Card, list_dict: dict) -> Document: - from bs4 import BeautifulSoup # type: ignore + from bs4 import BeautifulSoup text_content = "" if self.include_card_name: diff --git a/libs/community/langchain_community/document_loaders/unstructured.py b/libs/community/langchain_community/document_loaders/unstructured.py index edf52a70855..2e77e84f936 100644 --- a/libs/community/langchain_community/document_loaders/unstructured.py +++ b/libs/community/langchain_community/document_loaders/unstructured.py @@ -245,8 +245,8 @@ def get_elements_from_api( from unstructured.partition.api import partition_multiple_via_api _doc_elements = partition_multiple_via_api( - filenames=file_path, # type: ignore - files=file, # type: ignore + filenames=file_path, + files=file, api_key=api_key, api_url=api_url, **unstructured_kwargs, diff --git a/libs/community/langchain_community/document_loaders/web_base.py b/libs/community/langchain_community/document_loaders/web_base.py index 54be2157051..7e227d0c461 100644 --- a/libs/community/langchain_community/document_loaders/web_base.py +++ b/libs/community/langchain_community/document_loaders/web_base.py @@ -393,7 +393,7 @@ class WebBaseLoader(BaseLoader): "https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html" # noqa: E501 ), ) - def aload(self) -> List[Document]: # type: ignore + def aload(self) -> List[Document]: # type: ignore[override] """Load text from the urls in web_path async into Documents.""" results = self.scrape_all(self.web_paths) diff --git a/libs/community/langchain_community/embeddings/__init__.py b/libs/community/langchain_community/embeddings/__init__.py index 7fe88b10ee1..d2d0f3db1ef 100644 --- a/libs/community/langchain_community/embeddings/__init__.py +++ b/libs/community/langchain_community/embeddings/__init__.py @@ -439,7 +439,7 @@ class HypotheticalDocumentEmbedder: ) from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H - return H(*args, **kwargs) # type: ignore + return H(*args, **kwargs) # type: ignore[return-value] @classmethod def from_llm(cls, *args: Any, **kwargs: Any) -> Any: diff --git a/libs/community/langchain_community/embeddings/infinity_local.py b/libs/community/langchain_community/embeddings/infinity_local.py index 075d028c9c7..22e15b017a0 100644 --- a/libs/community/langchain_community/embeddings/infinity_local.py +++ b/libs/community/langchain_community/embeddings/infinity_local.py @@ -68,7 +68,7 @@ class InfinityEmbeddingsLocal(BaseModel, Embeddings): """Validate that api key and python package exists in environment.""" try: - from infinity_emb import AsyncEmbeddingEngine # type: ignore + from infinity_emb import AsyncEmbeddingEngine except ImportError: raise ImportError( "Please install the " diff --git a/libs/community/langchain_community/embeddings/jina.py b/libs/community/langchain_community/embeddings/jina.py index b46ae026397..ad9ea9fd925 100644 --- a/libs/community/langchain_community/embeddings/jina.py +++ b/libs/community/langchain_community/embeddings/jina.py @@ -76,7 +76,7 @@ class JinaEmbeddings(BaseModel, Embeddings): def _embed(self, input: Any) -> List[List[float]]: # Call Jina AI Embedding API - resp = self.session.post( # type: ignore + resp = self.session.post( JINA_API_URL, json={"input": input, "model": self.model_name} ).json() if "data" not in resp: @@ -85,7 +85,7 @@ class JinaEmbeddings(BaseModel, Embeddings): 
embeddings = resp["data"] # Sort resulting embeddings by index - sorted_embeddings = sorted(embeddings, key=lambda e: e["index"]) # type: ignore + sorted_embeddings = sorted(embeddings, key=lambda e: e["index"]) # Return just the embeddings return [result["embedding"] for result in sorted_embeddings] diff --git a/libs/community/langchain_community/embeddings/textembed.py b/libs/community/langchain_community/embeddings/textembed.py index 57fc85211ab..6d963b3d1b3 100644 --- a/libs/community/langchain_community/embeddings/textembed.py +++ b/libs/community/langchain_community/embeddings/textembed.py @@ -309,7 +309,7 @@ class AsyncOpenAITextEmbedEmbeddingClient: Raises: Exception: If the response status is not 200. """ - async with session.post(**kwargs) as response: # type: ignore + async with session.post(**kwargs) as response: # type: ignore[arg-type] if response.status != 200: raise Exception( f"TextEmbed responded with an unexpected status message " diff --git a/libs/community/langchain_community/example_selectors/ngram_overlap.py b/libs/community/langchain_community/example_selectors/ngram_overlap.py index fdd2e34eaf7..92577acd561 100644 --- a/libs/community/langchain_community/example_selectors/ngram_overlap.py +++ b/libs/community/langchain_community/example_selectors/ngram_overlap.py @@ -22,7 +22,7 @@ def ngram_overlap_score(source: List[str], example: List[str]) -> float: https://aclanthology.org/P02-1040.pdf """ from nltk.translate.bleu_score import ( - SmoothingFunction, # type: ignore + SmoothingFunction, sentence_bleu, ) diff --git a/libs/community/langchain_community/indexes/_sql_record_manager.py b/libs/community/langchain_community/indexes/_sql_record_manager.py index 423df0d0ef1..4e7874d9c10 100644 --- a/libs/community/langchain_community/indexes/_sql_record_manager.py +++ b/libs/community/langchain_community/indexes/_sql_record_manager.py @@ -54,7 +54,7 @@ try: from sqlalchemy.ext.asyncio import async_sessionmaker except ImportError: # dummy for sqlalchemy < 2 - async_sessionmaker = type("async_sessionmaker", (type,), {}) # type: ignore + async_sessionmaker = type("async_sessionmaker", (type,), {}) # type: ignore[assignment,misc] from langchain_community.indexes.base import RecordManager @@ -308,8 +308,8 @@ class SQLRecordManager(RecordManager): [UpsertionRecord.key, UpsertionRecord.namespace], set_=dict( # attr-defined type ignore - updated_at=insert_stmt.excluded.updated_at, # type: ignore - group_id=insert_stmt.excluded.group_id, # type: ignore + updated_at=insert_stmt.excluded.updated_at, + group_id=insert_stmt.excluded.group_id, ), ) elif self.dialect == "postgresql": @@ -322,8 +322,8 @@ class SQLRecordManager(RecordManager): "uix_key_namespace", # Name of constraint set_=dict( # attr-defined type ignore - updated_at=insert_stmt.excluded.updated_at, # type: ignore - group_id=insert_stmt.excluded.group_id, # type: ignore + updated_at=insert_stmt.excluded.updated_at, + group_id=insert_stmt.excluded.group_id, ), ) else: @@ -383,8 +383,8 @@ class SQLRecordManager(RecordManager): [UpsertionRecord.key, UpsertionRecord.namespace], set_=dict( # attr-defined type ignore - updated_at=insert_stmt.excluded.updated_at, # type: ignore - group_id=insert_stmt.excluded.group_id, # type: ignore + updated_at=insert_stmt.excluded.updated_at, + group_id=insert_stmt.excluded.group_id, ), ) elif self.dialect == "postgresql": @@ -397,8 +397,8 @@ class SQLRecordManager(RecordManager): "uix_key_namespace", # Name of constraint set_=dict( # attr-defined type ignore - 
updated_at=insert_stmt.excluded.updated_at, # type: ignore - group_id=insert_stmt.excluded.group_id, # type: ignore + updated_at=insert_stmt.excluded.updated_at, + group_id=insert_stmt.excluded.group_id, ), ) else: diff --git a/libs/community/langchain_community/llms/azureml_endpoint.py b/libs/community/langchain_community/llms/azureml_endpoint.py index 5b4d39bbc4d..1ec03b9a527 100644 --- a/libs/community/langchain_community/llms/azureml_endpoint.py +++ b/libs/community/langchain_community/llms/azureml_endpoint.py @@ -486,10 +486,10 @@ class AzureMLBaseEndpoint(BaseModel): timeout = values.get("timeout", DEFAULT_TIMEOUT) http_client = AzureMLEndpointClient( - endpoint_url, # type: ignore - endpoint_key.get_secret_value(), # type: ignore - deployment_name, # type: ignore - timeout, # type: ignore + endpoint_url, # type: ignore[arg-type] + endpoint_key.get_secret_value(), # type: ignore[union-attr] + deployment_name, # type: ignore[arg-type] + timeout, ) return http_client diff --git a/libs/community/langchain_community/llms/beam.py b/libs/community/langchain_community/llms/beam.py index 2e7fdcd9388..f5f95d1be62 100644 --- a/libs/community/langchain_community/llms/beam.py +++ b/libs/community/langchain_community/llms/beam.py @@ -201,7 +201,7 @@ class Beam(LLM): # type: ignore[override, override, override, override] def _deploy(self) -> str: """Call to Beam.""" try: - import beam # type: ignore + import beam if beam.__path__ == "": raise ImportError diff --git a/libs/community/langchain_community/llms/gigachat.py b/libs/community/langchain_community/llms/gigachat.py index a867bc19437..0a30d7e658d 100644 --- a/libs/community/langchain_community/llms/gigachat.py +++ b/libs/community/langchain_community/llms/gigachat.py @@ -181,7 +181,7 @@ class _BaseGigaChat(Serializable): def get_num_tokens(self, text: str) -> int: """Count approximate number of tokens""" if self.use_api_for_tokens: - return self.tokens_count([text])[0].tokens # type: ignore + return self.tokens_count([text])[0].tokens else: return round(len(text) / 4.6) diff --git a/libs/community/langchain_community/llms/huggingface_hub.py b/libs/community/langchain_community/llms/huggingface_hub.py index 151aa48f2b2..95e8d8f0d2d 100644 --- a/libs/community/langchain_community/llms/huggingface_hub.py +++ b/libs/community/langchain_community/llms/huggingface_hub.py @@ -142,7 +142,7 @@ class HuggingFaceHub(LLM): if "error" in response: raise ValueError(f"Error raised by inference API: {response['error']}") - response_key = VALID_TASKS_DICT[self.task] # type: ignore + response_key = VALID_TASKS_DICT[self.task] # type: ignore[index] if isinstance(response, list): text = response[0][response_key] else: diff --git a/libs/community/langchain_community/llms/ipex_llm.py b/libs/community/langchain_community/llms/ipex_llm.py index 1758c323d0e..0432b6aecca 100644 --- a/libs/community/langchain_community/llms/ipex_llm.py +++ b/libs/community/langchain_community/llms/ipex_llm.py @@ -172,7 +172,7 @@ class IpexLLM(LLM): if not low_bit_model: if load_in_low_bit is not None: load_function_name = "from_pretrained" - load_kwargs["load_in_low_bit"] = load_in_low_bit # type: ignore + load_kwargs["load_in_low_bit"] = load_in_low_bit # type: ignore[assignment] else: load_function_name = "from_pretrained" load_kwargs["load_in_4bit"] = load_in_4bit diff --git a/libs/community/langchain_community/llms/openai.py b/libs/community/langchain_community/llms/openai.py index d4eb965d452..9d640c2bd44 100644 --- a/libs/community/langchain_community/llms/openai.py +++ 
b/libs/community/langchain_community/llms/openai.py @@ -246,7 +246,7 @@ class BaseOpenAI(BaseLLM): http_client: Union[Any, None] = None """Optional httpx.Client.""" - def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore + def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore[misc] """Initialize the OpenAI object.""" model_name = data.get("model_name", "") if ( diff --git a/libs/community/langchain_community/llms/yi.py b/libs/community/langchain_community/llms/yi.py index 6b41e8d3378..6f6dc963804 100644 --- a/libs/community/langchain_community/llms/yi.py +++ b/libs/community/langchain_community/llms/yi.py @@ -47,7 +47,7 @@ class YiLLM(LLM): def _post(self, request: Any) -> Any: headers = { "Content-Type": "application/json", - "Authorization": f"Bearer {self.yi_api_key.get_secret_value()}", # type: ignore + "Authorization": f"Bearer {self.yi_api_key.get_secret_value()}", # type: ignore[union-attr] } urls = [] diff --git a/libs/community/langchain_community/output_parsers/ernie_functions.py b/libs/community/langchain_community/output_parsers/ernie_functions.py index 8b80134af22..4b23e9e5916 100644 --- a/libs/community/langchain_community/output_parsers/ernie_functions.py +++ b/libs/community/langchain_community/output_parsers/ernie_functions.py @@ -161,11 +161,11 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser): def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any: _result = super().parse_result(result) if self.args_only: - pydantic_args = self.pydantic_schema.parse_raw(_result) # type: ignore + pydantic_args = self.pydantic_schema.parse_raw(_result) # type: ignore[union-attr] else: fn_name = _result["name"] _args = _result["arguments"] - pydantic_args = self.pydantic_schema[fn_name].parse_raw(_args) # type: ignore + pydantic_args = self.pydantic_schema[fn_name].parse_raw(_args) # type: ignore[index] return pydantic_args diff --git a/libs/community/langchain_community/query_constructors/deeplake.py b/libs/community/langchain_community/query_constructors/deeplake.py index 0c2bf5d0434..d339eb0cdc6 100644 --- a/libs/community/langchain_community/query_constructors/deeplake.py +++ b/libs/community/langchain_community/query_constructors/deeplake.py @@ -53,9 +53,9 @@ class DeepLakeTranslator(Visitor): def _format_func(self, func: Union[Operator, Comparator]) -> str: self._validate_func(func) if isinstance(func, Operator): - value = OPERATOR_TO_TQL[func.value] # type: ignore + value = OPERATOR_TO_TQL[func.value] # type: ignore[index] elif isinstance(func, Comparator): - value = COMPARATOR_TO_TQL[func.value] # type: ignore + value = COMPARATOR_TO_TQL[func.value] # type: ignore[index] return f"{value}" def visit_operation(self, operation: Operation) -> str: diff --git a/libs/community/langchain_community/query_constructors/timescalevector.py b/libs/community/langchain_community/query_constructors/timescalevector.py index bfac120bded..c51718a11e9 100644 --- a/libs/community/langchain_community/query_constructors/timescalevector.py +++ b/libs/community/langchain_community/query_constructors/timescalevector.py @@ -42,9 +42,9 @@ class TimescaleVectorTranslator(Visitor): def _format_func(self, func: Union[Operator, Comparator]) -> str: self._validate_func(func) if isinstance(func, Operator): - value = self.OPERATOR_MAP[func.value] # type: ignore + value = self.OPERATOR_MAP[func.value] # type: ignore[index] elif isinstance(func, Comparator): - value = self.COMPARATOR_MAP[func.value] # type: ignore + value = 
self.COMPARATOR_MAP[func.value] # type: ignore[index] return f"{value}" def visit_operation(self, operation: Operation) -> client.Predicates: diff --git a/libs/community/langchain_community/storage/sql.py b/libs/community/langchain_community/storage/sql.py index c5b4ae978ff..c9652ae5f53 100644 --- a/libs/community/langchain_community/storage/sql.py +++ b/libs/community/langchain_community/storage/sql.py @@ -41,7 +41,7 @@ try: from sqlalchemy.ext.asyncio import async_sessionmaker except ImportError: # dummy for sqlalchemy < 2 - async_sessionmaker = type("async_sessionmaker", (type,), {}) # type: ignore + async_sessionmaker = type("async_sessionmaker", (type,), {}) # type: ignore[assignment,misc] Base = declarative_base() @@ -255,7 +255,7 @@ class SQLStore(BaseStore[str, bytes]): def yield_keys(self, *, prefix: Optional[str] = None) -> Iterator[str]: with self._make_sync_session() as session: - for v in session.query(LangchainKeyValueStores).filter( # type: ignore + for v in session.query(LangchainKeyValueStores).filter( LangchainKeyValueStores.namespace == self.namespace ): if str(v.key).startswith(prefix or ""): diff --git a/libs/community/langchain_community/tools/databricks/_execution.py b/libs/community/langchain_community/tools/databricks/_execution.py index 09693e55107..67ab2e7c26e 100644 --- a/libs/community/langchain_community/tools/databricks/_execution.py +++ b/libs/community/langchain_community/tools/databricks/_execution.py @@ -178,7 +178,7 @@ def execute_function( statement=parametrized_statement.statement, warehouse_id=warehouse_id, parameters=parametrized_statement.parameters, - **execute_statement_args, # type: ignore + **execute_statement_args, ) if response.status and job_pending(response.status.state) and response.statement_id: statement_id = response.statement_id @@ -197,7 +197,7 @@ def execute_function( f"status after {wait} seconds." 
) time.sleep(wait) - response = ws.statement_execution.get_statement(statement_id) # type: ignore + response = ws.statement_execution.get_statement(statement_id) if response.status is None or not job_pending(response.status.state): break wait_time += wait @@ -228,7 +228,7 @@ def execute_function( if is_scalar(function): value = None if data_array and len(data_array) > 0 and len(data_array[0]) > 0: - value = str(data_array[0][0]) # type: ignore + value = str(data_array[0][0]) return FunctionExecutionResult( format="SCALAR", value=value, truncated=truncated ) diff --git a/libs/community/langchain_community/tools/databricks/tool.py b/libs/community/langchain_community/tools/databricks/tool.py index 67ef21148e4..81932cdfdf2 100644 --- a/libs/community/langchain_community/tools/databricks/tool.py +++ b/libs/community/langchain_community/tools/databricks/tool.py @@ -51,8 +51,8 @@ def _uc_type_to_pydantic_type(uc_type_json: Union[str, Dict[str, Any]]) -> Type: if tpe == "array": element_type = _uc_type_to_pydantic_type(uc_type_json["elementType"]) if uc_type_json["containsNull"]: - element_type = Optional[element_type] # type: ignore - return List[element_type] # type: ignore + element_type = Optional[element_type] # type: ignore[assignment] + return List[element_type] # type: ignore[valid-type] elif tpe == "map": key_type = uc_type_json["keyType"] assert key_type == "string", TypeError( @@ -60,14 +60,14 @@ def _uc_type_to_pydantic_type(uc_type_json: Union[str, Dict[str, Any]]) -> Type: ) value_type = _uc_type_to_pydantic_type(uc_type_json["valueType"]) if uc_type_json["valueContainsNull"]: - value_type: Type = Optional[value_type] # type: ignore - return Dict[str, value_type] # type: ignore + value_type: Type = Optional[value_type] # type: ignore[no-redef] + return Dict[str, value_type] # type: ignore[valid-type] elif tpe == "struct": fields = {} for field in uc_type_json["fields"]: field_type = _uc_type_to_pydantic_type(field["type"]) if field.get("nullable"): - field_type = Optional[field_type] # type: ignore + field_type = Optional[field_type] # type: ignore[assignment] comment = ( uc_type_json["metadata"].get("comment") if "metadata" in uc_type_json @@ -76,7 +76,7 @@ def _uc_type_to_pydantic_type(uc_type_json: Union[str, Dict[str, Any]]) -> Type: fields[field["name"]] = (field_type, Field(..., description=comment)) uc_type_json_str = json.dumps(uc_type_json, sort_keys=True) type_hash = md5(uc_type_json_str.encode()).hexdigest()[:8] - return create_model(f"Struct_{type_hash}", **fields) # type: ignore + return create_model(f"Struct_{type_hash}", **fields) # type: ignore[call-overload] else: raise TypeError(f"Unknown type {uc_type_json}. Try upgrading this package.") @@ -94,7 +94,7 @@ def _generate_args_schema(function: "FunctionInfo") -> Type[BaseModel]: description = p.comment default: Any = ... if p.parameter_default: - pydantic_type = Optional[pydantic_type] # type: ignore + pydantic_type = Optional[pydantic_type] # type: ignore[assignment] default = None # TODO: Convert default value string to the correct type. 
# We might need to use statement execution API @@ -108,9 +108,9 @@ def _generate_args_schema(function: "FunctionInfo") -> Type[BaseModel]: pydantic_type, Field(default=default, description=description), ) - return create_model( + return create_model( # type: ignore[call-overload] f"{function.catalog_name}__{function.schema_name}__{function.name}__params", - **fields, # type: ignore + **fields, ) diff --git a/libs/community/langchain_community/tools/nuclia/tool.py b/libs/community/langchain_community/tools/nuclia/tool.py index 8bae0929a74..e5d4e447527 100644 --- a/libs/community/langchain_community/tools/nuclia/tool.py +++ b/libs/community/langchain_community/tools/nuclia/tool.py @@ -217,10 +217,10 @@ class NucliaUnderstandingAPI(BaseTool): # type: ignore[override, override] logger.info(f"No matching id for {uuid}") else: self._results[matching_id]["status"] = "done" - data = MessageToJson( + data = MessageToJson( # type: ignore[call-arg] pb, preserving_proto_field_name=True, - including_default_value_fields=True, # type: ignore + including_default_value_fields=True, ) self._results[matching_id]["data"] = data diff --git a/libs/community/langchain_community/tools/zapier/tool.py b/libs/community/langchain_community/tools/zapier/tool.py index cc7f0f46890..3d00392e75e 100644 --- a/libs/community/langchain_community/tools/zapier/tool.py +++ b/libs/community/langchain_community/tools/zapier/tool.py @@ -161,9 +161,7 @@ class ZapierNLARunAction(BaseTool): # type: ignore[override] ) -ZapierNLARunAction.__doc__ = ( - ZapierNLAWrapper.run.__doc__ + ZapierNLARunAction.__doc__ # type: ignore -) +ZapierNLARunAction.__doc__ = ZapierNLAWrapper.run.__doc__ + ZapierNLARunAction.__doc__ # type: ignore[operator] # other useful actions @@ -210,5 +208,5 @@ class ZapierNLAListActions(BaseTool): # type: ignore[override] ZapierNLAListActions.__doc__ = ( - ZapierNLAWrapper.list.__doc__ + ZapierNLAListActions.__doc__ # type: ignore + ZapierNLAWrapper.list.__doc__ + ZapierNLAListActions.__doc__ # type: ignore[operator] ) diff --git a/libs/community/langchain_community/utilities/bing_search.py b/libs/community/langchain_community/utilities/bing_search.py index a3dc03fdea9..ecd6a935a40 100644 --- a/libs/community/langchain_community/utilities/bing_search.py +++ b/libs/community/langchain_community/utilities/bing_search.py @@ -50,7 +50,7 @@ class BingSearchAPIWrapper(BaseModel): response = requests.get( self.bing_search_url, headers=headers, - params=params, # type: ignore + params=params, ) response.raise_for_status() search_results = response.json() diff --git a/libs/community/langchain_community/utilities/google_scholar.py b/libs/community/langchain_community/utilities/google_scholar.py index 06b3be83f20..de9e6b30641 100644 --- a/libs/community/langchain_community/utilities/google_scholar.py +++ b/libs/community/langchain_community/utilities/google_scholar.py @@ -82,7 +82,7 @@ class GoogleScholarAPIWrapper(BaseModel): # 0 is the first page of results, 20 is the 2nd page of results, # 40 is the 3rd page of results, etc. results = ( - self.google_scholar_engine( # type: ignore + self.google_scholar_engine( { "q": query, "start": page, @@ -106,7 +106,7 @@ class GoogleScholarAPIWrapper(BaseModel): ): # From the last page we would only need top_k_results%20 results # if k is not divisible by 20. 
results = ( - self.google_scholar_engine( # type: ignore + self.google_scholar_engine( { "q": query, "start": page, diff --git a/libs/community/langchain_community/utilities/metaphor_search.py b/libs/community/langchain_community/utilities/metaphor_search.py index cbfab0d35e8..b95303c63ae 100644 --- a/libs/community/langchain_community/utilities/metaphor_search.py +++ b/libs/community/langchain_community/utilities/metaphor_search.py @@ -49,7 +49,6 @@ class MetaphorSearchAPIWrapper(BaseModel): "useAutoprompt": use_autoprompt, } response = requests.post( - # type: ignore f"{METAPHOR_API_URL}/search", headers=headers, json=params, diff --git a/libs/community/langchain_community/utilities/openapi.py b/libs/community/langchain_community/utilities/openapi.py index 9e6ff44cf12..6c1fac04ee6 100644 --- a/libs/community/langchain_community/utilities/openapi.py +++ b/libs/community/langchain_community/utilities/openapi.py @@ -53,7 +53,7 @@ if TYPE_CHECKING: try: from openapi_pydantic import OpenAPI except ImportError: - OpenAPI = object # type: ignore + OpenAPI = object class OpenAPISpec(OpenAPI): diff --git a/libs/community/langchain_community/utilities/passio_nutrition_ai.py b/libs/community/langchain_community/utilities/passio_nutrition_ai.py index 41caef5eb00..aca880cc417 100644 --- a/libs/community/langchain_community/utilities/passio_nutrition_ai.py +++ b/libs/community/langchain_community/utilities/passio_nutrition_ai.py @@ -134,7 +134,7 @@ class NutritionAIAPI(BaseModel): return requests.get( self.nutritionai_api_url, headers=self.auth_.headers, - params=params, # type: ignore + params=params, ) def _api_call_results(self, search_term: str) -> dict: diff --git a/libs/community/langchain_community/utilities/sql_database.py b/libs/community/langchain_community/utilities/sql_database.py index 1a94434afde..c300bedc700 100644 --- a/libs/community/langchain_community/utilities/sql_database.py +++ b/libs/community/langchain_community/utilities/sql_database.py @@ -395,7 +395,7 @@ class SQLDatabase: try: # get the sample rows with self._engine.connect() as connection: - sample_rows_result = connection.execute(command) # type: ignore + sample_rows_result = connection.execute(command) # shorten values in the sample rows sample_rows = list( map(lambda ls: [str(i)[:100] for i in ls], sample_rows_result) diff --git a/libs/community/langchain_community/utilities/steam.py b/libs/community/langchain_community/utilities/steam.py index 96998b999e5..d4243d8296f 100644 --- a/libs/community/langchain_community/utilities/steam.py +++ b/libs/community/langchain_community/utilities/steam.py @@ -123,10 +123,10 @@ class SteamWebAPIWrapper(BaseModel): except ImportError: raise ImportError("steamspypi library is not installed.") users_games = self.get_users_games(steam_id) - result = {} # type: ignore + result: dict[str, int] = {} most_popular_genre = "" most_popular_genre_count = 0 - for game in users_games["games"]: # type: ignore + for game in users_games["games"]: # type: ignore[call-overload] appid = game["appid"] data_request = {"request": "appdetails", "appid": appid} genreStore = steamspypi.download(data_request) @@ -148,7 +148,7 @@ class SteamWebAPIWrapper(BaseModel): sorted_data = sorted( data.values(), key=lambda x: x.get("average_forever", 0), reverse=True ) - owned_games = [game["appid"] for game in users_games["games"]] # type: ignore + owned_games = [game["appid"] for game in users_games["games"]] # type: ignore[call-overload] remaining_games = [ game for game in sorted_data if game["appid"] not in 
diff --git a/libs/community/langchain_community/utilities/steam.py b/libs/community/langchain_community/utilities/steam.py
index 96998b999e5..d4243d8296f 100644
--- a/libs/community/langchain_community/utilities/steam.py
+++ b/libs/community/langchain_community/utilities/steam.py
@@ -123,10 +123,10 @@ class SteamWebAPIWrapper(BaseModel):
        except ImportError:
            raise ImportError("steamspypi library is not installed.")
        users_games = self.get_users_games(steam_id)
-        result = {}  # type: ignore
+        result: dict[str, int] = {}
        most_popular_genre = ""
        most_popular_genre_count = 0
-        for game in users_games["games"]:  # type: ignore
+        for game in users_games["games"]:  # type: ignore[call-overload]
            appid = game["appid"]
            data_request = {"request": "appdetails", "appid": appid}
            genreStore = steamspypi.download(data_request)
@@ -148,7 +148,7 @@ class SteamWebAPIWrapper(BaseModel):
        sorted_data = sorted(
            data.values(), key=lambda x: x.get("average_forever", 0), reverse=True
        )
-        owned_games = [game["appid"] for game in users_games["games"]]  # type: ignore
+        owned_games = [game["appid"] for game in users_games["games"]]  # type: ignore[call-overload]
        remaining_games = [
            game for game in sorted_data if game["appid"] not in owned_games
        ]
diff --git a/libs/community/langchain_community/utilities/tavily_search.py b/libs/community/langchain_community/utilities/tavily_search.py
index 8dfb2b989d3..dc8b896043f 100644
--- a/libs/community/langchain_community/utilities/tavily_search.py
+++ b/libs/community/langchain_community/utilities/tavily_search.py
@@ -58,7 +58,6 @@ class TavilySearchAPIWrapper(BaseModel):
            "include_images": include_images,
        }
        response = requests.post(
-            # type: ignore
            f"{TAVILY_API_URL}/search",
            json=params,
        )
diff --git a/libs/community/langchain_community/utilities/you.py b/libs/community/langchain_community/utilities/you.py
index dadb2309c2f..7c31ebf014e 100644
--- a/libs/community/langchain_community/utilities/you.py
+++ b/libs/community/langchain_community/utilities/you.py
@@ -240,7 +240,6 @@ class YouSearchAPIWrapper(BaseModel):
        if self.endpoint_type == "snippet":
            self.endpoint_type = "search"
        response = requests.get(
-            # type: ignore
            f"{YOU_API_URL}/{self.endpoint_type}",
            params=params,
            headers=headers,
diff --git a/libs/community/langchain_community/utils/math.py b/libs/community/langchain_community/utils/math.py
index d2f242bb319..59b7440ddbc 100644
--- a/libs/community/langchain_community/utils/math.py
+++ b/libs/community/langchain_community/utils/math.py
@@ -71,4 +71,4 @@ def cosine_similarity_top_k(
    top_k_idxs = top_k_idxs[np.argsort(score_array.ravel()[top_k_idxs])][::-1]
    ret_idxs = np.unravel_index(top_k_idxs, score_array.shape)
    scores = score_array.ravel()[top_k_idxs].tolist()
-    return list(zip(*ret_idxs)), scores  # type: ignore
+    return list(zip(*ret_idxs)), scores  # type: ignore[return-value]
diff --git a/libs/community/langchain_community/vectorstores/azure_cosmos_db.py b/libs/community/langchain_community/vectorstores/azure_cosmos_db.py
index e91f6c210a3..9e90a80b425 100644
--- a/libs/community/langchain_community/vectorstores/azure_cosmos_db.py
+++ b/libs/community/langchain_community/vectorstores/azure_cosmos_db.py
@@ -407,7 +407,7 @@ class AzureCosmosDBVectorSearch(VectorStore):
            for t, m, embedding in zip(texts, metadatas, embeddings)
        ]
        # insert the documents in Cosmos DB
-        insert_result = self._collection.insert_many(to_insert)  # type: ignore
+        insert_result = self._collection.insert_many(to_insert)
        return insert_result.inserted_ids

    @classmethod
diff --git a/libs/community/langchain_community/vectorstores/azuresearch.py b/libs/community/langchain_community/vectorstores/azuresearch.py
index 4ed0d3f4d6d..e2671db6489 100644
--- a/libs/community/langchain_community/vectorstores/azuresearch.py
+++ b/libs/community/langchain_community/vectorstores/azuresearch.py
@@ -1571,7 +1571,7 @@ class AzureSearch(VectorStore):
        azure_search.add_embeddings(text_embeddings, metadatas, **kwargs)
        return azure_search

-    def as_retriever(self, **kwargs: Any) -> AzureSearchVectorStoreRetriever:  # type: ignore
+    def as_retriever(self, **kwargs: Any) -> AzureSearchVectorStoreRetriever:  # type: ignore[override]
        """Return AzureSearchVectorStoreRetriever initialized from this VectorStore.

        Args:
@@ -1781,7 +1781,7 @@ async def _areorder_results_with_maximal_marginal_relevance(
        # Function can return -1 index
        if x == -1:
            break
-        ret.append((documents[x], scores[x]))  # type: ignore
+        ret.append((documents[x], scores[x]))
    return ret

@@ -1816,7 +1816,7 @@ def _reorder_results_with_maximal_marginal_relevance(
        # Function can return -1 index
        if x == -1:
            break
-        ret.append((documents[x], scores[x]))  # type: ignore
+        ret.append((documents[x], scores[x]))
    return ret
diff --git a/libs/community/langchain_community/vectorstores/bigquery_vector_search.py b/libs/community/langchain_community/vectorstores/bigquery_vector_search.py
index 051b26a5343..670005f7dfd 100644
--- a/libs/community/langchain_community/vectorstores/bigquery_vector_search.py
+++ b/libs/community/langchain_community/vectorstores/bigquery_vector_search.py
@@ -656,7 +656,7 @@ class BigQueryVectorSearch(VectorStore):
        Returns:
            List of Documents most similar to the query vector, with similarity scores.
        """
-        emb = self.embedding_model.embed_query(query)  # type: ignore
+        emb = self.embedding_model.embed_query(query)
        return self.similarity_search_with_score_by_vector(
            emb, k, filter, brute_force, fraction_lists_to_search, **kwargs
        )
@@ -738,9 +738,7 @@ class BigQueryVectorSearch(VectorStore):
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
-        query_embedding = self.embedding_model.embed_query(  # type: ignore
-            query
-        )
+        query_embedding = self.embedding_model.embed_query(query)
        doc_tuples = self._search_with_score_and_embeddings_by_vector(
            query_embedding, fetch_k, filter, brute_force, fraction_lists_to_search
        )
diff --git a/libs/community/langchain_community/vectorstores/clarifai.py b/libs/community/langchain_community/vectorstores/clarifai.py
index 4cae4bf7cf9..2cf87419bc2 100644
--- a/libs/community/langchain_community/vectorstores/clarifai.py
+++ b/libs/community/langchain_community/vectorstores/clarifai.py
@@ -183,7 +183,7 @@ class Clarifai(VectorStore):
        try:
            from clarifai.client.search import Search
            from clarifai_grpc.grpc.api import resources_pb2
-            from google.protobuf import json_format  # type: ignore
+            from google.protobuf import json_format
        except ImportError as e:
            raise ImportError(
                "Could not import clarifai python package. "
diff --git a/libs/community/langchain_community/vectorstores/deeplake.py b/libs/community/langchain_community/vectorstores/deeplake.py
index c78a7903c41..d0c85657419 100644
--- a/libs/community/langchain_community/vectorstores/deeplake.py
+++ b/libs/community/langchain_community/vectorstores/deeplake.py
@@ -275,7 +275,7 @@ class DeepLake(VectorStore):
            metadata=metadatas,
            embedding_data=texts,
            embedding_tensor="embedding",
-            embedding_function=self._embedding_function.embed_documents,  # type: ignore
+            embedding_function=self._embedding_function.embed_documents,  # type: ignore[union-attr]
            return_ids=True,
            **kwargs,
        )
@@ -464,8 +464,8 @@ class DeepLake(VectorStore):

        if use_maximal_marginal_relevance:
            lambda_mult = kwargs.get("lambda_mult", 0.5)
-            indices = maximal_marginal_relevance(  # type: ignore
-                embedding,  # type: ignore
+            indices = maximal_marginal_relevance(
+                embedding,  # type: ignore[arg-type]
                embeddings,
                k=min(k, len(texts)),
                lambda_mult=lambda_mult,
@@ -829,7 +829,7 @@ class DeepLake(VectorStore):
            use_maximal_marginal_relevance=True,
            lambda_mult=lambda_mult,
            exec_option=exec_option,
-            embedding_function=embedding_function,  # type: ignore
+            embedding_function=embedding_function,  # type: ignore[arg-type]
            **kwargs,
        )
diff --git a/libs/community/langchain_community/vectorstores/docarray/base.py b/libs/community/langchain_community/vectorstores/docarray/base.py
index 9e209ad61eb..82c5b7b5b2a 100644
--- a/libs/community/langchain_community/vectorstores/docarray/base.py
+++ b/libs/community/langchain_community/vectorstores/docarray/base.py
@@ -103,7 +103,7 @@ class DocArrayIndex(VectorStore, ABC):
            Lower score represents more similarity.
        """
        query_embedding = self.embedding.embed_query(query)
-        query_doc = self.doc_cls(embedding=query_embedding)  # type: ignore
+        query_doc = self.doc_cls(embedding=query_embedding)
        docs, scores = self.doc_index.find(query_doc, search_field="embedding", limit=k)

        result = [
@@ -152,7 +152,7 @@ class DocArrayIndex(VectorStore, ABC):
            List of Documents most similar to the query vector.
        """

-        query_doc = self.doc_cls(embedding=embedding)  # type: ignore
+        query_doc = self.doc_cls(embedding=embedding)
        docs = self.doc_index.find(
            query_doc, search_field="embedding", limit=k
        ).documents
@@ -187,7 +187,7 @@ class DocArrayIndex(VectorStore, ABC):
            List of Documents selected by maximal marginal relevance.
        """
        query_embedding = self.embedding.embed_query(query)
-        query_doc = self.doc_cls(embedding=query_embedding)  # type: ignore
+        query_doc = self.doc_cls(embedding=query_embedding)
        docs = self.doc_index.find(
            query_doc, search_field="embedding", limit=fetch_k
diff --git a/libs/community/langchain_community/vectorstores/docarray/hnsw.py b/libs/community/langchain_community/vectorstores/docarray/hnsw.py
index 805dad1e318..6ce8986d43d 100644
--- a/libs/community/langchain_community/vectorstores/docarray/hnsw.py
+++ b/libs/community/langchain_community/vectorstores/docarray/hnsw.py
@@ -71,7 +71,7 @@ class DocArrayHnswSearch(DocArrayIndex):
            num_threads=num_threads,
            **kwargs,
        )
-        doc_index = HnswDocumentIndex[doc_cls](work_dir=work_dir)  # type: ignore
+        doc_index = HnswDocumentIndex[doc_cls](work_dir=work_dir)
        return cls(doc_index, embedding)

    @classmethod
diff --git a/libs/community/langchain_community/vectorstores/docarray/in_memory.py b/libs/community/langchain_community/vectorstores/docarray/in_memory.py
index 32fe95d9506..9ec1eb5ea20 100644
--- a/libs/community/langchain_community/vectorstores/docarray/in_memory.py
+++ b/libs/community/langchain_community/vectorstores/docarray/in_memory.py
@@ -41,7 +41,7 @@ class DocArrayInMemorySearch(DocArrayIndex):
        from docarray.index import InMemoryExactNNIndex

        doc_cls = cls._get_doc_cls(space=metric, **kwargs)
-        doc_index = InMemoryExactNNIndex[doc_cls]()  # type: ignore
+        doc_index = InMemoryExactNNIndex[doc_cls]()
        return cls(doc_index, embedding)

    @classmethod
diff --git a/libs/community/langchain_community/vectorstores/documentdb.py b/libs/community/langchain_community/vectorstores/documentdb.py
index 8730493b9d6..081245d249a 100644
--- a/libs/community/langchain_community/vectorstores/documentdb.py
+++ b/libs/community/langchain_community/vectorstores/documentdb.py
@@ -265,7 +265,7 @@ class DocumentDBVectorSearch(VectorStore):
            for t, m, embedding in zip(texts, metadatas, embeddings)
        ]
        # insert the documents in DocumentDB
-        insert_result = self._collection.insert_many(to_insert)  # type: ignore
+        insert_result = self._collection.insert_many(to_insert)
        return insert_result.inserted_ids

    @classmethod
diff --git a/libs/community/langchain_community/vectorstores/duckdb.py b/libs/community/langchain_community/vectorstores/duckdb.py
index 89b4a2f576a..6b230a62674 100644
--- a/libs/community/langchain_community/vectorstores/duckdb.py
+++ b/libs/community/langchain_community/vectorstores/duckdb.py
@@ -220,7 +220,7 @@ class DuckDB(VectorStore):
        except ImportError:
            warnings.warn("You may need to `pip install pandas` to use this method.")

-        embedding = self._embedding.embed_query(query)  # type: ignore
+        embedding = self._embedding.embed_query(query)
        list_cosine_similarity = self.duckdb.FunctionExpression(
            "list_cosine_similarity",
            self.duckdb.ColumnExpression(self._vector_key),
@@ -265,7 +265,7 @@ class DuckDB(VectorStore):
            A list of Documents most similar to the query.
        """
-        embedding = self._embedding.embed_query(query)  # type: ignore
+        embedding = self._embedding.embed_query(query)
        list_cosine_similarity = self.duckdb.FunctionExpression(
            "list_cosine_similarity",
            self.duckdb.ColumnExpression(self._vector_key),
diff --git a/libs/community/langchain_community/vectorstores/infinispanvs.py b/libs/community/langchain_community/vectorstores/infinispanvs.py
index c9012e17fbe..87c9274e9c8 100644
--- a/libs/community/langchain_community/vectorstores/infinispanvs.py
+++ b/libs/community/langchain_community/vectorstores/infinispanvs.py
@@ -254,7 +254,7 @@ repeated float %s = 1;
        texts_l = list(texts)
        if last_vector:
            texts_l.pop()
-        embeds = self._embedding.embed_documents(texts_l)  # type: ignore
+        embeds = self._embedding.embed_documents(texts_l)  # type: ignore[union-attr]
        if last_vector:
            embeds.append(last_vector)
        if not metadatas:
@@ -288,7 +288,7 @@ repeated float %s = 1;
        Returns:
            List[Tuple[Document, float]]
        """
-        embed = self._embedding.embed_query(query)  # type: ignore
+        embed = self._embedding.embed_query(query)  # type: ignore[union-attr]
        documents = self.similarity_search_with_score_by_vector(embedding=embed, k=k)
        return documents
diff --git a/libs/community/langchain_community/vectorstores/lancedb.py b/libs/community/langchain_community/vectorstores/lancedb.py
index d000380053c..90bc8b81e7c 100644
--- a/libs/community/langchain_community/vectorstores/lancedb.py
+++ b/libs/community/langchain_community/vectorstores/lancedb.py
@@ -201,7 +201,7 @@ class LanceDB(VectorStore):
        """
        docs = []
        ids = ids or [str(uuid.uuid4()) for _ in texts]
-        embeddings = self._embedding.embed_documents(list(texts))  # type: ignore
+        embeddings = self._embedding.embed_documents(list(texts))  # type: ignore[union-attr]
        for idx, text in enumerate(texts):
            embedding = embeddings[idx]
            metadata = metadatas[idx] if metadatas else {"id": ids[idx]}
@@ -490,7 +490,7 @@ class LanceDB(VectorStore):
            embedding = self._embedding.embed_query(query)
            _query = (embedding, query)
        else:
-            _query = query  # type: ignore
+            _query = query  # type: ignore[assignment]
        res = self._query(_query, k, filter=filter, name=name, **kwargs)
        return self.results_to_docs(res, score=score)
diff --git a/libs/community/langchain_community/vectorstores/lantern.py b/libs/community/langchain_community/vectorstores/lantern.py
index 75e4d012ae5..326fc4d4a56 100644
--- a/libs/community/langchain_community/vectorstores/lantern.py
+++ b/libs/community/langchain_community/vectorstores/lantern.py
@@ -58,9 +58,9 @@ def get_embedding_store(
    embedding_type = None

    if distance_strategy == DistanceStrategy.HAMMING:
-        embedding_type = sqlalchemy.INTEGER  # type: ignore
+        embedding_type = sqlalchemy.INTEGER
    else:
-        embedding_type = sqlalchemy.REAL  # type: ignore
+        embedding_type = sqlalchemy.REAL  # type: ignore[assignment]

    DynamicBase = declarative_base(class_registry=dict())  # type: Any

@@ -74,7 +74,7 @@ def get_embedding_store(
        cmetadata = sqlalchemy.Column(JSON, nullable=True)

        # custom_id : any user defined id
        custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True)
-        embedding = sqlalchemy.Column(sqlalchemy.ARRAY(embedding_type))  # type: ignore
+        embedding = sqlalchemy.Column(sqlalchemy.ARRAY(embedding_type))  # type: ignore[arg-type,var-annotated]

    return EmbeddingStore
diff --git a/libs/community/langchain_community/vectorstores/momento_vector_index.py b/libs/community/langchain_community/vectorstores/momento_vector_index.py
index ebfa3c4d0e9..9e8fe3ab60e 100644
--- a/libs/community/langchain_community/vectorstores/momento_vector_index.py
+++ b/libs/community/langchain_community/vectorstores/momento_vector_index.py
@@ -397,7 +397,7 @@ class MomentoVectorIndex(VectorStore):
        )
        selected = [response.hits[i].metadata for i in mmr_selected]
        return [
-            Document(page_content=metadata.pop(self.text_field, ""), metadata=metadata)  # type: ignore
+            Document(page_content=metadata.pop(self.text_field, ""), metadata=metadata)
            for metadata in selected
        ]

@@ -484,6 +484,6 @@ class MomentoVectorIndex(VectorStore):
            configuration=VectorIndexConfigurations.Default.latest(),
            credential_provider=CredentialProvider.from_string(api_key),
        )
-        vector_db = cls(embedding=embedding, client=client, **kwargs)  # type: ignore
+        vector_db = cls(embedding=embedding, client=client, **kwargs)  # type: ignore[call-arg]
        vector_db.add_texts(texts=texts, metadatas=metadatas, **kwargs)
        return vector_db
diff --git a/libs/community/langchain_community/vectorstores/mongodb_atlas.py b/libs/community/langchain_community/vectorstores/mongodb_atlas.py
index ec4a07c3dd0..2c23ae92c40 100644
--- a/libs/community/langchain_community/vectorstores/mongodb_atlas.py
+++ b/libs/community/langchain_community/vectorstores/mongodb_atlas.py
@@ -183,7 +183,7 @@ class MongoDBAtlasVectorSearch(VectorStore):
            for t, m, embedding in zip(texts, metadatas, embeddings)
        ]
        # insert the documents in MongoDB Atlas
-        insert_result = self._collection.insert_many(to_insert)  # type: ignore
+        insert_result = self._collection.insert_many(to_insert)
        return insert_result.inserted_ids

    def _similarity_search_with_score(
diff --git a/libs/community/langchain_community/vectorstores/oraclevs.py b/libs/community/langchain_community/vectorstores/oraclevs.py
index 0e0181c67f4..f39c7ba1646 100644
--- a/libs/community/langchain_community/vectorstores/oraclevs.py
+++ b/libs/community/langchain_community/vectorstores/oraclevs.py
@@ -857,7 +857,7 @@ class OracleVS(VectorStore):
                )
            documents.append((document, distance, current_embedding))
-        return documents  # type: ignore
+        return documents

    @_handle_exceptions
    def max_marginal_relevance_search_with_score_by_vector(
diff --git a/libs/community/langchain_community/vectorstores/pgembedding.py b/libs/community/langchain_community/vectorstores/pgembedding.py
index 48f04ffef31..158d755c5cd 100644
--- a/libs/community/langchain_community/vectorstores/pgembedding.py
+++ b/libs/community/langchain_community/vectorstores/pgembedding.py
@@ -49,7 +49,7 @@ class CollectionStore(BaseModel):

    @classmethod
    def get_by_name(cls, session: Session, name: str) -> Optional["CollectionStore"]:
-        return session.query(cls).filter(cls.name == name).first()  # type: ignore
+        return session.query(cls).filter(cls.name == name).first()

    @classmethod
    def get_or_create(
@@ -88,7 +88,7 @@ class EmbeddingStore(BaseModel):
    )
    collection = relationship(CollectionStore, back_populates="embeddings")

-    embedding = sqlalchemy.Column(sqlalchemy.ARRAY(sqlalchemy.REAL))  # type: ignore
+    embedding = sqlalchemy.Column(sqlalchemy.ARRAY(sqlalchemy.REAL))  # type: ignore[var-annotated]
    document = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    cmetadata = sqlalchemy.Column(JSON, nullable=True)
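The pgvector.py hunk that follows keeps its ignore but narrows it to the two codes named in the diff for a fallback that rebinds an imported name. The shape of the pattern in miniature; only the surrounding module is hypothetical, the import and codes are taken from the hunk below:

    from typing import Any

    try:
        from sqlalchemy import SQLColumnExpression  # available in sqlalchemy >= 2
    except ImportError:
        # sqlalchemy < 2: fall back to Any. Rebinding the imported name draws
        # both an incompatible-assignment and a misc error from mypy.
        SQLColumnExpression = Any  # type: ignore[assignment,misc]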
diff --git a/libs/community/langchain_community/vectorstores/pgvector.py b/libs/community/langchain_community/vectorstores/pgvector.py
index 1f4fef445dc..c2a9cd8b697 100644
--- a/libs/community/langchain_community/vectorstores/pgvector.py
+++ b/libs/community/langchain_community/vectorstores/pgvector.py
@@ -33,7 +33,7 @@ try:
    from sqlalchemy import SQLColumnExpression
except ImportError:
    # for sqlalchemy < 2
-    SQLColumnExpression = Any  # type: ignore
+    SQLColumnExpression = Any  # type: ignore[assignment,misc]

from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
@@ -126,7 +126,7 @@ def _get_embedding_collection_store(
        def get_by_name(
            cls, session: Session, name: str
        ) -> Optional["CollectionStore"]:
-            return session.query(cls).filter(cls.name == name).first()  # type: ignore
+            return session.query(cls).filter(cls.name == name).first()

        @classmethod
        def get_or_create(
@@ -956,7 +956,7 @@ class PGVector(VectorStore):
        results: List[Any] = (
            session.query(
                self.EmbeddingStore,
-                self.distance_strategy(embedding).label("distance"),  # type: ignore
+                self.distance_strategy(embedding).label("distance"),
            )
            .filter(*filter_by)
            .order_by(sqlalchemy.asc("distance"))
diff --git a/libs/community/langchain_community/vectorstores/redis/base.py b/libs/community/langchain_community/vectorstores/redis/base.py
index b43414bc270..a4efc12a7d9 100644
--- a/libs/community/langchain_community/vectorstores/redis/base.py
+++ b/libs/community/langchain_community/vectorstores/redis/base.py
@@ -419,7 +419,7 @@ class Redis(VectorStore):

        # type check for metadata
        if metadatas:
-            if isinstance(metadatas, list) and len(metadatas) != len(texts):  # type: ignore
+            if isinstance(metadatas, list) and len(metadatas) != len(texts):
                raise ValueError("Number of metadatas must match number of texts")
            if not (isinstance(metadatas, list) and isinstance(metadatas[0], dict)):
                raise ValueError("Metadatas must be a list of dicts")
@@ -427,7 +427,7 @@ class Redis(VectorStore):
            generated_schema = _generate_field_schema(metadatas[0])
            if index_schema:
                # read in the schema solely to compare to the generated schema
-                user_schema = read_schema(index_schema)  # type: ignore
+                user_schema = read_schema(index_schema)

                # the very rare case where a super user decides to pass the index
                # schema and a document loader is used that has metadata which
@@ -722,7 +722,7 @@ class Redis(VectorStore):

        # type check for metadata
        if metadatas:
-            if isinstance(metadatas, list) and len(metadatas) != len(texts):  # type: ignore
+            if isinstance(metadatas, list) and len(metadatas) != len(texts):  # type: ignore[arg-type]
                raise ValueError("Number of metadatas must match number of texts")
            if not (isinstance(metadatas, list) and isinstance(metadatas[0], dict)):
                raise ValueError("Metadatas must be a list of dicts")
@@ -850,7 +850,7 @@ class Redis(VectorStore):
        # Perform vector search
        # ignore type because redis-py is wrong about bytes
        try:
-            results = self.client.ft(self.index_name).search(redis_query, params_dict)  # type: ignore
+            results = self.client.ft(self.index_name).search(redis_query, params_dict)
        except redis.exceptions.ResponseError as e:
            # split error message and see if it starts with "Syntax"
            if str(e).split(" ")[0] == "Syntax":
@@ -966,7 +966,7 @@ class Redis(VectorStore):
        # Perform vector search
        # ignore type because redis-py is wrong about bytes
        try:
-            results = self.client.ft(self.index_name).search(redis_query, params_dict)  # type: ignore
+            results = self.client.ft(self.index_name).search(redis_query, params_dict)
        except redis.exceptions.ResponseError as e:
            # split error message and see if it starts with "Syntax"
            if str(e).split(" ")[0] == "Syntax":
@@ -1206,7 +1206,7 @@ class Redis(VectorStore):
        # read in schema (yaml file or dict) and
        # pass to the Pydantic validators
        if index_schema:
-            schema_values = read_schema(index_schema)  # type: ignore
+            schema_values = read_schema(index_schema)
            schema = RedisModel(**schema_values)

            # ensure user did not exclude the content field
@@ -1242,7 +1242,7 @@ class Redis(VectorStore):

    def _create_index_if_not_exist(self, dim: int = 1536) -> None:
        try:
-            from redis.commands.search.indexDefinition import (  # type: ignore
+            from redis.commands.search.indexDefinition import (
                IndexDefinition,
                IndexType,
            )
diff --git a/libs/community/langchain_community/vectorstores/redis/filters.py b/libs/community/langchain_community/vectorstores/redis/filters.py
index 09a58bd281d..7dc0cc0ff6b 100644
--- a/libs/community/langchain_community/vectorstores/redis/filters.py
+++ b/libs/community/langchain_community/vectorstores/redis/filters.py
@@ -140,7 +140,7 @@ class RedisTag(RedisFilterField):
        elif isinstance(other, str):
            other = [other]

-        self._set_value(other, self.SUPPORTED_VAL_TYPES, operator)  # type: ignore
+        self._set_value(other, self.SUPPORTED_VAL_TYPES, operator)  # type: ignore[arg-type]

    @check_operator_misuse
    def __eq__(
@@ -240,7 +240,7 @@ class RedisNum(RedisFilterField):
            >>> from langchain_community.vectorstores.redis import RedisNum
            >>> filter = RedisNum("zipcode") == 90210
        """
-        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.EQ)  # type: ignore
+        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.EQ)  # type: ignore[arg-type]
        return RedisFilterExpression(str(self))

    @check_operator_misuse
@@ -254,7 +254,7 @@ class RedisNum(RedisFilterField):
            >>> from langchain_community.vectorstores.redis import RedisNum
            >>> filter = RedisNum("zipcode") != 90210
        """
-        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.NE)  # type: ignore
+        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.NE)  # type: ignore[arg-type]
        return RedisFilterExpression(str(self))

    def __gt__(self, other: Union[int, float]) -> "RedisFilterExpression":
@@ -267,7 +267,7 @@ class RedisNum(RedisFilterField):
            >>> from langchain_community.vectorstores.redis import RedisNum
            >>> filter = RedisNum("age") > 18
        """
-        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.GT)  # type: ignore
+        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.GT)  # type: ignore[arg-type]
        return RedisFilterExpression(str(self))

    def __lt__(self, other: Union[int, float]) -> "RedisFilterExpression":
@@ -280,7 +280,7 @@ class RedisNum(RedisFilterField):
            >>> from langchain_community.vectorstores.redis import RedisNum
            >>> filter = RedisNum("age") < 18
        """
-        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.LT)  # type: ignore
+        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.LT)  # type: ignore[arg-type]
        return RedisFilterExpression(str(self))

    def __ge__(self, other: Union[int, float]) -> "RedisFilterExpression":
@@ -293,7 +293,7 @@ class RedisNum(RedisFilterField):
            >>> from langchain_community.vectorstores.redis import RedisNum
            >>> filter = RedisNum("age") >= 18
        """
-        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.GE)  # type: ignore
+        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.GE)  # type: ignore[arg-type]
        return RedisFilterExpression(str(self))

    def __le__(self, other: Union[int, float]) -> "RedisFilterExpression":
@@ -306,7 +306,7 @@ class RedisNum(RedisFilterField):
            >>> from langchain_community.vectorstores.redis import RedisNum
            >>> filter = RedisNum("age") <= 18
        """
-        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.LE)  # type: ignore
+        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.LE)  # type: ignore[arg-type]
        return RedisFilterExpression(str(self))

@@ -336,7 +336,7 @@ class RedisText(RedisFilterField):
            >>> from langchain_community.vectorstores.redis import RedisText
            >>> filter = RedisText("job") == "engineer"
        """
-        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.EQ)  # type: ignore
+        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.EQ)  # type: ignore[arg-type]
        return RedisFilterExpression(str(self))

    @check_operator_misuse
@@ -350,7 +350,7 @@ class RedisText(RedisFilterField):
            >>> from langchain_community.vectorstores.redis import RedisText
            >>> filter = RedisText("job") != "engineer"
        """
-        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.NE)  # type: ignore
+        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.NE)  # type: ignore[arg-type]
        return RedisFilterExpression(str(self))

    def __mod__(self, other: str) -> "RedisFilterExpression":
@@ -366,7 +366,7 @@ class RedisText(RedisFilterField):
            >>> filter = RedisText("job") % "engineer|doctor"  # contains either term
            >>> filter = RedisText("job") % "engineer doctor"  # contains both terms
        """
-        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.LIKE)  # type: ignore
+        self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.LIKE)  # type: ignore[arg-type]
        return RedisFilterExpression(str(self))

    def __str__(self) -> str:
diff --git a/libs/community/langchain_community/vectorstores/redis/schema.py b/libs/community/langchain_community/vectorstores/redis/schema.py
index cd1920799e6..c613c9a7621 100644
--- a/libs/community/langchain_community/vectorstores/redis/schema.py
+++ b/libs/community/langchain_community/vectorstores/redis/schema.py
@@ -14,7 +14,7 @@ from typing_extensions import TYPE_CHECKING, Literal
from langchain_community.vectorstores.redis.constants import REDIS_VECTOR_DTYPE_MAP

if TYPE_CHECKING:
-    from redis.commands.search.field import (  # type: ignore
+    from redis.commands.search.field import (
        NumericField,
        TagField,
        TextField,
@@ -47,13 +47,13 @@ class TextFieldSchema(RedisField):
    sortable: Optional[bool] = False

    def as_field(self) -> TextField:
-        from redis.commands.search.field import TextField  # type: ignore
+        from redis.commands.search.field import TextField

        return TextField(
            self.name,
            weight=self.weight,
            no_stem=self.no_stem,
-            phonetic_matcher=self.phonetic_matcher,  # type: ignore
+            phonetic_matcher=self.phonetic_matcher,
            sortable=self.sortable,
            no_index=self.no_index,
        )
@@ -68,7 +68,7 @@ class TagFieldSchema(RedisField):
    sortable: Optional[bool] = False

    def as_field(self) -> TagField:
-        from redis.commands.search.field import TagField  # type: ignore
+        from redis.commands.search.field import TagField

        return TagField(
            self.name,
@@ -86,7 +86,7 @@ class NumericFieldSchema(RedisField):
    sortable: Optional[bool] = False

    def as_field(self) -> NumericField:
-        from redis.commands.search.field import NumericField  # type: ignore
+        from redis.commands.search.field import NumericField

        return NumericField(self.name, sortable=self.sortable, no_index=self.no_index)
@@ -131,7 +131,7 @@ class FlatVectorField(RedisVectorField):  # type: ignore[override]
    block_size: Optional[int] = None

    def as_field(self) -> VectorField:
-        from redis.commands.search.field import VectorField  # type: ignore
+        from redis.commands.search.field import VectorField

        field_data = super()._fields()
        if self.block_size is not None:
@@ -149,7 +149,7 @@ class HNSWVectorField(RedisVectorField):  # type: ignore[override]
    epsilon: float = Field(default=0.01)

    def as_field(self) -> VectorField:
-        from redis.commands.search.field import VectorField  # type: ignore
+        from redis.commands.search.field import VectorField

        field_data = super()._fields()
        field_data.update(
@@ -193,9 +193,9 @@ class RedisModel(BaseModel):

        # ignore types as pydantic is handling type validation and conversion
        if vector_field["algorithm"] == "FLAT":
-            self.vector.append(FlatVectorField(**vector_field))  # type: ignore
+            self.vector.append(FlatVectorField(**vector_field))
        elif vector_field["algorithm"] == "HNSW":
-            self.vector.append(HNSWVectorField(**vector_field))  # type: ignore
+            self.vector.append(HNSWVectorField(**vector_field))
        else:
            raise ValueError(
                f"algorithm must be either FLAT or HNSW. Got "
diff --git a/libs/community/langchain_community/vectorstores/supabase.py b/libs/community/langchain_community/vectorstores/supabase.py
index 0012ac449d2..74c52d96bed 100644
--- a/libs/community/langchain_community/vectorstores/supabase.py
+++ b/libs/community/langchain_community/vectorstores/supabase.py
@@ -257,7 +257,7 @@ class SupabaseVectorStore(VectorStore):
        match_result = [
            (
                Document(
-                    metadata=search.get("metadata", {}),  # type: ignore
+                    metadata=search.get("metadata", {}),
                    page_content=search.get("content", ""),
                ),
                search.get("similarity", 0.0),
@@ -302,7 +302,7 @@ class SupabaseVectorStore(VectorStore):
        match_result = [
            (
                Document(
-                    metadata=search.get("metadata", {}),  # type: ignore
+                    metadata=search.get("metadata", {}),
                    page_content=search.get("content", ""),
                ),
                search.get("similarity", 0.0),
@@ -351,7 +351,7 @@ class SupabaseVectorStore(VectorStore):
                "id": ids[idx],
                "content": documents[idx].page_content,
                "embedding": embedding,
-                "metadata": documents[idx].metadata,  # type: ignore
+                "metadata": documents[idx].metadata,
                **kwargs,
            }
            for idx, embedding in enumerate(vectors)
@@ -360,7 +360,7 @@ class SupabaseVectorStore(VectorStore):

        for i in range(0, len(rows), chunk_size):
            chunk = rows[i : i + chunk_size]

-            result = client.from_(table_name).upsert(chunk).execute()  # type: ignore
+            result = client.from_(table_name).upsert(chunk).execute()

            if len(result.data) == 0:
                raise Exception("Error inserting: No rows added")
diff --git a/libs/community/langchain_community/vectorstores/upstash.py b/libs/community/langchain_community/vectorstores/upstash.py
index 23aeec0cc3c..692be6b05c1 100644
--- a/libs/community/langchain_community/vectorstores/upstash.py
+++ b/libs/community/langchain_community/vectorstores/upstash.py
@@ -153,7 +153,7 @@ class UpstashVectorStore(VectorStore):
        self._namespace = namespace

    @property
-    def embeddings(self) -> Optional[Union[Embeddings, bool]]:  # type: ignore
+    def embeddings(self) -> Optional[Union[Embeddings, bool]]:  # type: ignore[override]
        """Access the query embedding object if available."""
        return self._embeddings
@@ -730,7 +730,7 @@ class UpstashVectorStore(VectorStore):
        )
        selected = [results[i].metadata for i in mmr_selected]
        return [
-            Document(page_content=metadata.pop((self._text_key)), metadata=metadata)  # type: ignore
+            Document(page_content=metadata.pop((self._text_key)), metadata=metadata)
            for metadata in selected
        ]
@@ -798,7 +798,7 @@ class UpstashVectorStore(VectorStore):
        )
        selected = [results[i].metadata for i in mmr_selected]
        return [
-            Document(page_content=metadata.pop((self._text_key)), metadata=metadata)  # type: ignore
+            Document(page_content=metadata.pop((self._text_key)), metadata=metadata)
            for metadata in selected
        ]
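Just above, upstash.py keeps `# type: ignore[override]` on its `embeddings` property because the subclass widens the base class's declared return type, which mypy flags as an unsafe override. A reduced sketch of the same situation with hypothetical classes standing in for the vector-store hierarchy:

    from typing import Optional, Union

    class Store:
        @property
        def embeddings(self) -> Optional[str]:
            return None

    class UpstashLikeStore(Store):
        @property
        # Widening the return type breaks Liskov substitution, so mypy reports
        # [override]; the coded ignore documents that this is deliberate.
        def embeddings(self) -> Optional[Union[str, bool]]:  # type: ignore[override]
            return True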
diff --git a/libs/community/langchain_community/vectorstores/vectara.py b/libs/community/langchain_community/vectorstores/vectara.py
index be3b2300b7a..a5d80589525 100644
--- a/libs/community/langchain_community/vectorstores/vectara.py
+++ b/libs/community/langchain_community/vectorstores/vectara.py
@@ -467,7 +467,7 @@ class Vectara(VectorStore):
        }

        if config.lambda_val > 0:
-            body["query"][0]["corpusKey"][0]["lexicalInterpolationConfig"] = {  # type: ignore
+            body["query"][0]["corpusKey"][0]["lexicalInterpolationConfig"] = {  # type: ignore[index]
                "lambda": config.lambda_val
            }
@@ -495,7 +495,7 @@ class Vectara(VectorStore):
                }
            ]
            if chat:
-                body["query"][0]["summary"][0]["chat"] = {  # type: ignore
+                body["query"][0]["summary"][0]["chat"] = {  # type: ignore[index]
                    "store": True,
                    "conversationId": chat_conv_id,
                }
diff --git a/libs/community/pyproject.toml b/libs/community/pyproject.toml
index 1b24cc9384c..93c223b2db7 100644
--- a/libs/community/pyproject.toml
+++ b/libs/community/pyproject.toml
@@ -65,7 +65,7 @@ lint = [
]
dev = ["jupyter<2.0.0,>=1.0.0", "setuptools<68.0.0,>=67.6.1", "langchain-core"]
typing = [
-    "mypy<2.0,>=1.12",
+    "mypy<2.0,>=1.15",
    "types-pyyaml<7.0.0.0,>=6.0.12.2",
    "types-requests<3.0.0.0,>=2.28.11.5",
    "types-toml<1.0.0.0,>=0.10.8.1",
@@ -103,7 +103,7 @@ ignore-regex = ".*(Stati Uniti|Tense=Pres).*"
ignore-words-list = "momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogyny,unsecure,damon,crate,aadd,symbl,precesses,accademia,nin,cann"

[tool.ruff.lint]
-select = ["E", "F", "I", "T201"]
+select = ["E", "F", "I", "PGH003", "T201"]

[tool.coverage.run]
omit = ["tests/*"]
diff --git a/libs/community/tests/integration_tests/cache/test_gptcache.py b/libs/community/tests/integration_tests/cache/test_gptcache.py
index 4126dd9e3d9..11e529649bc 100644
--- a/libs/community/tests/integration_tests/cache/test_gptcache.py
+++ b/libs/community/tests/integration_tests/cache/test_gptcache.py
@@ -27,7 +27,7 @@ def init_gptcache_map(cache_obj: Any) -> None:
        pre_embedding_func=get_prompt,
        data_manager=get_data_manager(data_path=cache_path),
    )
-    init_gptcache_map._i = i + 1  # type: ignore
+    init_gptcache_map._i = i + 1  # type: ignore[attr-defined]


def init_gptcache_map_with_llm(cache_obj: Any, llm: str) -> None:
diff --git a/libs/community/tests/integration_tests/chat_message_histories/test_zep.py b/libs/community/tests/integration_tests/chat_message_histories/test_zep.py
index 9391f84cc8f..566e7dadab1 100644
--- a/libs/community/tests/integration_tests/chat_message_histories/test_zep.py
+++ b/libs/community/tests/integration_tests/chat_message_histories/test_zep.py
@@ -37,7 +37,7 @@ def test_messages(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> Non
            Message(content="message2", role="human", metadata={"key2": "value2"}),
        ],
    )
-    zep_chat.zep_client.memory.get_memory.return_value = mock_memory  # type: ignore
+    zep_chat.zep_client.memory.get_memory.return_value = mock_memory

    result = zep_chat.messages

@@ -52,25 +52,25 @@ def test_add_user_message(
    mocker: MockerFixture, zep_chat: ZepChatMessageHistory
) -> None:
    zep_chat.add_user_message("test message")
-    zep_chat.zep_client.memory.add_memory.assert_called_once()  # type: ignore
+    zep_chat.zep_client.memory.add_memory.assert_called_once()


@pytest.mark.requires("zep_python")
def test_add_ai_message(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
    zep_chat.add_ai_message("test message")
-    zep_chat.zep_client.memory.add_memory.assert_called_once()  # type: ignore
+    zep_chat.zep_client.memory.add_memory.assert_called_once()


@pytest.mark.requires("zep_python")
def test_append(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
    zep_chat.add_message(AIMessage(content="test message"))
-    zep_chat.zep_client.memory.add_memory.assert_called_once()  # type: ignore
+    zep_chat.zep_client.memory.add_memory.assert_called_once()


@pytest.mark.requires("zep_python")
def test_search(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
    zep_chat.search("test query")
-    zep_chat.zep_client.memory.search_memory.assert_called_once_with(  # type: ignore
+    zep_chat.zep_client.memory.search_memory.assert_called_once_with(
        "test_session", mocker.ANY, limit=None
    )

@@ -78,6 +78,4 @@ def test_search(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
@pytest.mark.requires("zep_python")
def test_clear(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
    zep_chat.clear()
-    zep_chat.zep_client.memory.delete_memory.assert_called_once_with(  # type: ignore
-        "test_session"
-    )
+    zep_chat.zep_client.memory.delete_memory.assert_called_once_with("test_session")
diff --git a/libs/community/tests/integration_tests/chat_models/test_minimax.py b/libs/community/tests/integration_tests/chat_models/test_minimax.py
index 88199f07535..45127107f58 100644
--- a/libs/community/tests/integration_tests/chat_models/test_minimax.py
+++ b/libs/community/tests/integration_tests/chat_models/test_minimax.py
@@ -67,7 +67,7 @@ class AnswerWithJustification(BaseModel):

def test_chat_minimax_with_structured_output() -> None:
    """Test MiniMaxChat with structured output."""
-    llm = MiniMaxChat()  # type: ignore
+    llm = MiniMaxChat()  # type: ignore[call-arg]
    structured_llm = llm.with_structured_output(AnswerWithJustification)
    response = structured_llm.invoke(
        "What weighs more a pound of bricks or a pound of feathers"
    )
@@ -77,7 +77,7 @@ def test_chat_minimax_with_structured_output() -> None:

def test_chat_tongyi_with_structured_output_include_raw() -> None:
    """Test MiniMaxChat with structured output."""
-    llm = MiniMaxChat()  # type: ignore
+    llm = MiniMaxChat()  # type: ignore[call-arg]
    structured_llm = llm.with_structured_output(
        AnswerWithJustification, include_raw=True
    )
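The chat-model tests above and below keep `# type: ignore[call-arg]` on zero-argument constructor calls. These pydantic-based models declare fields that look required to mypy but are filled in at validation time, typically from environment variables, so the bare call is legitimate at runtime. A hedged sketch of the mechanism with an invented model (the real langchain models wire this up differently, but the typing consequence is the same):

    import os
    from pydantic import BaseModel, model_validator

    class FakeChatModel(BaseModel):
        api_key: str  # looks like a required constructor argument to mypy

        @model_validator(mode="before")
        @classmethod
        def _fill_from_env(cls, values: dict) -> dict:
            # Resolve the "required" field from the environment when omitted.
            values.setdefault("api_key", os.environ.get("FAKE_API_KEY", ""))
            return values

    os.environ["FAKE_API_KEY"] = "secret"
    llm = FakeChatModel()  # type: ignore[call-arg]  # key comes from the env var
    print(llm.api_key)  # secret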
diff --git a/libs/community/tests/integration_tests/chat_models/test_tongyi.py b/libs/community/tests/integration_tests/chat_models/test_tongyi.py
index e6884e65353..50d1d81c99d 100644
--- a/libs/community/tests/integration_tests/chat_models/test_tongyi.py
+++ b/libs/community/tests/integration_tests/chat_models/test_tongyi.py
@@ -170,7 +170,7 @@ class GenerateUsername(BaseModel):

def test_tool_use() -> None:
-    llm = ChatTongyi(model="qwen-turbo", temperature=0)  # type: ignore
+    llm = ChatTongyi(model="qwen-turbo", temperature=0)  # type: ignore[call-arg]
    llm_with_tool = llm.bind_tools(tools=[GenerateUsername])
    msgs: List = [
        HumanMessage(content="Sally has green hair, what would her username be?")
    ]
@@ -187,7 +187,7 @@ def test_tool_use() -> None:

    tool_msg = ToolMessage(
        content="sally_green_hair",
-        tool_call_id=ai_msg.tool_calls[0]["id"],  # type: ignore
+        tool_call_id=ai_msg.tool_calls[0]["id"],
        name=ai_msg.tool_calls[0]["name"],
    )
    msgs.extend([ai_msg, tool_msg])
@@ -201,7 +201,7 @@ def test_tool_use() -> None:
            gathered = message
            first = False
        else:
-            gathered = gathered + message  # type: ignore
+            gathered = gathered + message  # type: ignore[assignment]
    assert isinstance(gathered, AIMessageChunk)

    streaming_tool_msg = ToolMessage(
@@ -215,7 +215,7 @@ def test_tool_use() -> None:

def test_manual_tool_call_msg() -> None:
    """Test passing in manually construct tool call message."""
-    llm = ChatTongyi(model="qwen-turbo", temperature=0)  # type: ignore
+    llm = ChatTongyi(model="qwen-turbo", temperature=0)  # type: ignore[call-arg]
    llm_with_tool = llm.bind_tools(tools=[GenerateUsername])
    msgs: List = [
        HumanMessage(content="Sally has green hair, what would her username be?"),
@@ -246,7 +246,7 @@ class AnswerWithJustification(BaseModel):

def test_chat_tongyi_with_structured_output() -> None:
    """Test ChatTongyi with structured output."""
-    llm = ChatTongyi()  # type: ignore
+    llm = ChatTongyi()  # type: ignore[call-arg]
    structured_llm = llm.with_structured_output(AnswerWithJustification)
    response = structured_llm.invoke(
        "What weighs more a pound of bricks or a pound of feathers"
    )
@@ -256,7 +256,7 @@ def test_chat_tongyi_with_structured_output() -> None:

def test_chat_tongyi_with_structured_output_include_raw() -> None:
    """Test ChatTongyi with structured output."""
-    llm = ChatTongyi()  # type: ignore
+    llm = ChatTongyi()  # type: ignore[call-arg]
    structured_llm = llm.with_structured_output(
        AnswerWithJustification, include_raw=True
    )
diff --git a/libs/community/tests/integration_tests/chat_models/test_vertexai.py b/libs/community/tests/integration_tests/chat_models/test_vertexai.py
index 37c1a8ecfde..585476de561 100644
--- a/libs/community/tests/integration_tests/chat_models/test_vertexai.py
+++ b/libs/community/tests/integration_tests/chat_models/test_vertexai.py
@@ -75,7 +75,7 @@ async def test_vertexai_agenerate(model_name: str) -> None:
    message = HumanMessage(content="Hello")
    response = await model.agenerate([[message]])
    assert isinstance(response, LLMResult)
-    assert isinstance(response.generations[0][0].message, AIMessage)  # type: ignore
+    assert isinstance(response.generations[0][0].message, AIMessage)  # type: ignore[union-attr]

    sync_response = model.generate([[message]])
    assert response.generations[0][0] == sync_response.generations[0][0]
diff --git a/libs/community/tests/integration_tests/document_loaders/test_quip.py b/libs/community/tests/integration_tests/document_loaders/test_quip.py
index 42c1404cc15..ca7573cdf5a 100644
--- a/libs/community/tests/integration_tests/document_loaders/test_quip.py
+++ b/libs/community/tests/integration_tests/document_loaders/test_quip.py
@@ -1,4 +1,6 @@
-from typing import Dict
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Dict
from unittest.mock import MagicMock, patch

import pytest
@@ -6,6 +8,9 @@ from langchain_core.documents import Document

from langchain_community.document_loaders.quip import QuipLoader

+if TYPE_CHECKING:
+    from collections.abc import Iterator
+
try:
    from quip_api.quip import QuipClient  # noqa: F401

@@ -15,7 +20,7 @@ except ImportError:


@pytest.fixture
-def mock_quip():  # type: ignore
+def mock_quip() -> Iterator[MagicMock]:
    # mock quip_client
    with patch("quip_api.quip.QuipClient") as mock_quip:
        yield mock_quip
diff --git a/libs/community/tests/integration_tests/document_loaders/test_unstructured.py b/libs/community/tests/integration_tests/document_loaders/test_unstructured.py
index 5bdd30f2c2e..abd55862fc3 100644
--- a/libs/community/tests/integration_tests/document_loaders/test_unstructured.py
+++ b/libs/community/tests/integration_tests/document_loaders/test_unstructured.py
@@ -105,7 +105,7 @@ def test_unstructured_api_file_loader_io_multiple_files() -> None:
        files = [stack.enter_context(open(file_path, "rb")) for file_path in file_paths]

        loader = UnstructuredAPIFileIOLoader(
-            file=files,  # type: ignore
+            file=files,
            api_key="FAKE_API_KEY",
            strategy="fast",
            mode="elements",
        )
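The test_quip.py change above replaces an untyped fixture (`def mock_quip():  # type: ignore`) with a real return annotation, importing `Iterator` under `TYPE_CHECKING` so nothing extra is imported at runtime. The same pattern in isolation, with a hypothetical patch target in place of the quip client:

    from __future__ import annotations

    from typing import TYPE_CHECKING
    from unittest.mock import MagicMock, patch

    import pytest

    if TYPE_CHECKING:
        from collections.abc import Iterator

    @pytest.fixture
    def mock_client() -> Iterator[MagicMock]:
        # A yield-fixture's declared type is the iterator of yielded values.
        with patch("some_package.SomeClient") as mock_client:
            yield mock_client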
diff --git a/libs/community/tests/integration_tests/embeddings/test_ipex_llm.py b/libs/community/tests/integration_tests/embeddings/test_ipex_llm.py
index 30a7c96d700..424518cd62a 100644
--- a/libs/community/tests/integration_tests/embeddings/test_ipex_llm.py
+++ b/libs/community/tests/integration_tests/embeddings/test_ipex_llm.py
@@ -11,7 +11,7 @@ skip_if_no_model_ids = pytest.mark.skipif(
    not model_ids_to_test,
    reason="TEST_IPEXLLM_BGE_EMBEDDING_MODEL_IDS environment variable not set.",
)
-model_ids_to_test = [model_id.strip() for model_id in model_ids_to_test.split(",")]  # type: ignore
+model_ids_to_test = [model_id.strip() for model_id in model_ids_to_test.split(",")]  # type: ignore[assignment]

device = os.getenv("TEST_IPEXLLM_BGE_EMBEDDING_MODEL_DEVICE") or "cpu"
diff --git a/libs/community/tests/integration_tests/llms/test_bigdl_llm.py b/libs/community/tests/integration_tests/llms/test_bigdl_llm.py
index 4d53720d905..c6049180bd1 100644
--- a/libs/community/tests/integration_tests/llms/test_bigdl_llm.py
+++ b/libs/community/tests/integration_tests/llms/test_bigdl_llm.py
@@ -12,7 +12,7 @@ skip_if_no_model_ids = pytest.mark.skipif(
    not model_ids_to_test,
    reason="TEST_BIGDLLLM_MODEL_IDS environment variable not set.",
)
-model_ids_to_test = [model_id.strip() for model_id in model_ids_to_test.split(",")]  # type: ignore
+model_ids_to_test = [model_id.strip() for model_id in model_ids_to_test.split(",")]  # type: ignore[assignment]


@skip_if_no_model_ids
diff --git a/libs/community/tests/integration_tests/llms/test_ipex_llm.py b/libs/community/tests/integration_tests/llms/test_ipex_llm.py
index 0fc2b5caa53..e48bcbd7b34 100644
--- a/libs/community/tests/integration_tests/llms/test_ipex_llm.py
+++ b/libs/community/tests/integration_tests/llms/test_ipex_llm.py
@@ -12,7 +12,7 @@ model_ids_to_test = os.getenv("TEST_IPEXLLM_MODEL_IDS") or ""
skip_if_no_model_ids = pytest.mark.skipif(
    not model_ids_to_test, reason="TEST_IPEXLLM_MODEL_IDS environment variable not set."
)
-model_ids_to_test = [model_id.strip() for model_id in model_ids_to_test.split(",")]  # type: ignore
+model_ids_to_test = [model_id.strip() for model_id in model_ids_to_test.split(",")]  # type: ignore[assignment]

device = os.getenv("TEST_IPEXLLM_MODEL_DEVICE") or "cpu"
diff --git a/libs/community/tests/integration_tests/llms/test_opaqueprompts.py b/libs/community/tests/integration_tests/llms/test_opaqueprompts.py
index e13bc33c369..0998b5319b5 100644
--- a/libs/community/tests/integration_tests/llms/test_opaqueprompts.py
+++ b/libs/community/tests/integration_tests/llms/test_opaqueprompts.py
@@ -59,8 +59,8 @@ def test_opaqueprompts_functions() -> None:
    pg_chain = (
        op.sanitize
        | RunnableParallel(
-            secure_context=lambda x: x["secure_context"],  # type: ignore
-            response=(lambda x: x["sanitized_input"])  # type: ignore
+            secure_context=lambda x: x["secure_context"],
+            response=(lambda x: x["sanitized_input"])  # type: ignore[operator]
            | prompt
            | llm
            | StrOutputParser(),
        )
diff --git a/libs/community/tests/integration_tests/llms/test_self_hosted_llm.py b/libs/community/tests/integration_tests/llms/test_self_hosted_llm.py
index 076a201f621..c2d2c652379 100644
--- a/libs/community/tests/integration_tests/llms/test_self_hosted_llm.py
+++ b/libs/community/tests/integration_tests/llms/test_self_hosted_llm.py
@@ -25,7 +25,7 @@ def test_self_hosted_huggingface_pipeline_text_generation() -> None:
        hardware=gpu,
        model_reqs=model_reqs,
    )
-    output = llm.invoke("Say foo:")  # type: ignore
+    output = llm.invoke("Say foo:")
    assert isinstance(output, str)
@@ -38,7 +38,7 @@ def test_self_hosted_huggingface_pipeline_text2text_generation() -> None:
        hardware=gpu,
        model_reqs=model_reqs,
    )
-    output = llm.invoke("Say foo:")  # type: ignore
+    output = llm.invoke("Say foo:")
    assert isinstance(output, str)
@@ -83,7 +83,7 @@ def test_init_with_local_pipeline() -> None:
        model_reqs=model_reqs,
        inference_fn=inference_fn,
    )
-    output = llm.invoke("Say foo:")  # type: ignore
+    output = llm.invoke("Say foo:")
    assert isinstance(output, str)
@@ -102,7 +102,7 @@ def test_init_with_pipeline_path() -> None:
        model_reqs=model_reqs,
        inference_fn=inference_fn,
    )
-    output = llm.invoke("Say foo:")  # type: ignore
+    output = llm.invoke("Say foo:")
    assert isinstance(output, str)
@@ -115,5 +115,5 @@ def test_init_with_pipeline_fn() -> None:
        model_reqs=model_reqs,
        inference_fn=inference_fn,
    )
-    output = llm.invoke("Say foo:")  # type: ignore
+    output = llm.invoke("Say foo:")
    assert isinstance(output, str)
diff --git a/libs/community/tests/integration_tests/retrievers/docarray/fixtures.py b/libs/community/tests/integration_tests/retrievers/docarray/fixtures.py
index 4bfa7b7937f..87a2d80e1b4 100644
--- a/libs/community/tests/integration_tests/retrievers/docarray/fixtures.py
+++ b/libs/community/tests/integration_tests/retrievers/docarray/fixtures.py
@@ -40,8 +40,8 @@ def init_weaviate() -> Generator[
        # When initializing the Weaviate index, denote the field
        # you want to search on with `is_embedding=True`
        title: str
-        title_embedding: NdArray[32] = Field(is_embedding=True)  # type: ignore
-        other_emb: NdArray[32]  # type: ignore
+        title_embedding: NdArray[32] = Field(is_embedding=True)  # type: ignore[call-overload]
+        other_emb: NdArray[32]
        year: int

    embeddings = FakeEmbeddings(size=32)
@@ -87,8 +87,8 @@ def init_elastic() -> Generator[
    class MyDoc(BaseDoc):
        title: str
-        title_embedding: NdArray[32]  # type: ignore
-        other_emb: NdArray[32]  # type: ignore
+        title_embedding: NdArray[32]
+        other_emb: NdArray[32]
        year: int

    embeddings = FakeEmbeddings(size=32)
@@ -124,8 +124,8 @@ def init_qdrant() -> Tuple[QdrantDocumentIndex, rest.Filter, FakeEmbeddings]:
    class MyDoc(BaseDoc):
        title: str
-        title_embedding: NdArray[32]  # type: ignore
-        other_emb: NdArray[32]  # type: ignore
+        title_embedding: NdArray[32]
+        other_emb: NdArray[32]
        year: int

    embeddings = FakeEmbeddings(size=32)
@@ -168,8 +168,8 @@ def init_in_memory() -> Tuple[InMemoryExactNNIndex, Dict[str, Any], FakeEmbeddin
    class MyDoc(BaseDoc):
        title: str
-        title_embedding: NdArray[32]  # type: ignore
-        other_emb: NdArray[32]  # type: ignore
+        title_embedding: NdArray[32]
+        other_emb: NdArray[32]
        year: int

    embeddings = FakeEmbeddings(size=32)
@@ -205,8 +205,8 @@ def init_hnsw(
    class MyDoc(BaseDoc):
        title: str
-        title_embedding: NdArray[32]  # type: ignore
-        other_emb: NdArray[32]  # type: ignore
+        title_embedding: NdArray[32]
+        other_emb: NdArray[32]
        year: int

    embeddings = FakeEmbeddings(size=32)
diff --git a/libs/community/tests/integration_tests/retrievers/test_zep.py b/libs/community/tests/integration_tests/retrievers/test_zep.py
index a8ecd280329..75b62954d7f 100644
--- a/libs/community/tests/integration_tests/retrievers/test_zep.py
+++ b/libs/community/tests/integration_tests/retrievers/test_zep.py
@@ -61,12 +61,8 @@ def zep_retriever(
    mock_zep_client.memory = mocker.patch(
        "zep_python.memory.client.MemoryClient", autospec=True
    )
-    mock_zep_client.memory.search_memory.return_value = copy.deepcopy(  # type: ignore
-        search_results
-    )
-    mock_zep_client.memory.asearch_memory.return_value = copy.deepcopy(  # type: ignore
-        search_results
-    )
+    mock_zep_client.memory.search_memory.return_value = copy.deepcopy(search_results)
+    mock_zep_client.memory.asearch_memory.return_value = copy.deepcopy(search_results)
    zep = ZepRetriever(session_id="123", url="http://localhost:8000")  # type: ignore[call-arg]
    zep.zep_client = mock_zep_client
    return zep
@@ -93,13 +89,7 @@ def _test_documents(
) -> None:
    assert len(documents) == 2
    for i, document in enumerate(documents):
-        assert document.page_content == search_results[i].message.get(  # type: ignore
-            "content"
-        )
-        assert document.metadata.get("uuid") == search_results[i].message.get(  # type: ignore
-            "uuid"
-        )
-        assert document.metadata.get("role") == search_results[i].message.get(  # type: ignore
-            "role"
-        )
+        assert document.page_content == search_results[i].message.get("content")
+        assert document.metadata.get("uuid") == search_results[i].message.get("uuid")
+        assert document.metadata.get("role") == search_results[i].message.get("role")
        assert document.metadata.get("score") == search_results[i].dist
diff --git a/libs/community/tests/integration_tests/utilities/test_serpapi.py b/libs/community/tests/integration_tests/utilities/test_serpapi.py
index becd4006650..a604d425309 100644
--- a/libs/community/tests/integration_tests/utilities/test_serpapi.py
+++ b/libs/community/tests/integration_tests/utilities/test_serpapi.py
@@ -5,6 +5,6 @@ from langchain_community.utilities import SerpAPIWrapper

def test_call() -> None:
    """Test that call gives the correct answer."""
-    chain = SerpAPIWrapper()  # type: ignore[call-arg]
+    chain = SerpAPIWrapper()
    output = chain.run("What was Obama's first name?")
    assert output == "Barack Hussein Obama II"
diff --git a/libs/community/tests/integration_tests/utilities/test_stackexchange.py b/libs/community/tests/integration_tests/utilities/test_stackexchange.py
index ac1fd28670f..9ba3e2001fc 100644
--- a/libs/community/tests/integration_tests/utilities/test_stackexchange.py
+++ b/libs/community/tests/integration_tests/utilities/test_stackexchange.py
@@ -5,20 +5,20 @@ from langchain_community.utilities import StackExchangeAPIWrapper

def test_call() -> None:
    """Test that call runs."""
-    stackexchange = StackExchangeAPIWrapper()  # type: ignore[call-arg]
+    stackexchange = StackExchangeAPIWrapper()
    output = stackexchange.run("zsh: command not found: python")
    assert output != "hello"


def test_failure() -> None:
    """Test that call that doesn't run."""
-    stackexchange = StackExchangeAPIWrapper()  # type: ignore[call-arg]
+    stackexchange = StackExchangeAPIWrapper()
    output = stackexchange.run("sjefbsmnf")
    assert output == "No relevant results found for 'sjefbsmnf' on Stack Overflow"


def test_success() -> None:
    """Test that call that doesn't run."""
-    stackexchange = StackExchangeAPIWrapper()  # type: ignore[call-arg]
+    stackexchange = StackExchangeAPIWrapper()
    output = stackexchange.run("zsh: command not found: python")
    assert "zsh: command not found: python" in output
diff --git a/libs/community/tests/integration_tests/vectorstores/qdrant/test_from_texts.py b/libs/community/tests/integration_tests/vectorstores/qdrant/test_from_texts.py
index 5b00ea9f98f..27264c73568 100644
--- a/libs/community/tests/integration_tests/vectorstores/qdrant/test_from_texts.py
+++ b/libs/community/tests/integration_tests/vectorstores/qdrant/test_from_texts.py
@@ -284,6 +284,6 @@ def test_from_texts_passed_optimizers_config_and_on_disk_payload() -> None:
    )

    collection_info = vec_store.client.get_collection(collection_name)
-    assert collection_info.config.params.vectors.on_disk is True  # type: ignore
+    assert collection_info.config.params.vectors.on_disk is True
    assert collection_info.config.optimizer_config.memmap_threshold == 1000
    assert collection_info.config.params.on_disk_payload is True
diff --git a/libs/community/tests/integration_tests/vectorstores/test_chroma.py b/libs/community/tests/integration_tests/vectorstores/test_chroma.py
index a41c983a16f..1f93e11a11f 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_chroma.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_chroma.py
@@ -342,7 +342,7 @@ def test_chroma_large_batch() -> None:
    embedding_function = Fak(size=255)
    col = client.get_or_create_collection(
        "my_collection",
-        embedding_function=embedding_function.embed_documents,  # type: ignore
+        embedding_function=embedding_function.embed_documents,
    )
    docs = ["This is a test document"] * (client.max_batch_size + 100)  # type: ignore[attr-defined]
    Chroma.from_texts(
@@ -370,7 +370,7 @@ def test_chroma_large_batch_update() -> None:
    embedding_function = Fak(size=255)
    col = client.get_or_create_collection(
        "my_collection",
-        embedding_function=embedding_function.embed_documents,  # type: ignore
+        embedding_function=embedding_function.embed_documents,
    )
    docs = ["This is a test document"] * (client.max_batch_size + 100)  # type: ignore[attr-defined]
    ids = [str(uuid.uuid4()) for _ in range(len(docs))]
@@ -406,7 +406,7 @@ def test_chroma_legacy_batching() -> None:
    embedding_function = Fak(size=255)
    col = client.get_or_create_collection(
        "my_collection",
-        embedding_function=embedding_function.embed_documents,  # type: ignore
+        embedding_function=embedding_function.embed_documents,
    )
    docs = ["This is a test document"] * 100
    Chroma.from_texts(
diff --git a/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py b/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py
index ecde8eb5474..abdcd016a6c 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py
@@ -99,13 +99,15 @@ class TestElasticsearch:
    def es_client(self) -> Any:
        # Running this integration test with Elastic Cloud
        # Required for in-stack inference testing (ELSER + model_id)
-        from elastic_transport import Transport
+        from elastic_transport import Transport, TransportApiResponse
        from elasticsearch import Elasticsearch

        class CustomTransport(Transport):
            requests = []

-            def perform_request(self, *args, **kwargs):  # type: ignore
+            def perform_request(
+                self, *args: Any, **kwargs: Any
+            ) -> TransportApiResponse:
                self.requests.append(kwargs)
                return super().perform_request(*args, **kwargs)
@@ -934,4 +936,4 @@ class TestElasticsearch:
        )

        # 1 for index exist, 1 for index create, 3 for index docs
-        assert len(es_client.transport.requests) == 5  # type: ignore
+        assert len(es_client.transport.requests) == 5
diff --git a/libs/community/tests/integration_tests/vectorstores/test_lancedb.py b/libs/community/tests/integration_tests/vectorstores/test_lancedb.py
index a322d95556a..844ac4bfb64 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_lancedb.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_lancedb.py
@@ -112,8 +112,8 @@ def test_lancedb_all_searches() -> None:
    result_3 = store.similarity_search_by_vector_with_relevance_scores(
        embeddings.embed_query("text 1")
    )
-    assert len(result_3[0]) == 2  # type: ignore
-    assert "text 1" in result_3[0][0].page_content  # type: ignore
+    assert len(result_3[0]) == 2
+    assert "text 1" in result_3[0][0].page_content


@pytest.mark.requires("lancedb")
diff --git a/libs/community/tests/integration_tests/vectorstores/test_pgvector.py b/libs/community/tests/integration_tests/vectorstores/test_pgvector.py
index 4579803402c..999da258aa8 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_pgvector.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_pgvector.py
@@ -285,14 +285,14 @@ def test_pgvector_delete_docs() -> None:
        records = list(session.query(docsearch.EmbeddingStore).all())
        # ignoring type error since mypy cannot determine whether
        # the list is sortable
-        assert sorted(record.custom_id for record in records) == ["3"]  # type: ignore
+        assert sorted(record.custom_id for record in records) == ["3"]

    docsearch.delete(["2", "3"])  # Should not raise on missing ids
    with docsearch._make_session() as session:
        records = list(session.query(docsearch.EmbeddingStore).all())
        # ignoring type error since mypy cannot determine whether
        # the list is sortable
-        assert sorted(record.custom_id for record in records) == []  # type: ignore
+        assert sorted(record.custom_id for record in records) == []


def test_pgvector_relevance_score() -> None:
diff --git a/libs/community/tests/integration_tests/vectorstores/test_tidb_vector.py b/libs/community/tests/integration_tests/vectorstores/test_tidb_vector.py
index b30be79356e..0451bc50a6c 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_tidb_vector.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_tidb_vector.py
@@ -54,7 +54,7 @@ def test_search() -> None:
        with docsearch.tidb_vector_client._make_session() as session:
            records = list(session.query(docsearch.tidb_vector_client._table_model).all())
-            assert len([record.id for record in records]) == 3  # type: ignore
+            assert len([record.id for record in records]) == 3
            session.close()

        output = docsearch.similarity_search("foo", k=1)
diff --git a/libs/community/tests/integration_tests/vectorstores/test_vectara.py b/libs/community/tests/integration_tests/vectorstores/test_vectara.py
index 958fda1087c..a09a5b0b912 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_vectara.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_vectara.py
@@ -105,7 +105,7 @@ def test_vectara_add_documents(vectara1: Vectara) -> None:
def vectara2() -> Generator[Vectara, None, None]:
    # download documents to local storage and then upload as files
    # attention paper and deep learning book
-    vectara2: Vectara = Vectara()  # type: ignore
+    vectara2: Vectara = Vectara()

    urls = [
        (
diff --git a/libs/community/tests/integration_tests/vectorstores/test_zep.py b/libs/community/tests/integration_tests/vectorstores/test_zep.py
index 09b269bb3e0..bc4e76356e9 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_zep.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_zep.py
@@ -171,7 +171,7 @@ def test_from_texts(
        api_url="http://localhost:8000",
    )

-    vs._collection.add_documents.assert_called_once_with(  # type: ignore
+    vs._collection.add_documents.assert_called_once_with(
        texts_metadatas_as_zep_documents
    )

@@ -185,7 +185,7 @@ def test_add_documents(
) -> None:
    zep_vectorstore.add_documents(mock_documents)

-    mock_collection.add_documents.assert_called_once_with(  # type: ignore
+    mock_collection.add_documents.assert_called_once_with(
        texts_metadatas_as_zep_documents
    )
diff --git a/libs/community/tests/unit_tests/agents/test_openai_assistant.py b/libs/community/tests/unit_tests/agents/test_openai_assistant.py
index ea99ab5ccd7..0fc8e005223 100644
--- a/libs/community/tests/unit_tests/agents/test_openai_assistant.py
+++ b/libs/community/tests/unit_tests/agents/test_openai_assistant.py
@@ -8,7 +8,7 @@ from langchain_community.agents.openai_assistant import OpenAIAssistantV2Runnabl

def _create_mock_client(*args: Any, use_async: bool = False, **kwargs: Any) -> Any:
    client = AsyncMock() if use_async else MagicMock()
-    client.beta.threads.runs.create = MagicMock(return_value=None)  # type: ignore
+    client.beta.threads.runs.create = MagicMock(return_value=None)
    return client
diff --git a/libs/community/tests/unit_tests/agents/test_tools.py b/libs/community/tests/unit_tests/agents/test_tools.py
index d22c767108c..19fee1ca036 100644
--- a/libs/community/tests/unit_tests/agents/test_tools.py
+++ b/libs/community/tests/unit_tests/agents/test_tools.py
@@ -42,10 +42,9 @@ def test_single_input_agent_raises_error_on_structured_tool(
    with pytest.raises(
        ValueError,
-        match=f"{agent_cls.__name__} does not support"  # type: ignore
-        f" multi-input tool the_tool.",
+        match=f"{agent_cls.__name__} does not support multi-input tool the_tool.",
    ):
-        agent_cls.from_llm_and_tools(MagicMock(), [the_tool])  # type: ignore
+        agent_cls.from_llm_and_tools(MagicMock(), [the_tool])


def test_tool_no_args_specified_assumes_str() -> None:
@@ -85,8 +84,8 @@ def test_load_tools_with_callbacks_is_called() -> None:
    """Test callbacks are called when provided to load_tools fn."""
    callbacks = [FakeCallbackHandler()]
    tools = load_tools(
-        ["requests_get"],  # type: ignore
-        callbacks=callbacks,  # type: ignore
+        ["requests_get"],
+        callbacks=callbacks,  # type: ignore[arg-type]
        allow_dangerous_tools=True,
    )
    assert len(tools) == 1
a/libs/community/tests/unit_tests/callbacks/fake_callback_handler.py +++ b/libs/community/tests/unit_tests/callbacks/fake_callback_handler.py @@ -254,7 +254,9 @@ class FakeCallbackHandler(BaseCallbackHandler, BaseFakeCallbackHandlerMixin): ) -> Any: self.on_retriever_error_common() - def __deepcopy__(self, memo: dict) -> "FakeCallbackHandler": # type: ignore + def __deepcopy__( + self, memo: Optional[dict[int, Any]] = None + ) -> "FakeCallbackHandler": return self @@ -388,5 +390,7 @@ class FakeAsyncCallbackHandler(AsyncCallbackHandler, BaseFakeCallbackHandlerMixi ) -> None: self.on_text_common() - def __deepcopy__(self, memo: dict) -> "FakeAsyncCallbackHandler": # type: ignore + def __deepcopy__( + self, memo: Optional[dict[int, Any]] = None + ) -> "FakeAsyncCallbackHandler": return self diff --git a/libs/community/tests/unit_tests/callbacks/test_callback_manager.py b/libs/community/tests/unit_tests/callbacks/test_callback_manager.py index 9f7f53554a7..5ef01057625 100644 --- a/libs/community/tests/unit_tests/callbacks/test_callback_manager.py +++ b/libs/community/tests/unit_tests/callbacks/test_callback_manager.py @@ -121,4 +121,4 @@ def test_callback_manager_configure_context_vars( assert cb.completion_tokens == 1 assert cb.total_cost > 0 wait_for_all_tracers() - assert LangChainTracer._persist_run_single.call_count == 4 # type: ignore + assert LangChainTracer._persist_run_single.call_count == 4 # type: ignore[attr-defined] diff --git a/libs/community/tests/unit_tests/chat_message_histories/test_sql.py b/libs/community/tests/unit_tests/chat_message_histories/test_sql.py index c62e5159a07..d81619cc759 100644 --- a/libs/community/tests/unit_tests/chat_message_histories/test_sql.py +++ b/libs/community/tests/unit_tests/chat_message_histories/test_sql.py @@ -15,7 +15,7 @@ except ImportError: # for sqlalchemy < 2 from sqlalchemy.ext.declarative import declarative_base - Base = declarative_base() # type:ignore + Base = declarative_base() # type:ignore[misc] from langchain_community.chat_message_histories import SQLChatMessageHistory from langchain_community.chat_message_histories.sql import DefaultMessageConverter diff --git a/libs/community/tests/unit_tests/chat_models/test_oci_data_science.py b/libs/community/tests/unit_tests/chat_models/test_oci_data_science.py index 081393df233..73a7b6a4775 100644 --- a/libs/community/tests/unit_tests/chat_models/test_oci_data_science.py +++ b/libs/community/tests/unit_tests/chat_models/test_oci_data_science.py @@ -157,7 +157,7 @@ def test_stream_vllm(*args: Any) -> None: if output is None: output = chunk else: - output += chunk + output += chunk # type: ignore[assignment] count += 1 assert count == 5 assert output is not None diff --git a/libs/community/tests/unit_tests/chat_models/test_reka.py b/libs/community/tests/unit_tests/chat_models/test_reka.py index bbacadf7fd9..4650de6e17b 100644 --- a/libs/community/tests/unit_tests/chat_models/test_reka.py +++ b/libs/community/tests/unit_tests/chat_models/test_reka.py @@ -38,7 +38,7 @@ def test_reka_model_kwargs() -> None: def test_reka_incorrect_field() -> None: """Test that providing an incorrect field raises ValidationError.""" with pytest.raises(ValidationError): - ChatReka(unknown_field="bar") # type: ignore + ChatReka(unknown_field="bar") # type: ignore[call-arg] @pytest.mark.skip( diff --git a/libs/community/tests/unit_tests/document_loaders/test_arcgis_loader.py b/libs/community/tests/unit_tests/document_loaders/test_arcgis_loader.py index 857556f007c..9c9abc8dc4e 100644 --- 
a/libs/community/tests/unit_tests/document_loaders/test_arcgis_loader.py +++ b/libs/community/tests/unit_tests/document_loaders/test_arcgis_loader.py @@ -1,12 +1,21 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING from unittest.mock import MagicMock, patch import pytest from langchain_community.document_loaders import ArcGISLoader +if TYPE_CHECKING: + from collections.abc import Iterator + + from arcgis.features import FeatureLayer + from arcgis.gis import GIS + @pytest.fixture -def arcgis_mocks(mock_feature_layer, mock_gis): # type: ignore +def arcgis_mocks(mock_feature_layer: FeatureLayer, mock_gis: GIS) -> Iterator[None]: sys_modules = { "arcgis": MagicMock(), "arcgis.features.FeatureLayer": mock_feature_layer, @@ -17,7 +26,7 @@ def arcgis_mocks(mock_feature_layer, mock_gis): # type: ignore @pytest.fixture -def mock_feature_layer(): # type: ignore +def mock_feature_layer() -> FeatureLayer: feature_layer = MagicMock() feature_layer.query.return_value = [ MagicMock(as_dict={"attributes": {"field": "value"}}) @@ -32,13 +41,14 @@ def mock_feature_layer(): # type: ignore @pytest.fixture -def mock_gis(): # type: ignore +def mock_gis() -> GIS: gis = MagicMock() gis.content.get.return_value = MagicMock(description="Item description") return gis -def test_lazy_load(arcgis_mocks, mock_feature_layer, mock_gis): # type: ignore +@pytest.mark.usefixtures("arcgis_mocks") +def test_lazy_load(mock_feature_layer: FeatureLayer, mock_gis: GIS) -> None: loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis) loader.BEAUTIFULSOUP = None @@ -49,9 +59,10 @@ def test_lazy_load(arcgis_mocks, mock_feature_layer, mock_gis): # type: ignore # Add more assertions based on your expected behavior -def test_initialization_with_string_layer( # type: ignore - arcgis_mocks, mock_feature_layer, mock_gis -): +@pytest.mark.usefixtures("arcgis_mocks") +def test_initialization_with_string_layer( + mock_feature_layer: FeatureLayer, mock_gis: GIS +) -> None: layer_url = "https://example.com/layer_url" with patch("arcgis.features.FeatureLayer", return_value=mock_feature_layer): @@ -60,9 +71,10 @@ def test_initialization_with_string_layer( # type: ignore assert loader.url == layer_url -def test_layer_description_provided_by_user( # type: ignore - arcgis_mocks, mock_feature_layer, mock_gis -): +@pytest.mark.usefixtures("arcgis_mocks") +def test_layer_description_provided_by_user( + mock_feature_layer: FeatureLayer, mock_gis: GIS +) -> None: custom_description = "Custom Layer Description" loader = ArcGISLoader( layer=mock_feature_layer, gis=mock_gis, lyr_desc=custom_description @@ -73,7 +85,9 @@ def test_layer_description_provided_by_user( # type: ignore assert layer_properties["layer_description"] == custom_description -def test_initialization_without_arcgis(mock_feature_layer, mock_gis): # type: ignore +def test_initialization_without_arcgis( + mock_feature_layer: FeatureLayer, mock_gis: GIS +) -> None: with patch.dict("sys.modules", {"arcgis": None}): with pytest.raises( ImportError, match="arcgis is required to use the ArcGIS Loader" @@ -81,9 +95,10 @@ def test_initialization_without_arcgis(mock_feature_layer, mock_gis): # type: i ArcGISLoader(layer=mock_feature_layer, gis=mock_gis) -def test_get_layer_properties_with_description( # type: ignore - arcgis_mocks, mock_feature_layer, mock_gis -): +@pytest.mark.usefixtures("arcgis_mocks") +def test_get_layer_properties_with_description( + mock_feature_layer: FeatureLayer, mock_gis: GIS +) -> None: loader = ArcGISLoader( layer=mock_feature_layer, 
gis=mock_gis, lyr_desc="Custom Description" ) @@ -93,7 +108,8 @@ def test_get_layer_properties_with_description( # type: ignore assert props["layer_description"] == "Custom Description" -def test_load_method(arcgis_mocks, mock_feature_layer, mock_gis): # type: ignore +@pytest.mark.usefixtures("arcgis_mocks") +def test_load_method(mock_feature_layer: FeatureLayer, mock_gis: GIS) -> None: loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis) documents = loader.load() @@ -101,7 +117,8 @@ def test_load_method(arcgis_mocks, mock_feature_layer, mock_gis): # type: ignor assert len(documents) == 1 -def test_geometry_returned(arcgis_mocks, mock_feature_layer, mock_gis): # type: ignore +@pytest.mark.usefixtures("arcgis_mocks") +def test_geometry_returned(mock_feature_layer: FeatureLayer, mock_gis: GIS) -> None: mock_feature_layer.query.return_value = [ MagicMock( as_dict={ @@ -117,9 +134,8 @@ def test_geometry_returned(arcgis_mocks, mock_feature_layer, mock_gis): # type: assert "geometry" in documents[0].metadata -def test_geometry_not_returned( # type: ignore - arcgis_mocks, mock_feature_layer, mock_gis -): +@pytest.mark.usefixtures("arcgis_mocks") +def test_geometry_not_returned(mock_feature_layer: FeatureLayer, mock_gis: GIS) -> None: loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis, return_geometry=False) documents = list(loader.lazy_load()) diff --git a/libs/community/tests/unit_tests/document_loaders/test_confluence.py b/libs/community/tests/unit_tests/document_loaders/test_confluence.py index 8075abdee19..666be39923b 100644 --- a/libs/community/tests/unit_tests/document_loaders/test_confluence.py +++ b/libs/community/tests/unit_tests/document_loaders/test_confluence.py @@ -1,5 +1,7 @@ +from __future__ import annotations + import unittest -from typing import Any, Dict +from typing import TYPE_CHECKING, Any, Dict from unittest.mock import MagicMock, patch import pytest @@ -11,9 +13,12 @@ from langchain_community.document_loaders.confluence import ( ContentFormat, ) +if TYPE_CHECKING: + from collections.abc import Iterator + @pytest.fixture -def mock_confluence(): # type: ignore +def mock_confluence() -> Iterator[MagicMock]: with patch("atlassian.Confluence") as mock_confluence: yield mock_confluence diff --git a/libs/community/tests/unit_tests/document_loaders/test_detect_encoding.py b/libs/community/tests/unit_tests/document_loaders/test_detect_encoding.py index 5e8115086e9..20ba3f2afaa 100644 --- a/libs/community/tests/unit_tests/document_loaders/test_detect_encoding.py +++ b/libs/community/tests/unit_tests/document_loaders/test_detect_encoding.py @@ -16,7 +16,7 @@ def test_loader_detect_encoding_text() -> None: str(path), glob="**/*.txt", loader_kwargs={"autodetect_encoding": True}, - loader_cls=TextLoader, # type: ignore + loader_cls=TextLoader, ) with pytest.raises((UnicodeDecodeError, RuntimeError)): @@ -49,13 +49,13 @@ def test_loader_detect_encoding_csv() -> None: loader = DirectoryLoader( str(path), glob="**/*.csv", - loader_cls=CSVLoader, # type: ignore + loader_cls=CSVLoader, ) loader_detect_encoding = DirectoryLoader( str(path), glob="**/*.csv", loader_kwargs={"autodetect_encoding": True}, - loader_cls=CSVLoader, # type: ignore + loader_cls=CSVLoader, ) with pytest.raises((UnicodeDecodeError, RuntimeError)): diff --git a/libs/community/tests/unit_tests/document_loaders/test_directory.py b/libs/community/tests/unit_tests/document_loaders/test_directory.py index 2b2440e504f..2d0fe418aa5 100644 --- a/libs/community/tests/unit_tests/document_loaders/test_directory.py 
+++ b/libs/community/tests/unit_tests/document_loaders/test_directory.py @@ -48,7 +48,7 @@ def test_exclude_ignores_matching_files(tmp_path: Path) -> None: loader = DirectoryLoader( str(tmp_path), exclude=["*.py"], - loader_cls=CustomLoader, # type: ignore + loader_cls=CustomLoader, ) data = loader.load() assert len(data) == 1 diff --git a/libs/community/tests/unit_tests/document_loaders/test_github.py b/libs/community/tests/unit_tests/document_loaders/test_github.py index c37eb2db9ee..fcccde86e7a 100644 --- a/libs/community/tests/unit_tests/document_loaders/test_github.py +++ b/libs/community/tests/unit_tests/document_loaders/test_github.py @@ -78,17 +78,17 @@ def test_parse_issue() -> None: "body": "This is an example issue 1", } expected_document = Document( - page_content=issue["body"], # type: ignore + page_content=issue["body"], # type: ignore[arg-type] metadata={ "url": issue["html_url"], "title": issue["title"], - "creator": issue["user"]["login"], # type: ignore + "creator": issue["user"]["login"], # type: ignore[index] "created_at": issue["created_at"], "comments": issue["comments"], "state": issue["state"], - "labels": [label["name"] for label in issue["labels"]], # type: ignore - "assignee": issue["assignee"]["login"], # type: ignore - "milestone": issue["milestone"]["title"], # type: ignore + "labels": [label["name"] for label in issue["labels"]], # type: ignore[attr-defined] + "assignee": issue["assignee"]["login"], # type: ignore[index] + "milestone": issue["milestone"]["title"], # type: ignore[index] "locked": issue["locked"], "number": issue["number"], "is_pull_request": False, diff --git a/libs/community/tests/unit_tests/document_loaders/test_hugging_face_model.py b/libs/community/tests/unit_tests/document_loaders/test_hugging_face_model.py index 17da8f31d5f..46a0b4c3909 100644 --- a/libs/community/tests/unit_tests/document_loaders/test_hugging_face_model.py +++ b/libs/community/tests/unit_tests/document_loaders/test_hugging_face_model.py @@ -62,14 +62,14 @@ def test_load_models_with_readme() -> None: responses.add_callback( responses.GET, "https://huggingface.co/api/models", - callback=response_callback, # type: ignore + callback=response_callback, # type: ignore[arg-type] content_type="application/json", ) responses.add_callback( responses.GET, # Use a regex or update this placeholder "https://huggingface.co/microsoft/phi-2/raw/main/README.md", - callback=response_callback, # type: ignore + callback=response_callback, # type: ignore[arg-type] content_type="text/plain", ) diff --git a/libs/community/tests/unit_tests/document_loaders/test_psychic.py b/libs/community/tests/unit_tests/document_loaders/test_psychic.py index 94020e0dbb5..bb7dcfd9307 100644 --- a/libs/community/tests/unit_tests/document_loaders/test_psychic.py +++ b/libs/community/tests/unit_tests/document_loaders/test_psychic.py @@ -1,4 +1,6 @@ -from typing import Dict +from __future__ import annotations + +from typing import TYPE_CHECKING, Dict from unittest.mock import MagicMock, patch import pytest @@ -6,15 +8,18 @@ from langchain_core.documents import Document from langchain_community.document_loaders.psychic import PsychicLoader +if TYPE_CHECKING: + from collections.abc import Iterator + @pytest.fixture -def mock_psychic(): # type: ignore +def mock_psychic() -> Iterator[MagicMock]: with patch("psychicapi.Psychic") as mock_psychic: yield mock_psychic @pytest.fixture -def mock_connector_id(): # type: ignore +def mock_connector_id() -> Iterator[MagicMock]: with patch("psychicapi.ConnectorId") as 
mock_connector_id: yield mock_connector_id diff --git a/libs/community/tests/unit_tests/evaluation/test_loading.py b/libs/community/tests/unit_tests/evaluation/test_loading.py index 55cab336889..85effdc5517 100644 --- a/libs/community/tests/unit_tests/evaluation/test_loading.py +++ b/libs/community/tests/unit_tests/evaluation/test_loading.py @@ -21,7 +21,7 @@ def test_load_evaluators(evaluator_type: EvaluatorType) -> None: # Test as string load_evaluators( - [evaluator_type.value], # type: ignore + [evaluator_type.value], # type: ignore[list-item] llm=fake_llm, embeddings=embeddings, ) diff --git a/libs/community/tests/unit_tests/indexes/test_sql_record_manager.py b/libs/community/tests/unit_tests/indexes/test_sql_record_manager.py index 9e15ad92552..01e77c5e7a9 100644 --- a/libs/community/tests/unit_tests/indexes/test_sql_record_manager.py +++ b/libs/community/tests/unit_tests/indexes/test_sql_record_manager.py @@ -20,7 +20,7 @@ def manager() -> SQLRecordManager: return record_manager -@pytest_asyncio.fixture # type: ignore +@pytest_asyncio.fixture @pytest.mark.requires("aiosqlite") async def amanager() -> SQLRecordManager: """Initialize the test database and yield the TimestampedSet instance.""" diff --git a/libs/community/tests/unit_tests/jira/test_jira_api_wrapper.py b/libs/community/tests/unit_tests/jira/test_jira_api_wrapper.py index 68c66b9e404..3b5d268dc51 100644 --- a/libs/community/tests/unit_tests/jira/test_jira_api_wrapper.py +++ b/libs/community/tests/unit_tests/jira/test_jira_api_wrapper.py @@ -1,13 +1,19 @@ +from __future__ import annotations + import json +from typing import TYPE_CHECKING from unittest.mock import MagicMock, patch import pytest from langchain_community.utilities.jira import JiraAPIWrapper +if TYPE_CHECKING: + from collections.abc import Iterator + @pytest.fixture -def mock_jira(): # type: ignore +def mock_jira() -> Iterator[MagicMock]: with patch("atlassian.Jira") as mock_jira: yield mock_jira diff --git a/libs/community/tests/unit_tests/llms/test_aleph_alpha.py b/libs/community/tests/unit_tests/llms/test_aleph_alpha.py index 24b2d0433dc..1b3979fd674 100644 --- a/libs/community/tests/unit_tests/llms/test_aleph_alpha.py +++ b/libs/community/tests/unit_tests/llms/test_aleph_alpha.py @@ -9,7 +9,7 @@ from langchain_community.llms.aleph_alpha import AlephAlpha @pytest.mark.requires("aleph_alpha_client") def test_api_key_is_secret_string() -> None: - llm = AlephAlpha(aleph_alpha_api_key="secret-api-key") # type: ignore + llm = AlephAlpha(aleph_alpha_api_key="secret-api-key") # type: ignore[arg-type] assert isinstance(llm.aleph_alpha_api_key, SecretStr) @@ -17,7 +17,7 @@ def test_api_key_is_secret_string() -> None: def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: - llm = AlephAlpha(aleph_alpha_api_key="secret-api-key") # type: ignore + llm = AlephAlpha(aleph_alpha_api_key="secret-api-key") # type: ignore[arg-type] print(llm.aleph_alpha_api_key, end="") # noqa: T201 captured = capsys.readouterr() diff --git a/libs/community/tests/unit_tests/llms/test_bedrock.py b/libs/community/tests/unit_tests/llms/test_bedrock.py index bb64f4d29d6..64468884e2e 100644 --- a/libs/community/tests/unit_tests/llms/test_bedrock.py +++ b/libs/community/tests/unit_tests/llms/test_bedrock.py @@ -299,7 +299,7 @@ async def test_bedrock_async_streaming_call() -> None: ) # Call the _astream method chunks = [ - json.loads(chunk["chunk"]["bytes"])["text"] # type: ignore + json.loads(chunk["chunk"]["bytes"])["text"] # type: ignore[index] async for chunk in 
llm._astream("Hey, how are you?") ] diff --git a/libs/community/tests/unit_tests/load/test_serializable.py b/libs/community/tests/unit_tests/load/test_serializable.py index 78eaa319b93..2d436fc53be 100644 --- a/libs/community/tests/unit_tests/load/test_serializable.py +++ b/libs/community/tests/unit_tests/load/test_serializable.py @@ -31,10 +31,10 @@ def import_all_modules(package_name: str) -> dict: continue if ( - isinstance(attr.is_lc_serializable(), bool) # type: ignore - and attr.is_lc_serializable() # type: ignore + isinstance(attr.is_lc_serializable(), bool) + and attr.is_lc_serializable() ): - key = tuple(attr.lc_id()) # type: ignore + key = tuple(attr.lc_id()) value = tuple(attr.__module__.split(".") + [attr.__name__]) if key in classes and classes[key] != value: raise ValueError diff --git a/libs/community/tests/unit_tests/retrievers/test_bedrock.py b/libs/community/tests/unit_tests/retrievers/test_bedrock.py index 6f49cd7bd2b..cabdd51d31c 100644 --- a/libs/community/tests/unit_tests/retrievers/test_bedrock.py +++ b/libs/community/tests/unit_tests/retrievers/test_bedrock.py @@ -32,7 +32,7 @@ def test_create_client() -> None: # Import error if boto3 is not installed # Value error if credentials are not supplied. with pytest.raises((ImportError, ValueError)): - AmazonKnowledgeBasesRetriever() # type: ignore + AmazonKnowledgeBasesRetriever() # type: ignore[call-arg] def test_standard_params(amazon_retriever: AmazonKnowledgeBasesRetriever) -> None: @@ -58,7 +58,7 @@ def test_get_relevant_documents( } documents: List[Document] = amazon_retriever._get_relevant_documents( query, - run_manager=None, # type: ignore + run_manager=None, # type: ignore[arg-type] ) assert len(documents) == 3 diff --git a/libs/community/tests/unit_tests/test_cache.py b/libs/community/tests/unit_tests/test_cache.py index 24887d619ba..c118c4c20ce 100644 --- a/libs/community/tests/unit_tests/test_cache.py +++ b/libs/community/tests/unit_tests/test_cache.py @@ -267,7 +267,7 @@ def test_sql_alchemy_cache() -> None: """Test custom_caching behavior.""" Base = declarative_base() - class FulltextLLMCache(Base): # type: ignore + class FulltextLLMCache(Base): # type: ignore[misc,valid-type] """Postgres table for fulltext-indexed LLM Cache.""" __tablename__ = "llm_cache_fulltext" diff --git a/libs/community/tests/unit_tests/test_sql_database_schema.py b/libs/community/tests/unit_tests/test_sql_database_schema.py index 2b1e815a051..44dac19e164 100644 --- a/libs/community/tests/unit_tests/test_sql_database_schema.py +++ b/libs/community/tests/unit_tests/test_sql_database_schema.py @@ -93,7 +93,7 @@ def test_sql_database_run() -> None: for record in records: assert isinstance(record.message, Warning) assert any( - record.message.args[0] # type: ignore + record.message.args[0] # type: ignore[union-attr] == "duckdb-engine doesn't yet support reflection on indices" for record in records ) diff --git a/libs/community/tests/unit_tests/tools/databricks/test_tools.py b/libs/community/tests/unit_tests/tools/databricks/test_tools.py index dc47bee12c2..28eb3e5f7ca 100644 --- a/libs/community/tests/unit_tests/tools/databricks/test_tools.py +++ b/libs/community/tests/unit_tests/tools/databricks/test_tools.py @@ -1,3 +1,4 @@ +from typing import Any, Optional from unittest import mock import pytest @@ -27,20 +28,20 @@ from langchain_community.tools.databricks._execution import ( def test_execute_function(parameters: dict, execute_params: dict) -> None: workspace_client = mock.Mock() - def mock_execute_statement( # type: ignore - 
statement, - warehouse_id, + def mock_execute_statement( + statement: str, + warehouse_id: str, *, - byte_limit=None, - catalog=None, - disposition=None, - format=None, - on_wait_timeout=None, - parameters=None, - row_limit=None, - schema=None, - wait_timeout=None, - ): + byte_limit: Optional[int] = None, + catalog: Optional[str] = None, + disposition: Optional[Any] = None, + format: Optional[Any] = None, + on_wait_timeout: Optional[Any] = None, + parameters: Optional[list[Any]] = None, + row_limit: Optional[int] = None, + schema: Optional[str] = None, + wait_timeout: Optional[str] = None, + ) -> mock.Mock: for key, value in execute_params.items(): assert locals()[key] == value return mock.Mock() @@ -58,20 +59,20 @@ def test_execute_function(parameters: dict, execute_params: dict) -> None: def test_execute_function_error() -> None: workspace_client = mock.Mock() - def mock_execute_statement( # type: ignore - statement, - warehouse_id, + def mock_execute_statement( + statement: str, + warehouse_id: str, *, - byte_limit=None, - catalog=None, - disposition=None, - format=None, - on_wait_timeout=None, - parameters=None, - row_limit=None, - schema=None, - wait_timeout=None, - ): + byte_limit: Optional[int] = None, + catalog: Optional[str] = None, + disposition: Optional[Any] = None, + format: Optional[Any] = None, + on_wait_timeout: Optional[Any] = None, + parameters: Optional[list[Any]] = None, + row_limit: Optional[int] = None, + schema: Optional[str] = None, + wait_timeout: Optional[str] = None, + ) -> mock.Mock: return mock.Mock() workspace_client.statement_execution.execute_statement = mock_execute_statement diff --git a/libs/community/tests/unit_tests/tools/test_exported.py b/libs/community/tests/unit_tests/tools/test_exported.py index 6e004f1bac6..9856f6a739d 100644 --- a/libs/community/tests/unit_tests/tools/test_exported.py +++ b/libs/community/tests/unit_tests/tools/test_exported.py @@ -24,7 +24,7 @@ def _get_tool_classes(skip_tools_without_default_names: bool) -> List[Type[BaseT if tool_class in _EXCLUDE: continue default_name = get_fields(tool_class)["name"].default - if skip_tools_without_default_names and default_name in [ # type: ignore + if skip_tools_without_default_names and default_name in [ None, "", ]: diff --git a/libs/community/tests/unit_tests/tools/test_signatures.py b/libs/community/tests/unit_tests/tools/test_signatures.py index 105a2f4ac96..62373e0f25f 100644 --- a/libs/community/tests/unit_tests/tools/test_signatures.py +++ b/libs/community/tests/unit_tests/tools/test_signatures.py @@ -35,7 +35,7 @@ def get_non_abstract_subclasses(cls: Type[BaseTool]) -> List[Type[BaseTool]]: return subclasses -@pytest.mark.parametrize("cls", get_non_abstract_subclasses(BaseTool)) # type: ignore +@pytest.mark.parametrize("cls", get_non_abstract_subclasses(BaseTool)) # type: ignore[type-abstract] def test_all_subclasses_accept_run_manager(cls: Type[BaseTool]) -> None: """Test that tools defined in this repo accept a run manager argument.""" # This wouldn't be necessary if the BaseTool had a strict API. 
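A note on the recurring pattern in the hunks above: nearly every change either narrows a bare "# type: ignore" to an error-code-scoped form such as "# type: ignore[arg-type]", or deletes the comment outright where the upgraded mypy (1.15, per the uv.lock changes below) no longer reports an error. The following is a minimal sketch of why the scoped form is preferable; the function names are illustrative only, not code from this changeset, and whether this repo enables the related strictness flags is not visible in the diff.

# sketch: scoped vs. bare ignores under mypy (hypothetical example, not repo code)

def double(n: int) -> int:
    return n * 2

def handler(raw: str) -> int:
    # mypy reports here: Argument 1 to "double" has incompatible type "str";
    # expected "int"  [arg-type]. A bare `# type: ignore` would silence every
    # error on this line, present and future; the scoped form suppresses only
    # the named error code.
    return double(raw)  # type: ignore[arg-type]

# With warn_unused_ignores enabled, mypy flags a scoped ignore as unused once
# the underlying error goes away, and running mypy with
# --enable-error-code ignore-without-code rejects any remaining bare ignores,
# which is what makes a cleanup sweep like this one enforceable in CI.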
diff --git a/libs/community/tests/unit_tests/utilities/test_nvidia_riva_asr.py b/libs/community/tests/unit_tests/utilities/test_nvidia_riva_asr.py index c5ffdacabf4..576910376dc 100644 --- a/libs/community/tests/unit_tests/utilities/test_nvidia_riva_asr.py +++ b/libs/community/tests/unit_tests/utilities/test_nvidia_riva_asr.py @@ -128,7 +128,7 @@ def test_init(asr: RivaASR) -> None: """Test that ASR accepts valid arguments.""" for key, expected_val in CONFIG.items(): if key == "url": - assert asr.url == AnyHttpUrl(expected_val) # type: ignore + assert asr.url == AnyHttpUrl(expected_val) # type: ignore[arg-type] else: assert getattr(asr, key, None) == expected_val diff --git a/libs/community/tests/unit_tests/utilities/test_nvidia_riva_tts.py b/libs/community/tests/unit_tests/utilities/test_nvidia_riva_tts.py index fe584f2723f..66da1716a7b 100644 --- a/libs/community/tests/unit_tests/utilities/test_nvidia_riva_tts.py +++ b/libs/community/tests/unit_tests/utilities/test_nvidia_riva_tts.py @@ -58,7 +58,7 @@ def test_init(tts: RivaTTS) -> None: """Test that ASR accepts valid arguments.""" for key, expected_val in CONFIG.items(): if key == "url": - assert str(tts.url) == expected_val + "/" # type: ignore + assert str(tts.url) == expected_val + "/" # type: ignore[operator] else: assert getattr(tts, key, None) == expected_val diff --git a/libs/community/tests/unit_tests/vectorstores/redis/test_redis_schema.py b/libs/community/tests/unit_tests/vectorstores/redis/test_redis_schema.py index a3afbfc5ad5..769e81c1b04 100644 --- a/libs/community/tests/unit_tests/vectorstores/redis/test_redis_schema.py +++ b/libs/community/tests/unit_tests/vectorstores/redis/test_redis_schema.py @@ -125,7 +125,7 @@ def test_read_schema_dict_input() -> None: "tag": [{"name": "tag"}], "vector": [{"name": "content_vector", "dims": 100, "algorithm": "FLAT"}], } - output = read_schema(index_schema=index_schema) # type: ignore + output = read_schema(index_schema=index_schema) # type: ignore[arg-type] assert output == index_schema @@ -139,9 +139,9 @@ def test_redis_model_creation() -> None: ) assert redis_model.text[0].name == "content" - assert redis_model.tag[0].name == "tag" # type: ignore - assert redis_model.numeric[0].name == "numeric" # type: ignore - assert redis_model.vector[0].name == "flat_vector" # type: ignore + assert redis_model.tag[0].name == "tag" # type: ignore[index] + assert redis_model.numeric[0].name == "numeric" # type: ignore[index] + assert redis_model.vector[0].name == "flat_vector" # type: ignore[index] # Test the content_vector property with pytest.raises(ValueError): diff --git a/libs/community/tests/unit_tests/vectorstores/test_databricks_vector_search.py b/libs/community/tests/unit_tests/vectorstores/test_databricks_vector_search.py index fcf0bfc9bc9..e2b987245db 100644 --- a/libs/community/tests/unit_tests/vectorstores/test_databricks_vector_search.py +++ b/libs/community/tests/unit_tests/vectorstores/test_databricks_vector_search.py @@ -114,7 +114,7 @@ EXAMPLE_SEARCH_RESPONSE = { "row_count": len(fake_texts), "data_array": sorted( [[str(uuid.uuid4()), s, random.uniform(0, 1)] for s in fake_texts], - key=lambda x: x[2], # type: ignore + key=lambda x: x[2], # type: ignore[arg-type,return-value] reverse=True, ), }, @@ -134,7 +134,7 @@ EXAMPLE_SEARCH_RESPONSE_FIXED_SCORE: Dict = { "row_count": len(fake_texts), "data_array": sorted( [[str(uuid.uuid4()), s, 0.5] for s in fake_texts], - key=lambda x: x[2], # type: ignore + key=lambda x: x[2], reverse=True, ), }, @@ -160,7 +160,7 @@ 
EXAMPLE_SEARCH_RESPONSE_WITH_EMBEDDING = { fake_texts, DEFAULT_EMBEDDING_MODEL.embed_documents(fake_texts) ) ], - key=lambda x: x[2], # type: ignore + key=lambda x: x[2], # type: ignore[arg-type,return-value] reverse=True, ), }, diff --git a/libs/community/uv.lock b/libs/community/uv.lock index f38644705d3..6a0c213c33d 100644 --- a/libs/community/uv.lock +++ b/libs/community/uv.lock @@ -1745,7 +1745,7 @@ typing = [ { name = "langchain", editable = "../langchain" }, { name = "langchain-core", editable = "../core" }, { name = "langchain-text-splitters", editable = "../text-splitters" }, - { name = "mypy", specifier = ">=1.12,<2.0" }, + { name = "mypy", specifier = ">=1.15,<2.0" }, { name = "mypy-protobuf", specifier = ">=3.0.0,<4.0.0" }, { name = "types-chardet", specifier = ">=5.0.4.6,<6.0.0.0" }, { name = "types-pytz", specifier = ">=2023.3.0.0,<2024.0.0.0" }, @@ -1797,6 +1797,8 @@ test = [ { name = "numpy", marker = "python_full_version >= '3.13'", specifier = ">=2.1.0" }, { name = "pytest", specifier = ">=8,<9" }, { name = "pytest-asyncio", specifier = ">=0.21.1,<1.0.0" }, + { name = "pytest-benchmark" }, + { name = "pytest-codspeed" }, { name = "pytest-mock", specifier = ">=3.10.0,<4.0.0" }, { name = "pytest-socket", specifier = ">=0.7.0,<1.0.0" }, { name = "pytest-watcher", specifier = ">=0.3.4,<1.0.0" }, @@ -1807,8 +1809,7 @@ test = [ test-integration = [] typing = [ { name = "langchain-text-splitters", directory = "../text-splitters" }, - { name = "mypy", specifier = ">=1.10,<1.11" }, - { name = "types-jinja2", specifier = ">=2.11.9,<3.0.0" }, + { name = "mypy", specifier = ">=1.15,<1.16" }, { name = "types-pyyaml", specifier = ">=6.0.12.2,<7.0.0.0" }, { name = "types-requests", specifier = ">=2.28.11.5,<3.0.0.0" }, ] @@ -2113,46 +2114,46 @@ wheels = [ [[package]] name = "mypy" -version = "1.14.1" +version = "1.15.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mypy-extensions" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b9/eb/2c92d8ea1e684440f54fa49ac5d9a5f19967b7b472a281f419e69a8d228e/mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6", size = 3216051 } +sdist = { url = "https://files.pythonhosted.org/packages/ce/43/d5e49a86afa64bd3839ea0d5b9c7103487007d728e1293f52525d6d5486a/mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43", size = 3239717 } wheels = [ - { url = "https://files.pythonhosted.org/packages/9b/7a/87ae2adb31d68402da6da1e5f30c07ea6063e9f09b5e7cfc9dfa44075e74/mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb", size = 11211002 }, - { url = "https://files.pythonhosted.org/packages/e1/23/eada4c38608b444618a132be0d199b280049ded278b24cbb9d3fc59658e4/mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0", size = 10358400 }, - { url = "https://files.pythonhosted.org/packages/43/c9/d6785c6f66241c62fd2992b05057f404237deaad1566545e9f144ced07f5/mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d", size = 12095172 }, - { url = 
"https://files.pythonhosted.org/packages/c3/62/daa7e787770c83c52ce2aaf1a111eae5893de9e004743f51bfcad9e487ec/mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b", size = 12828732 }, - { url = "https://files.pythonhosted.org/packages/1b/a2/5fb18318a3637f29f16f4e41340b795da14f4751ef4f51c99ff39ab62e52/mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427", size = 13012197 }, - { url = "https://files.pythonhosted.org/packages/28/99/e153ce39105d164b5f02c06c35c7ba958aaff50a2babba7d080988b03fe7/mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f", size = 9780836 }, - { url = "https://files.pythonhosted.org/packages/da/11/a9422850fd506edbcdc7f6090682ecceaf1f87b9dd847f9df79942da8506/mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c", size = 11120432 }, - { url = "https://files.pythonhosted.org/packages/b6/9e/47e450fd39078d9c02d620545b2cb37993a8a8bdf7db3652ace2f80521ca/mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1", size = 10279515 }, - { url = "https://files.pythonhosted.org/packages/01/b5/6c8d33bd0f851a7692a8bfe4ee75eb82b6983a3cf39e5e32a5d2a723f0c1/mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8", size = 12025791 }, - { url = "https://files.pythonhosted.org/packages/f0/4c/e10e2c46ea37cab5c471d0ddaaa9a434dc1d28650078ac1b56c2d7b9b2e4/mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f", size = 12749203 }, - { url = "https://files.pythonhosted.org/packages/88/55/beacb0c69beab2153a0f57671ec07861d27d735a0faff135a494cd4f5020/mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1", size = 12885900 }, - { url = "https://files.pythonhosted.org/packages/a2/75/8c93ff7f315c4d086a2dfcde02f713004357d70a163eddb6c56a6a5eff40/mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae", size = 9777869 }, - { url = "https://files.pythonhosted.org/packages/43/1b/b38c079609bb4627905b74fc6a49849835acf68547ac33d8ceb707de5f52/mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14", size = 11266668 }, - { url = "https://files.pythonhosted.org/packages/6b/75/2ed0d2964c1ffc9971c729f7a544e9cd34b2cdabbe2d11afd148d7838aa2/mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9", size = 10254060 }, - { url = "https://files.pythonhosted.org/packages/a1/5f/7b8051552d4da3c51bbe8fcafffd76a6823779101a2b198d80886cd8f08e/mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11", size = 11933167 }, - { url = 
"https://files.pythonhosted.org/packages/04/90/f53971d3ac39d8b68bbaab9a4c6c58c8caa4d5fd3d587d16f5927eeeabe1/mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e", size = 12864341 }, - { url = "https://files.pythonhosted.org/packages/03/d2/8bc0aeaaf2e88c977db41583559319f1821c069e943ada2701e86d0430b7/mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89", size = 12972991 }, - { url = "https://files.pythonhosted.org/packages/6f/17/07815114b903b49b0f2cf7499f1c130e5aa459411596668267535fe9243c/mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b", size = 9879016 }, - { url = "https://files.pythonhosted.org/packages/9e/15/bb6a686901f59222275ab228453de741185f9d54fecbaacec041679496c6/mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255", size = 11252097 }, - { url = "https://files.pythonhosted.org/packages/f8/b3/8b0f74dfd072c802b7fa368829defdf3ee1566ba74c32a2cb2403f68024c/mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34", size = 10239728 }, - { url = "https://files.pythonhosted.org/packages/c5/9b/4fd95ab20c52bb5b8c03cc49169be5905d931de17edfe4d9d2986800b52e/mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a", size = 11924965 }, - { url = "https://files.pythonhosted.org/packages/56/9d/4a236b9c57f5d8f08ed346914b3f091a62dd7e19336b2b2a0d85485f82ff/mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9", size = 12867660 }, - { url = "https://files.pythonhosted.org/packages/40/88/a61a5497e2f68d9027de2bb139c7bb9abaeb1be1584649fa9d807f80a338/mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd", size = 12969198 }, - { url = "https://files.pythonhosted.org/packages/54/da/3d6fc5d92d324701b0c23fb413c853892bfe0e1dbe06c9138037d459756b/mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107", size = 9885276 }, - { url = "https://files.pythonhosted.org/packages/ca/1f/186d133ae2514633f8558e78cd658070ba686c0e9275c5a5c24a1e1f0d67/mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35", size = 11200493 }, - { url = "https://files.pythonhosted.org/packages/af/fc/4842485d034e38a4646cccd1369f6b1ccd7bc86989c52770d75d719a9941/mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc", size = 10357702 }, - { url = "https://files.pythonhosted.org/packages/b4/e6/457b83f2d701e23869cfec013a48a12638f75b9d37612a9ddf99072c1051/mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9", size = 12091104 }, - { url = 
"https://files.pythonhosted.org/packages/f1/bf/76a569158db678fee59f4fd30b8e7a0d75bcbaeef49edd882a0d63af6d66/mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb", size = 12830167 }, - { url = "https://files.pythonhosted.org/packages/43/bc/0bc6b694b3103de9fed61867f1c8bd33336b913d16831431e7cb48ef1c92/mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60", size = 13013834 }, - { url = "https://files.pythonhosted.org/packages/b0/79/5f5ec47849b6df1e6943d5fd8e6632fbfc04b4fd4acfa5a5a9535d11b4e2/mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c", size = 9781231 }, - { url = "https://files.pythonhosted.org/packages/a0/b5/32dd67b69a16d088e533962e5044e51004176a9952419de0370cdaead0f8/mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1", size = 2752905 }, + { url = "https://files.pythonhosted.org/packages/68/f8/65a7ce8d0e09b6329ad0c8d40330d100ea343bd4dd04c4f8ae26462d0a17/mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13", size = 10738433 }, + { url = "https://files.pythonhosted.org/packages/b4/95/9c0ecb8eacfe048583706249439ff52105b3f552ea9c4024166c03224270/mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559", size = 9861472 }, + { url = "https://files.pythonhosted.org/packages/84/09/9ec95e982e282e20c0d5407bc65031dfd0f0f8ecc66b69538296e06fcbee/mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b", size = 11611424 }, + { url = "https://files.pythonhosted.org/packages/78/13/f7d14e55865036a1e6a0a69580c240f43bc1f37407fe9235c0d4ef25ffb0/mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3", size = 12365450 }, + { url = "https://files.pythonhosted.org/packages/48/e1/301a73852d40c241e915ac6d7bcd7fedd47d519246db2d7b86b9d7e7a0cb/mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b", size = 12551765 }, + { url = "https://files.pythonhosted.org/packages/77/ba/c37bc323ae5fe7f3f15a28e06ab012cd0b7552886118943e90b15af31195/mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828", size = 9274701 }, + { url = "https://files.pythonhosted.org/packages/03/bc/f6339726c627bd7ca1ce0fa56c9ae2d0144604a319e0e339bdadafbbb599/mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f", size = 10662338 }, + { url = "https://files.pythonhosted.org/packages/e2/90/8dcf506ca1a09b0d17555cc00cd69aee402c203911410136cd716559efe7/mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5", size = 9787540 }, + { url = "https://files.pythonhosted.org/packages/05/05/a10f9479681e5da09ef2f9426f650d7b550d4bafbef683b69aad1ba87457/mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e", size = 11538051 }, + { url = "https://files.pythonhosted.org/packages/e9/9a/1f7d18b30edd57441a6411fcbc0c6869448d1a4bacbaee60656ac0fc29c8/mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c", size = 12286751 }, + { url = "https://files.pythonhosted.org/packages/72/af/19ff499b6f1dafcaf56f9881f7a965ac2f474f69f6f618b5175b044299f5/mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f", size = 12421783 }, + { url = "https://files.pythonhosted.org/packages/96/39/11b57431a1f686c1aed54bf794870efe0f6aeca11aca281a0bd87a5ad42c/mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f", size = 9265618 }, + { url = "https://files.pythonhosted.org/packages/98/3a/03c74331c5eb8bd025734e04c9840532226775c47a2c39b56a0c8d4f128d/mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd", size = 10793981 }, + { url = "https://files.pythonhosted.org/packages/f0/1a/41759b18f2cfd568848a37c89030aeb03534411eef981df621d8fad08a1d/mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f", size = 9749175 }, + { url = "https://files.pythonhosted.org/packages/12/7e/873481abf1ef112c582db832740f4c11b2bfa510e829d6da29b0ab8c3f9c/mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464", size = 11455675 }, + { url = "https://files.pythonhosted.org/packages/b3/d0/92ae4cde706923a2d3f2d6c39629134063ff64b9dedca9c1388363da072d/mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee", size = 12410020 }, + { url = "https://files.pythonhosted.org/packages/46/8b/df49974b337cce35f828ba6fda228152d6db45fed4c86ba56ffe442434fd/mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e", size = 12498582 }, + { url = "https://files.pythonhosted.org/packages/13/50/da5203fcf6c53044a0b699939f31075c45ae8a4cadf538a9069b165c1050/mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22", size = 9366614 }, + { url = "https://files.pythonhosted.org/packages/6a/9b/fd2e05d6ffff24d912f150b87db9e364fa8282045c875654ce7e32fffa66/mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445", size = 10788592 }, + { url = "https://files.pythonhosted.org/packages/74/37/b246d711c28a03ead1fd906bbc7106659aed7c089d55fe40dd58db812628/mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d", size = 9753611 }, + { url = "https://files.pythonhosted.org/packages/a6/ac/395808a92e10cfdac8003c3de9a2ab6dc7cde6c0d2a4df3df1b815ffd067/mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5", size = 11438443 }, + { url = 
"https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541 }, + { url = "https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348 }, + { url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648 }, + { url = "https://files.pythonhosted.org/packages/5a/fa/79cf41a55b682794abe71372151dbbf856e3008f6767057229e6649d294a/mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078", size = 10737129 }, + { url = "https://files.pythonhosted.org/packages/d3/33/dd8feb2597d648de29e3da0a8bf4e1afbda472964d2a4a0052203a6f3594/mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba", size = 9856335 }, + { url = "https://files.pythonhosted.org/packages/e4/b5/74508959c1b06b96674b364ffeb7ae5802646b32929b7701fc6b18447592/mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5", size = 11611935 }, + { url = "https://files.pythonhosted.org/packages/6c/53/da61b9d9973efcd6507183fdad96606996191657fe79701b2c818714d573/mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b", size = 12365827 }, + { url = "https://files.pythonhosted.org/packages/c1/72/965bd9ee89540c79a25778cc080c7e6ef40aa1eeac4d52cec7eae6eb5228/mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2", size = 12541924 }, + { url = "https://files.pythonhosted.org/packages/46/d0/f41645c2eb263e6c77ada7d76f894c580c9ddb20d77f0c24d34273a4dab2/mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980", size = 9271176 }, + { url = "https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777 }, ] [[package]]