Mirror of https://github.com/hwchase17/langchain.git, synced 2025-09-12 12:59:07 +00:00
community[patch]: upgrade to recent version of mypy (#21616)
This PR upgrades the community package to a recent version of mypy and inserts type: ignore comments on all existing failures.
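As background on the suppressions used throughout the diff: a comment of the form # type: ignore[code] silences only the named mypy error code on that line, whereas a bare # type: ignore hides every error on the line, and recent mypy releases can flag stale suppressions when run with --warn-unused-ignores. The snippet below is a minimal illustration written for this note, not code from the PR; both functions are hypothetical.

from typing import Optional


def parse_load_max(raw: Optional[str]) -> int:
    # int() rejects None, so mypy reports an [arg-type] error here;
    # the bracketed code suppresses only that specific error.
    return int(raw)  # type: ignore[arg-type]


def empty_ids() -> int:
    # An empty list is not an int, so mypy reports [return-value],
    # the same code suppressed on the "return []" lines in this diff.
    return []  # type: ignore[return-value]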
@@ -70,7 +70,7 @@ def fetch_mime_types(file_types: Sequence[_FileType]) -> Dict[str, str]:
 class O365BaseLoader(BaseLoader, BaseModel):
     """Base class for all loaders that uses O365 Package"""

-    settings: _O365Settings = Field(default_factory=_O365Settings)
+    settings: _O365Settings = Field(default_factory=_O365Settings)  # type: ignore[arg-type]
     """Settings for the Office365 API client."""
     auth_with_token: bool = False
     """Whether to authenticate with a token or not. Defaults to False."""
@@ -86,7 +86,7 @@ class KineticaLoader(BaseLoader):
         query_result = self._execute_query()
         if isinstance(query_result, Exception):
             print(f"An error occurred during the query: {query_result}")  # noqa: T201
-            return []
+            return []  # type: ignore[return-value]
         page_content_columns, metadata_columns = self._get_columns(query_result)
         if "*" in page_content_columns:
             page_content_columns = list(query_result[0].keys())
@@ -58,8 +58,8 @@ class MHTMLLoader(BaseLoader):
             parts = [message]

             for part in parts:
-                if part.get_content_type() == "text/html":
-                    html = part.get_payload(decode=True).decode()
+                if part.get_content_type() == "text/html":  # type: ignore[union-attr]
+                    html = part.get_payload(decode=True).decode()  # type: ignore[union-attr]

                     soup = BeautifulSoup(html, **self.bs_kwargs)
                     text = soup.get_text(self.get_text_separator)
@@ -31,7 +31,7 @@ class _OneNoteGraphSettings(BaseSettings):
 class OneNoteLoader(BaseLoader, BaseModel):
     """Load pages from OneNote notebooks."""

-    settings: _OneNoteGraphSettings = Field(default_factory=_OneNoteGraphSettings)
+    settings: _OneNoteGraphSettings = Field(default_factory=_OneNoteGraphSettings)  # type: ignore[arg-type]
     """Settings for the Microsoft Graph API client."""
     auth_with_token: bool = False
     """Whether to authenticate with a token or not. Defaults to False."""
@@ -691,7 +691,7 @@ class AmazonTextractPDFLoader(BasePDFLoader):
         # raises ValueError when multi-page and not on S3"""

         if self.web_path and self._is_s3_url(self.web_path):
-            blob = Blob(path=self.web_path)  # type: ignore[misc]
+            blob = Blob(path=self.web_path)  # type: ignore[call-arg] # type: ignore[misc]
         else:
             blob = Blob.from_path(self.file_path)  # type: ignore[attr-defined]
         if AmazonTextractPDFLoader._get_number_of_pages(blob) > 1:
@@ -28,8 +28,8 @@ class PubMedLoader(BaseLoader):
         """
         self.query = query
         self.load_max_docs = load_max_docs
-        self._client = PubMedAPIWrapper(
-            top_k_results=load_max_docs,
+        self._client = PubMedAPIWrapper(  # type: ignore[call-arg]
+            top_k_results=load_max_docs,  # type: ignore[arg-type]
         )

     def lazy_load(self) -> Iterator[Document]:
@@ -111,7 +111,7 @@ class SnowflakeLoader(BaseLoader):
         query_result = self._execute_query()
         if isinstance(query_result, Exception):
             print(f"An error occurred during the query: {query_result}")  # noqa: T201
-            return []
+            return []  # type: ignore[return-value]
         page_content_columns, metadata_columns = self._get_columns(query_result)
         if "*" in page_content_columns:
             page_content_columns = list(query_result[0].keys())
@@ -66,10 +66,10 @@ class TensorflowDatasetLoader(BaseLoader):
         ] = sample_to_document_function
         """Custom function that transform a dataset sample into a Document."""

-        self._tfds_client = TensorflowDatasets(
+        self._tfds_client = TensorflowDatasets(  # type: ignore[call-arg]
             dataset_name=self.dataset_name,
             split_name=self.split_name,
-            load_max_docs=self.load_max_docs,
+            load_max_docs=self.load_max_docs,  # type: ignore[arg-type]
             sample_to_document_function=self.sample_to_document_function,
         )

@@ -32,7 +32,7 @@ class WeatherDataLoader(BaseLoader):
     def from_params(
         cls, places: Sequence[str], *, openweathermap_api_key: Optional[str] = None
     ) -> WeatherDataLoader:
-        client = OpenWeatherMapAPIWrapper(openweathermap_api_key=openweathermap_api_key)
+        client = OpenWeatherMapAPIWrapper(openweathermap_api_key=openweathermap_api_key)  # type: ignore[call-arg]
         return cls(client, places)

     def lazy_load(
@@ -50,10 +50,10 @@ class WikipediaLoader(BaseLoader):
             A list of Document objects representing the loaded
             Wikipedia pages.
         """
-        client = WikipediaAPIWrapper(
+        client = WikipediaAPIWrapper(  # type: ignore[call-arg]
             lang=self.lang,
-            top_k_results=self.load_max_docs,
-            load_all_available_meta=self.load_all_available_meta,
-            doc_content_chars_max=self.doc_content_chars_max,
+            top_k_results=self.load_max_docs,  # type: ignore[arg-type]
+            load_all_available_meta=self.load_all_available_meta,  # type: ignore[arg-type]
+            doc_content_chars_max=self.doc_content_chars_max,  # type: ignore[arg-type]
         )
         yield from client.load(self.query)