community[patch]: Changes to base_o365 and sharepoint document loaders (#20373)
## Description:

This PR introduces three changes:

1. Added a `recursive` property to `O365BaseLoader`. (To keep the existing behavior unchanged, it defaults to `False`.) When `recursive=True`, `_load_from_folder()` also recursively loads all nested folders.
2. Added `folder_id` to `SharePointLoader` (similar to [this PR](https://github.com/langchain-ai/langchain/pull/10780)). This provides an alternative to `folder_path`, which does not seem to work reliably.
3. When none of `document_ids`, `folder_id`, or `folder_path` is provided, the loader fetches documents from the root folder. Combined with `recursive=True`, this provides an easy way to load all compatible documents from SharePoint.

The PR contains the same logic as [this stale PR](https://github.com/langchain-ai/langchain/pull/10780) by @WaleedAlfaris. I'd like to ask his blessing for moving forward with this one.

## Issue:

- As described in https://github.com/langchain-ai/langchain/issues/19938 and https://github.com/langchain-ai/langchain/pull/10780, the SharePoint loader often does not seem to work with `folder_path`.
- Recursive loading of subfolders is missing functionality.

## Dependencies:

None

Twitter handle: @martintriska1 @WRhetoric

This is my first PR here, please be gentle :-)

Please review @baskaryan

---------

Co-authored-by: Bagatur <22008038+baskaryan@users.noreply.github.com>
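A minimal usage sketch of the new options (illustrative only: it assumes the loader's existing `document_library_id` field and that O365 credentials are already configured; the IDs below are placeholders):

```python
from langchain_community.document_loaders import SharePointLoader

# Assumes O365 app credentials are already set up for the loader;
# the IDs below are placeholders, not real values.
loader = SharePointLoader(
    document_library_id="YOUR_DRIVE_ID",  # existing required field
    folder_id="TARGET_FOLDER_ID",         # new: load by folder id instead of folder_path
    recursive=True,                       # new: also descend into nested subfolders
)

docs = loader.load()
print(f"Loaded {len(docs)} documents")
```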
Changes to `base_o365.py`:

```diff
@@ -76,6 +76,8 @@ class O365BaseLoader(BaseLoader, BaseModel):
     """Whether to authenticate with a token or not. Defaults to False."""
     chunk_size: Union[int, str] = CHUNK_SIZE
     """Number of bytes to retrieve from each api call to the server. int or 'auto'."""
+    recursive: bool = False
+    """Should the loader recursively load subfolders?"""
 
     @property
     @abstractmethod
@@ -114,6 +116,9 @@ class O365BaseLoader(BaseLoader, BaseModel):
                         file.download(to_path=temp_dir, chunk_size=self.chunk_size)
             loader = FileSystemBlobLoader(path=temp_dir)
             yield from loader.yield_blobs()
+        if self.recursive:
+            for subfolder in folder.get_child_folders():
+                yield from self._load_from_folder(subfolder)
 
     def _load_from_object_ids(
         self, drive: Drive, object_ids: List[str]
```
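The new branch composes through `yield from`, so the recursion stays lazy: a folder yields its own blobs first, then delegates to the same generator for each child folder. A minimal, self-contained sketch of that pattern with a toy `Folder` class (not the O365 API):

```python
from typing import Iterable, List, Optional


class Folder:
    """Toy stand-in for an O365 folder: holds file names and child folders."""

    def __init__(self, files: List[str], children: Optional[List["Folder"]] = None):
        self.files = files
        self.children = children or []


def load_from_folder(folder: Folder, recursive: bool = False) -> Iterable[str]:
    # Yield this folder's own files first...
    yield from folder.files
    # ...then, if requested, recurse into every subfolder.
    if recursive:
        for subfolder in folder.children:
            yield from load_from_folder(subfolder, recursive=True)


root = Folder(["a.docx"], [Folder(["b.pdf"], [Folder(["c.xlsx"])])])
print(list(load_from_folder(root, recursive=True)))  # ['a.docx', 'b.pdf', 'c.xlsx']
```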
Changes to `sharepoint.py`:

```diff
@@ -22,6 +22,8 @@ class SharePointLoader(O365BaseLoader):
     """ The path to the folder to load data from."""
     object_ids: Optional[List[str]] = None
     """ The IDs of the objects to load data from."""
+    folder_id: Optional[str] = None
+    """ The ID of the folder to load data from."""
 
     @property
     def _file_types(self) -> Sequence[_FileType]:
@@ -51,6 +53,18 @@ class SharePointLoader(O365BaseLoader):
                 raise ValueError(f"There isn't a folder with path {self.folder_path}.")
             for blob in self._load_from_folder(target_folder):
                 yield from blob_parser.lazy_parse(blob)
+        if self.folder_id:
+            target_folder = drive.get_item(self.folder_id)
+            if not isinstance(target_folder, Folder):
+                raise ValueError(f"There isn't a folder with path {self.folder_path}.")
+            for blob in self._load_from_folder(target_folder):
+                yield from blob_parser.lazy_parse(blob)
         if self.object_ids:
             for blob in self._load_from_object_ids(drive, self.object_ids):
                 yield from blob_parser.lazy_parse(blob)
+        if not (self.folder_path or self.folder_id or self.object_ids):
+            target_folder = drive.get_root_folder()
+            if not isinstance(target_folder, Folder):
+                raise ValueError("Unable to fetch root folder")
+            for blob in self._load_from_folder(target_folder):
+                yield from blob_parser.lazy_parse(blob)
```
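With the new fallback branch, omitting `folder_path`, `folder_id`, and `object_ids` loads from the drive's root folder; together with `recursive=True` this walks the whole library. A hedged usage sketch (the `document_library_id` value is a placeholder and O365 credentials are assumed to be configured):

```python
from langchain_community.document_loaders import SharePointLoader

# No folder_path / folder_id / object_ids: the loader falls back to the
# drive's root folder and, with recursive=True, walks all subfolders.
loader = SharePointLoader(
    document_library_id="YOUR_DRIVE_ID",  # placeholder
    recursive=True,
)

for doc in loader.lazy_load():
    print(doc.metadata.get("source"), len(doc.page_content))
```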