diff --git a/libs/community/langchain_community/document_loaders/sharepoint.py b/libs/community/langchain_community/document_loaders/sharepoint.py
index f4d57d66d42..bfcc47fba1b 100644
--- a/libs/community/langchain_community/document_loaders/sharepoint.py
+++ b/libs/community/langchain_community/document_loaders/sharepoint.py
@@ -1,8 +1,12 @@
 """Loader that loads data from Sharepoint Document Library"""
 from __future__ import annotations
 
-from typing import Iterator, List, Optional, Sequence
+import json
+from pathlib import Path
+from typing import Any, Iterator, List, Optional, Sequence
 
+import requests
+from langchain_core.document_loaders import BaseLoader
 from langchain_core.documents import Document
 from langchain_core.pydantic_v1 import Field
 
@@ -13,7 +17,7 @@ from langchain_community.document_loaders.base_o365 import (
 from langchain_community.document_loaders.parsers.registry import get_parser
 
 
-class SharePointLoader(O365BaseLoader):
+class SharePointLoader(O365BaseLoader, BaseLoader):
     """Load from `SharePoint`."""
 
     document_library_id: str = Field(...)
@@ -24,6 +28,14 @@ class SharePointLoader(O365BaseLoader):
     """ The IDs of the objects to load data from."""
     folder_id: Optional[str] = None
    """ The ID of the folder to load data from."""
+    load_auth: Optional[bool] = False
+    """ Whether to load authorization identities."""
+    token_path: Path = Path.home() / ".credentials" / "o365_token.txt"
+    """ The path to the token to make api calls"""
+    file_id: Optional[str] = None
+    """ The ID of the file for which we need auth identities"""
+    site_id: Optional[str] = None
+    """ The ID of the Sharepoint site of the user where the file is present """
 
     @property
     def _file_types(self) -> Sequence[_FileType]:
@@ -52,7 +64,11 @@ class SharePointLoader(O365BaseLoader):
             if not isinstance(target_folder, Folder):
                 raise ValueError(f"There isn't a folder with path {self.folder_path}.")
             for blob in self._load_from_folder(target_folder):
-                yield from blob_parser.lazy_parse(blob)
+                for parsed_blob in blob_parser.lazy_parse(blob):
+                    if self.load_auth is True:
+                        auth_identities = self.authorized_identities()
+                        parsed_blob.metadata["authorized_identities"] = auth_identities
+                    yield parsed_blob
         if self.folder_id:
             target_folder = drive.get_item(self.folder_id)
             if not isinstance(target_folder, Folder):
@@ -68,3 +84,38 @@ class SharePointLoader(O365BaseLoader):
             raise ValueError("Unable to fetch root folder")
         for blob in self._load_from_folder(target_folder):
             yield from blob_parser.lazy_parse(blob)
+
+    def authorized_identities(self) -> List[str]:
+        """Return display names of identities permitted to access the file.
+
+        Queries the Microsoft Graph ``permissions`` endpoint for
+        ``self.file_id`` in ``self.document_library_id`` on ``self.site_id``.
+        """
+        data = self._fetch_access_token()
+        access_token = data.get("access_token")
+        url = (
+            f"https://graph.microsoft.com/v1.0/sites/{self.site_id}/"
+            f"drives/{self.document_library_id}/items/{self.file_id}/permissions"
+        )
+        headers = {"Authorization": f"Bearer {access_token}"}
+        response = requests.get(url, headers=headers, timeout=30)
+        response.raise_for_status()
+        groups_list = response.json()
+
+        group_names = []
+        for group_data in groups_list.get("value", []):
+            granted_to = group_data.get("grantedToV2")
+            if not granted_to:
+                continue
+            # A permission may be granted to a site group, an AAD group or a
+            # single user; each variant exposes a ``displayName``.
+            identity = (
+                granted_to.get("siteGroup")
+                or granted_to.get("group")
+                or granted_to.get("user")
+            )
+            if identity:
+                group_names.append(identity.get("displayName"))
+        return group_names
+
+    def _fetch_access_token(self) -> Any:
+        """Read the cached O365 token file (JSON) and return it as a dict."""
+        with open(self.token_path) as f:
+            return json.load(f)