diff --git a/ci/requirements.txt b/ci/requirements.txt
index a33b658..bceda35 100644
--- a/ci/requirements.txt
+++ b/ci/requirements.txt
@@ -4,3 +4,4 @@ pytest>=3.3.2
 backports.functools_lru_cache>=1.4
 tenacity>=4.8.0
 future
+requests-toolbelt
diff --git a/ci/run.py b/ci/run.py
index 03752b2..ec9620a 100755
--- a/ci/run.py
+++ b/ci/run.py
@@ -212,24 +212,28 @@ def main():
     else:
         dbs = ('sqlite3',)
     for db in dbs:
-        shell('rm -rf {}/*'.format(INSTALLDIR))
         start_and_test_with_db(db)

 def start_and_test_with_db(db):
-    info('Setting up seafile server with %s database', db)
-    server = ServerCtl(
-        TOPDIR,
-        INSTALLDIR,
-        db=db,
-        # Use the newly built seaf-server (to avoid "make install" each time when developping locally)
-        seaf_server_bin=join(SeafileServer().projectdir, 'server/seaf-server')
-    )
-    server.setup()
-    with server.run():
-        info('Testing with %s database', db)
-        with cd(SeafileServer().projectdir):
-            shell('py.test', env=server.get_seaserv_envs())
+    fileservers = ('go_fileserver', 'c_fileserver')
+    for fileserver in fileservers:
+        shell('rm -rf {}/*'.format(INSTALLDIR))
+        info('Setting up seafile server with %s database, using %s', db, fileserver)
+        server = ServerCtl(
+            TOPDIR,
+            SeafileServer().projectdir,
+            INSTALLDIR,
+            fileserver,
+            db=db,
+            # Use the newly built seaf-server (to avoid "make install" each time when developing locally)
+            seaf_server_bin=join(SeafileServer().projectdir, 'server/seaf-server')
+        )
+        server.setup()
+        with server.run():
+            info('Testing with %s database', db)
+            with cd(SeafileServer().projectdir):
+                shell('py.test', env=server.get_seaserv_envs())

 if __name__ == '__main__':
diff --git a/ci/serverctl.py b/ci/serverctl.py
index d3a52e5..09cf05e 100755
--- a/ci/serverctl.py
+++ b/ci/serverctl.py
@@ -22,7 +22,7 @@ logger = logging.getLogger(__name__)

 class ServerCtl(object):
-    def __init__(self, topdir, datadir, db='sqlite3', seaf_server_bin='seaf-server', ccnet_server_bin='ccnet-server'):
+    def __init__(self, topdir, projectdir, datadir, fileserver, db='sqlite3', seaf_server_bin='seaf-server', ccnet_server_bin='ccnet-server'):
         self.db = db
         self.datadir = datadir
         self.central_conf_dir = join(datadir, 'conf')
@@ -33,6 +33,7 @@ class ServerCtl(object):
         mkdirs(self.log_dir)
         self.ccnet_log = join(self.log_dir, 'ccnet.log')
         self.seafile_log = join(self.log_dir, 'seafile.log')
+        self.fileserver_log = join(self.log_dir, 'fileserver.log')

         self.ccnet_server_bin = ccnet_server_bin
         self.seaf_server_bin = seaf_server_bin
@@ -41,6 +42,9 @@ class ServerCtl(object):

         self.ccnet_proc = None
         self.seafile_proc = None
+        self.fileserver_proc = None
+        self.projectdir = projectdir
+        self.fileserver = fileserver

     def setup(self):
         if self.db == 'mysql':
@@ -86,7 +90,14 @@ CONNECTION_CHARSET = utf8

     def init_seafile(self):
         seafile_conf = join(self.central_conf_dir, 'seafile.conf')
-        seafile_fileserver_conf = '''\
+        if self.fileserver == 'go_fileserver':
+            seafile_fileserver_conf = '''\
+[fileserver]
+use_go_fileserver = true
+port=8082
+'''
+        else:
+            seafile_fileserver_conf = '''\
 [fileserver]
 port=8082
 '''
@@ -150,6 +161,7 @@ connection_charset = utf8
         self.create_database_tables()
         logger.info('Starting seafile server')
         self.start_seafile()
+        self.start_fileserver()

     def create_database_tables(self):
         if self.db == 'mysql':
@@ -217,6 +229,22 @@ connection_charset = utf8
         ]
         self.seafile_proc = shell(cmd, wait=False)

+    def start_fileserver(self):
+        cmd = [
+            "./fileserver",
+            "-F",
+            self.central_conf_dir,
+            "-d",
+            self.seafile_conf_dir,
+            "-l",
+            self.fileserver_log,
+        ]
+        fileserver_path = join(self.projectdir, 'fileserver')
+        with cd(fileserver_path):
+            shell("go build")
+            self.fileserver_proc = shell(cmd, wait=False)
+
     def stop(self):
         if self.ccnet_proc:
             logger.info('Stopping ccnet server')
@@ -224,6 +252,11 @@ connection_charset = utf8
         if self.seafile_proc:
             logger.info('Stopping seafile server')
             self.seafile_proc.kill()
+        if self.fileserver_proc:
+            logger.info('Stopping go fileserver')
+            self.fileserver_proc.kill()
+        if self.db == 'mysql':
+            del_mysql_dbs()

     def get_seaserv_envs(self):
         envs = dict(os.environ)
@@ -247,3 +280,12 @@ GRANT ALL PRIVILEGES ON `seafile`.* to `seafile`@localhost;
 '''
     shell('sudo mysql -u root -proot', inputdata=sql)
+
+def del_mysql_dbs():
+    sql = b'''\
+drop database `ccnet`;
+drop database `seafile`;
+drop user 'seafile'@'localhost';
+    '''
+
+    shell('sudo mysql -u root -proot', inputdata=sql)
diff --git a/fileserver/blockmgr/blockmgr.go b/fileserver/blockmgr/blockmgr.go
new file mode 100644
index 0000000..d631eec
--- /dev/null
+++ b/fileserver/blockmgr/blockmgr.go
@@ -0,0 +1,46 @@
+// Package blockmgr provides operations on blocks.
+package blockmgr
+
+import (
+	"github.com/haiwen/seafile-server/fileserver/objstore"
+	"io"
+)
+
+var store *objstore.ObjectStore
+
+// Init initializes the block manager and creates the underlying object store.
+func Init(seafileConfPath string, seafileDataDir string) {
+	store = objstore.New(seafileConfPath, seafileDataDir, "blocks")
+}
+
+// Read reads a block from the storage backend.
+func Read(repoID string, blockID string, w io.Writer) error {
+	err := store.Read(repoID, blockID, w)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Write writes a block to the storage backend.
+func Write(repoID string, blockID string, r io.Reader) error {
+	err := store.Write(repoID, blockID, r, false)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Exists checks whether a block exists.
+func Exists(repoID string, blockID string) bool {
+	ret, _ := store.Exists(repoID, blockID)
+	return ret
+}
+
+// Stat returns the size of a block.
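+//
+// A minimal usage sketch for this package (the conf path and IDs below are
+// hypothetical, and Init must be called once at startup):
+//
+//	blockmgr.Init(seafileConfPath, seafileDataDir)
+//	var buf bytes.Buffer
+//	if blockmgr.Exists(repoID, blockID) {
+//		err := blockmgr.Read(repoID, blockID, &buf)
+//		// handle err and consume buf
+//	}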
+func Stat(repoID string, blockID string) (int64, error) {
+	ret, err := store.Stat(repoID, blockID)
+	return ret, err
+}
diff --git a/fileserver/blockmgr/blockmgr_test.go b/fileserver/blockmgr/blockmgr_test.go
new file mode 100644
index 0000000..894280f
--- /dev/null
+++ b/fileserver/blockmgr/blockmgr_test.go
@@ -0,0 +1,103 @@
+package blockmgr
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path"
+	"testing"
+)
+
+const (
+	blockID         = "0401fc662e3bc87a41f299a907c056aaf8322a27"
+	repoID          = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
+	seafileConfPath = "/tmp/conf"
+	seafileDataDir  = "/tmp/conf/seafile-data"
+	testFile        = "output.data"
+)
+
+func delFile() error {
+	err := os.Remove(testFile)
+	if err != nil {
+		return err
+	}
+
+	err = os.RemoveAll(seafileConfPath)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func createFile() error {
+	outputFile, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		return err
+	}
+	defer outputFile.Close()
+
+	outputString := "hello world!\n"
+	for i := 0; i < 10; i++ {
+		outputFile.WriteString(outputString)
+	}
+
+	return nil
+}
+
+func TestMain(m *testing.M) {
+	err := createFile()
+	if err != nil {
+		fmt.Printf("Failed to create test file: %v\n", err)
+		os.Exit(1)
+	}
+	code := m.Run()
+	err = delFile()
+	if err != nil {
+		fmt.Printf("Failed to remove test file: %v\n", err)
+		os.Exit(1)
+	}
+	os.Exit(code)
+}
+
+func testBlockRead(t *testing.T) {
+	var buf bytes.Buffer
+	err := Read(repoID, blockID, &buf)
+	if err != nil {
+		t.Errorf("Failed to read block.\n")
+	}
+}
+
+func testBlockWrite(t *testing.T) {
+	inputFile, err := os.Open(testFile)
+	if err != nil {
+		t.Errorf("Failed to open test file: %v\n", err)
+	}
+	defer inputFile.Close()
+
+	err = Write(repoID, blockID, inputFile)
+	if err != nil {
+		t.Errorf("Failed to write block.\n")
+	}
+}
+
+func testBlockExists(t *testing.T) {
+	ret := Exists(repoID, blockID)
+	if !ret {
+		t.Errorf("Block does not exist.\n")
+	}
+
+	filePath := path.Join(seafileDataDir, "storage", "blocks", repoID, blockID[:2], blockID[2:])
+	fileInfo, _ := os.Stat(filePath)
+	if fileInfo.Size() != 130 {
+		t.Errorf("Block exists, but the file size is incorrect.\n")
+	}
+}
+
+func TestBlock(t *testing.T) {
+	Init(seafileConfPath, seafileDataDir)
+	testBlockWrite(t)
+	testBlockRead(t)
+	testBlockExists(t)
+}
diff --git a/fileserver/commitmgr/commitmgr.go b/fileserver/commitmgr/commitmgr.go
new file mode 100644
index 0000000..5ab5e31
--- /dev/null
+++ b/fileserver/commitmgr/commitmgr.go
@@ -0,0 +1,159 @@
+// Package commitmgr manages commit objects.
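+//
+// Commits round-trip through JSON. A minimal sketch of the intended use
+// (identifiers below are hypothetical):
+//
+//	commit := commitmgr.NewCommit(repoID, parentID, rootID, "user", "Added a file")
+//	if err := commitmgr.Save(commit); err != nil {
+//		// handle error
+//	}
+//	loaded, err := commitmgr.Load(commit.RepoID, commit.CommitID)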
+package commitmgr + +import ( + "bytes" + "crypto/sha1" + "encoding/binary" + "encoding/hex" + "encoding/json" + "io" + "time" + + "github.com/haiwen/seafile-server/fileserver/objstore" +) + +// Commit is a commit object +type Commit struct { + CommitID string `json:"commit_id"` + RepoID string `json:"repo_id"` + RootID string `json:"root_id"` + CreatorName string `json:"creator_name,omitempty"` + CreatorID string `json:"creator"` + Desc string `json:"description"` + Ctime int64 `json:"ctime"` + ParentID string `json:"parent_id,omitempty"` + SecondParentID string `json:"second_parent_id,omitempty"` + RepoName string `json:"repo_name"` + RepoDesc string `json:"repo_desc"` + RepoCategory string `json:"repo_category"` + DeviceName string `json:"device_name,omitempty"` + ClientVersion string `json:"client_version,omitempty"` + Encrypted string `json:"encrypted,omitempty"` + EncVersion int `json:"enc_version,omitempty"` + Magic string `json:"magic,omitempty"` + RandomKey string `json:"key,omitempty"` + Salt string `json:"salt,omitempty"` + Version int `json:"version,omitempty"` + Conflict int `json:"conflict,omitempty"` + NewMerge int `json:"new_merge,omitempty"` + Repaired int `json:"repaired,omitempty"` +} + +var store *objstore.ObjectStore + +// Init initializes commit manager and creates underlying object store. +func Init(seafileConfPath string, seafileDataDir string) { + store = objstore.New(seafileConfPath, seafileDataDir, "commits") +} + +// NewCommit initializes a Commit object. +func NewCommit(repoID, parentID, newRoot, user, desc string) *Commit { + commit := new(Commit) + commit.RepoID = repoID + commit.RootID = newRoot + commit.Desc = desc + commit.CreatorName = user + commit.CreatorID = "0000000000000000000000000000000000000000" + commit.Ctime = time.Now().Unix() + commit.CommitID = computeCommitID(commit) + commit.ParentID = parentID + + return commit +} + +func computeCommitID(commit *Commit) string { + hash := sha1.New() + hash.Write([]byte(commit.RootID)) + hash.Write([]byte(commit.CreatorID)) + hash.Write([]byte(commit.CreatorName)) + hash.Write([]byte(commit.Desc)) + tmpBuf := make([]byte, 8) + binary.BigEndian.PutUint64(tmpBuf, uint64(commit.Ctime)) + hash.Write(tmpBuf) + + checkSum := hash.Sum(nil) + id := hex.EncodeToString(checkSum[:]) + + return id +} + +// FromData reads from p and converts JSON-encoded data to commit. +func (commit *Commit) FromData(p []byte) error { + err := json.Unmarshal(p, commit) + if err != nil { + return err + } + + return nil +} + +// ToData converts commit to JSON-encoded data and writes to w. +func (commit *Commit) ToData(w io.Writer) error { + jsonstr, err := json.Marshal(commit) + if err != nil { + return err + } + + _, err = w.Write(jsonstr) + if err != nil { + return err + } + + return nil +} + +// ReadRaw reads data in binary format from storage backend. +func ReadRaw(repoID string, commitID string, w io.Writer) error { + err := store.Read(repoID, commitID, w) + if err != nil { + return err + } + return nil +} + +// WriteRaw writes data in binary format to storage backend. +func WriteRaw(repoID string, commitID string, r io.Reader) error { + err := store.Write(repoID, commitID, r, false) + if err != nil { + return err + } + return nil +} + +// Load commit from storage backend. 
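+// It is the inverse of Save: ReadRaw fetches the stored JSON bytes and
+// FromData decodes them into a Commit.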
+func Load(repoID string, commitID string) (*Commit, error) {
+	var buf bytes.Buffer
+	commit := new(Commit)
+	err := ReadRaw(repoID, commitID, &buf)
+	if err != nil {
+		return nil, err
+	}
+	err = commit.FromData(buf.Bytes())
+	if err != nil {
+		return nil, err
+	}
+
+	return commit, nil
+}
+
+// Save commit to storage backend.
+func Save(commit *Commit) error {
+	var buf bytes.Buffer
+	err := commit.ToData(&buf)
+	if err != nil {
+		return err
+	}
+
+	err = WriteRaw(commit.RepoID, commit.CommitID, &buf)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Exists checks whether a commit exists.
+func Exists(repoID string, commitID string) (bool, error) {
+	return store.Exists(repoID, commitID)
+}
diff --git a/fileserver/commitmgr/commitmgr_test.go b/fileserver/commitmgr/commitmgr_test.go
new file mode 100644
index 0000000..fd4501d
--- /dev/null
+++ b/fileserver/commitmgr/commitmgr_test.go
@@ -0,0 +1,67 @@
+package commitmgr
+
+import (
+	"fmt"
+	"os"
+	"testing"
+	"time"
+)
+
+const (
+	commitID        = "0401fc662e3bc87a41f299a907c056aaf8322a27"
+	repoID          = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
+	seafileConfPath = "/tmp/conf"
+	seafileDataDir  = "/tmp/conf/seafile-data"
+)
+
+func delFile() error {
+	err := os.RemoveAll(seafileConfPath)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func TestMain(m *testing.M) {
+	code := m.Run()
+	err := delFile()
+	if err != nil {
+		fmt.Printf("Failed to remove test file: %v\n", err)
+		os.Exit(1)
+	}
+	os.Exit(code)
+}
+
+func assertEqual(t *testing.T, a, b interface{}) {
+	if a != b {
+		t.Errorf("not equal: %v, %v", a, b)
+	}
+}
+
+func TestCommit(t *testing.T) {
+	Init(seafileConfPath, seafileDataDir)
+	newCommit := new(Commit)
+	newCommit.CommitID = commitID
+	newCommit.RepoID = repoID
+	newCommit.CreatorName = "seafile"
+	newCommit.CreatorID = commitID
+	newCommit.Desc = "This is a commit"
+	newCommit.Ctime = time.Now().Unix()
+	newCommit.ParentID = commitID
+	newCommit.DeviceName = "Linux"
+	err := Save(newCommit)
+	if err != nil {
+		t.Errorf("Failed to save commit.\n")
+	}
+
+	commit, err := Load(repoID, commitID)
+	if err != nil {
+		t.Errorf("Failed to load commit.\n")
+	}
+	assertEqual(t, commit.CommitID, commitID)
+	assertEqual(t, commit.RepoID, repoID)
+	assertEqual(t, commit.CreatorName, "seafile")
+	assertEqual(t, commit.CreatorID, commitID)
+	assertEqual(t, commit.ParentID, commitID)
+}
diff --git a/fileserver/crypt.go b/fileserver/crypt.go
new file mode 100644
index 0000000..9607c35
--- /dev/null
+++ b/fileserver/crypt.go
@@ -0,0 +1,47 @@
+package main
+
+import (
+	"bytes"
+	"crypto/aes"
+	"crypto/cipher"
+)
+
+func pkcs7Padding(p []byte, blockSize int) []byte {
+	padding := blockSize - len(p)%blockSize
+	padtext := bytes.Repeat([]byte{byte(padding)}, padding)
+	return append(p, padtext...)
+} + +func pkcs7UnPadding(p []byte) []byte { + length := len(p) + paddLen := int(p[length-1]) + return p[:(length - paddLen)] +} + +func decrypt(input, key, iv []byte) ([]byte, error) { + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + out := make([]byte, len(input)) + blockMode := cipher.NewCBCDecrypter(block, iv) + blockMode.CryptBlocks(out, input) + out = pkcs7UnPadding(out) + + return out, nil +} + +func encrypt(input, key, iv []byte) ([]byte, error) { + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + input = pkcs7Padding(input, block.BlockSize()) + out := make([]byte, len(input)) + blockMode := cipher.NewCBCEncrypter(block, iv) + blockMode.CryptBlocks(out, input) + + return out, nil +} diff --git a/fileserver/diff/diff.go b/fileserver/diff/diff.go new file mode 100644 index 0000000..1d7be13 --- /dev/null +++ b/fileserver/diff/diff.go @@ -0,0 +1,597 @@ +package diff + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/haiwen/seafile-server/fileserver/commitmgr" + "github.com/haiwen/seafile-server/fileserver/fsmgr" + "github.com/haiwen/seafile-server/fileserver/repomgr" +) + +// Empty value of sha1 +const ( + EmptySha1 = "0000000000000000000000000000000000000000" +) + +type fileCB func(string, []*fsmgr.SeafDirent, interface{}) error +type dirCB func(string, []*fsmgr.SeafDirent, interface{}, *bool) error + +type DiffOptions struct { + FileCB fileCB + DirCB dirCB + RepoID string + Data interface{} +} + +type diffData struct { + foldDirDiff bool + results *[]*DiffEntry +} + +func DiffTrees(roots []string, opt *DiffOptions) error { + n := len(roots) + if n != 2 && n != 3 { + err := fmt.Errorf("the number of commit trees is illegal") + return err + } + trees := make([]*fsmgr.SeafDir, n) + for i := 0; i < n; i++ { + root, err := fsmgr.GetSeafdir(opt.RepoID, roots[i]) + if err != nil { + err := fmt.Errorf("Failed to find dir %s:%s", opt.RepoID, roots[i]) + return err + } + trees[i] = root + } + + return diffTreesRecursive(trees, "", opt) +} + +func diffTreesRecursive(trees []*fsmgr.SeafDir, baseDir string, opt *DiffOptions) error { + n := len(trees) + ptrs := make([][]*fsmgr.SeafDirent, 3) + + for i := 0; i < n; i++ { + if trees[i] != nil { + ptrs[i] = trees[i].Entries + } else { + ptrs[i] = nil + } + } + + var firstName string + var done bool + var offset = make([]int, n) + for { + dents := make([]*fsmgr.SeafDirent, 3) + firstName = "" + done = true + for i := 0; i < n; i++ { + if len(ptrs[i]) > offset[i] { + done = false + dent := ptrs[i][offset[i]] + + if firstName == "" { + firstName = dent.Name + } else if strings.Compare(dent.Name, firstName) > 0 { + firstName = dent.Name + } + } + + } + if done { + break + } + + for i := 0; i < n; i++ { + if len(ptrs[i]) > offset[i] { + dent := ptrs[i][offset[i]] + if firstName == dent.Name { + dents[i] = dent + offset[i]++ + } + + } + } + + if n == 2 && dents[0] != nil && dents[1] != nil && + direntSame(dents[0], dents[1]) { + continue + } + if n == 3 && dents[0] != nil && dents[1] != nil && + dents[2] != nil && direntSame(dents[0], dents[1]) && + direntSame(dents[0], dents[2]) { + continue + } + + if err := diffFiles(baseDir, dents, opt); err != nil { + return err + } + if err := diffDirectories(baseDir, dents, opt); err != nil { + return err + } + } + return nil +} + +func diffFiles(baseDir string, dents []*fsmgr.SeafDirent, opt *DiffOptions) error { + n := len(dents) + var nFiles int + files := make([]*fsmgr.SeafDirent, 3) + for i := 0; i < n; i++ { + if dents[i] != nil && 
fsmgr.IsRegular(dents[i].Mode) {
+			files[i] = dents[i]
+			nFiles++
+		}
+	}
+
+	if nFiles == 0 {
+		return nil
+	}
+
+	return opt.FileCB(baseDir, files, opt.Data)
+}
+
+func diffDirectories(baseDir string, dents []*fsmgr.SeafDirent, opt *DiffOptions) error {
+	n := len(dents)
+	dirs := make([]*fsmgr.SeafDirent, 3)
+	subDirs := make([]*fsmgr.SeafDir, 3)
+	var nDirs int
+	for i := 0; i < n; i++ {
+		if dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {
+			dirs[i] = dents[i]
+			nDirs++
+		}
+	}
+	if nDirs == 0 {
+		return nil
+	}
+
+	recurse := true
+	err := opt.DirCB(baseDir, dirs, opt.Data, &recurse)
+	if err != nil {
+		err := fmt.Errorf("failed to call dir callback: %v", err)
+		return err
+	}
+
+	if !recurse {
+		return nil
+	}
+
+	var dirName string
+	for i := 0; i < n; i++ {
+		if dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {
+			dir, err := fsmgr.GetSeafdir(opt.RepoID, dents[i].ID)
+			if err != nil {
+				err := fmt.Errorf("Failed to find dir %s:%s", opt.RepoID, dents[i].ID)
+				return err
+			}
+			subDirs[i] = dir
+			dirName = dents[i].Name
+		}
+	}
+
+	newBaseDir := baseDir + dirName + "/"
+	return diffTreesRecursive(subDirs, newBaseDir, opt)
+}
+
+func direntSame(dentA, dentB *fsmgr.SeafDirent) bool {
+	return dentA.ID == dentB.ID &&
+		dentA.Mode == dentB.Mode &&
+		dentA.Mtime == dentB.Mtime
+}
+
+// Diff type and diff status.
+const (
+	DiffTypeCommits = 'C' /* diff between two commits */
+
+	DiffStatusAdded      = 'A'
+	DiffStatusDeleted    = 'D'
+	DiffStatusModified   = 'M'
+	DiffStatusRenamed    = 'R'
+	DiffStatusUnmerged   = 'U'
+	DiffStatusDirAdded   = 'B'
+	DiffStatusDirDeleted = 'C'
+	DiffStatusDirRenamed = 'E'
+)
+
+// DiffEntry describes a single change found by a diff.
+type DiffEntry struct {
+	DiffType   rune
+	Status     rune
+	Sha1       string
+	Name       string
+	NewName    string
+	Size       int64
+	OriginSize int64
+}
+
+func diffEntryNewFromDirent(diffType, status rune, dent *fsmgr.SeafDirent, baseDir string) *DiffEntry {
+	de := new(DiffEntry)
+	de.Sha1 = dent.ID
+	de.DiffType = diffType
+	de.Status = status
+	de.Size = dent.Size
+	de.Name = filepath.Join(baseDir, dent.Name)
+
+	return de
+}
+
+func diffEntryNew(diffType, status rune, dirID, name string) *DiffEntry {
+	de := new(DiffEntry)
+	de.DiffType = diffType
+	de.Status = status
+	de.Sha1 = dirID
+	de.Name = name
+
+	return de
+}
+
+// DiffMergeRoots diffs a merged tree against its two parent trees (three-way diff).
+func DiffMergeRoots(storeID, mergedRoot, p1Root, p2Root string, results *[]*DiffEntry, foldDirDiff bool) error {
+	roots := []string{mergedRoot, p1Root, p2Root}
+
+	opt := new(DiffOptions)
+	opt.RepoID = storeID
+	opt.FileCB = threewayDiffFiles
+	opt.DirCB = threewayDiffDirs
+	opt.Data = diffData{foldDirDiff, results}
+
+	err := DiffTrees(roots, opt)
+	if err != nil {
+		err := fmt.Errorf("failed to diff trees: %v", err)
+		return err
+	}
+
+	diffResolveRenames(results)
+
+	return nil
+}
+
+func threewayDiffFiles(baseDir string, dents []*fsmgr.SeafDirent, optData interface{}) error {
+	m := dents[0]
+	p1 := dents[1]
+	p2 := dents[2]
+	data, ok := optData.(diffData)
+	if !ok {
+		err := fmt.Errorf("failed to assert diff data")
+		return err
+	}
+	results := data.results
+
+	if m != nil && p1 != nil && p2 != nil {
+		if !direntSame(m, p1) && !direntSame(m, p2) {
+			de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, m, baseDir)
+			*results = append(*results, de)
+		}
+	} else if m == nil && p1 != nil && p2 != nil {
+		de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDeleted, p1, baseDir)
+		*results = append(*results, de)
+	} else if m != nil && p1 == nil && p2 != nil {
+		if !direntSame(m, p2) {
+			de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, m, baseDir)
+			*results =
append(*results, de) + } + } else if m != nil && p1 != nil && p2 == nil { + if !direntSame(m, p1) { + de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, m, baseDir) + *results = append(*results, de) + } + } else if m != nil && p1 == nil && p2 == nil { + de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusAdded, m, baseDir) + *results = append(*results, de) + } + + return nil +} + +func threewayDiffDirs(baseDir string, dents []*fsmgr.SeafDirent, optData interface{}, recurse *bool) error { + *recurse = true + return nil +} + +func DiffCommitRoots(storeID, p1Root, p2Root string, results *[]*DiffEntry, foldDirDiff bool) error { + roots := []string{p1Root, p2Root} + + opt := new(DiffOptions) + opt.RepoID = storeID + opt.FileCB = twowayDiffFiles + opt.DirCB = twowayDiffDirs + opt.Data = diffData{foldDirDiff, results} + + err := DiffTrees(roots, opt) + if err != nil { + err := fmt.Errorf("failed to diff trees: %v", err) + return err + } + + diffResolveRenames(results) + + return nil +} + +func DiffCommits(commit1, commit2 *commitmgr.Commit, results *[]*DiffEntry, foldDirDiff bool) error { + repo := repomgr.Get(commit1.RepoID) + if repo == nil { + err := fmt.Errorf("failed to get repo %s", commit1.RepoID) + return err + } + roots := []string{commit1.RootID, commit2.RootID} + + opt := new(DiffOptions) + opt.RepoID = repo.StoreID + opt.FileCB = twowayDiffFiles + opt.DirCB = twowayDiffDirs + opt.Data = diffData{foldDirDiff, results} + + err := DiffTrees(roots, opt) + if err != nil { + err := fmt.Errorf("failed to diff trees: %v", err) + return err + } + + diffResolveRenames(results) + + return nil +} + +func twowayDiffFiles(baseDir string, dents []*fsmgr.SeafDirent, optData interface{}) error { + p1 := dents[0] + p2 := dents[1] + data, ok := optData.(diffData) + if !ok { + err := fmt.Errorf("failed to assert diff data") + return err + } + results := data.results + + if p1 == nil { + de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusAdded, p2, baseDir) + *results = append(*results, de) + return nil + } + + if p2 == nil { + de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDeleted, p1, baseDir) + *results = append(*results, de) + return nil + } + + if !direntSame(p1, p2) { + de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, p2, baseDir) + de.OriginSize = p1.Size + *results = append(*results, de) + } + + return nil +} + +func twowayDiffDirs(baseDir string, dents []*fsmgr.SeafDirent, optData interface{}, recurse *bool) error { + p1 := dents[0] + p2 := dents[1] + data, ok := optData.(diffData) + if !ok { + err := fmt.Errorf("failed to assert diff data") + return err + } + results := data.results + + if p1 == nil { + if p2.ID == EmptySha1 || data.foldDirDiff { + de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDirAdded, p2, baseDir) + *results = append(*results, de) + *recurse = false + } else { + *recurse = true + } + + return nil + } + + if p2 == nil { + de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDirDeleted, p1, baseDir) + *results = append(*results, de) + if data.foldDirDiff { + *recurse = false + } else { + *recurse = true + } + } + + return nil +} + +func diffResolveRenames(des *[]*DiffEntry) error { + var deletedEmptyCount, deletedEmptyDirCount, addedEmptyCount, addedEmptyDirCount int + for _, de := range *des { + if de.Sha1 == EmptySha1 { + if de.Status == DiffStatusDeleted { + deletedEmptyCount++ + } + if de.Status == DiffStatusDirDeleted { + deletedEmptyDirCount++ + } + if de.Status == DiffStatusAdded { + addedEmptyCount++ + } + if 
de.Status == DiffStatusDirAdded { + addedEmptyDirCount++ + } + } + } + + deletedFiles := make(map[string]*DiffEntry) + deletedDirs := make(map[string]*DiffEntry) + var results []*DiffEntry + var added []*DiffEntry + + checkEmptyDir := (deletedEmptyDirCount == 1 && addedEmptyDirCount == 1) + checkEmptyFile := (deletedEmptyCount == 1 && addedEmptyCount == 1) + + for _, de := range *des { + if de.Status == DiffStatusDeleted { + if de.Sha1 == EmptySha1 && !checkEmptyFile { + results = append(results, de) + continue + } + deletedFiles[de.Sha1] = de + } + + if de.Status == DiffStatusDirDeleted { + if de.Sha1 == EmptySha1 && !checkEmptyDir { + results = append(results, de) + continue + } + deletedDirs[de.Sha1] = de + } + + if de.Status == DiffStatusAdded { + if de.Sha1 == EmptySha1 && !checkEmptyFile { + results = append(results, de) + continue + } + added = append(added, de) + } + + if de.Status == DiffStatusDirAdded { + if de.Sha1 == EmptySha1 && !checkEmptyDir { + results = append(results, de) + continue + } + + added = append(added, de) + } + + if de.Status == DiffStatusModified { + results = append(results, de) + } + } + + for _, de := range added { + var deAdd, deDel, deRename *DiffEntry + var renameStatus rune + + deAdd = de + if deAdd.Status == DiffStatusAdded { + deTmp, ok := deletedFiles[de.Sha1] + if !ok { + results = append(results, deAdd) + continue + } + deDel = deTmp + } else { + deTmp, ok := deletedDirs[de.Sha1] + if !ok { + results = append(results, deAdd) + continue + } + deDel = deTmp + } + + if deAdd.Status == DiffStatusDirAdded { + renameStatus = DiffStatusDirRenamed + } else { + renameStatus = DiffStatusRenamed + } + + deRename = diffEntryNew(deDel.DiffType, renameStatus, deDel.Sha1, deDel.Name) + deRename.NewName = de.Name + results = append(results, deRename) + if deDel.Status == DiffStatusDirDeleted { + delete(deletedDirs, deAdd.Sha1) + } else { + delete(deletedFiles, deAdd.Sha1) + } + } + + for _, de := range deletedFiles { + results = append(results, de) + } + + for _, de := range deletedDirs { + results = append(results, de) + } + *des = results + + return nil +} + +func DiffResultsToDesc(results []*DiffEntry) string { + var nAddMod, nRemoved, nRenamed int + var nNewDir, nRemovedDir int + var addModFile, removedFile string + var renamedFile string + var newDir, removedDir string + var desc string + + if results == nil { + return "" + } + + for _, de := range results { + switch de.Status { + case DiffStatusAdded: + if nAddMod == 0 { + addModFile = filepath.Base(de.Name) + } + nAddMod++ + case DiffStatusDeleted: + if nRemoved == 0 { + removedFile = filepath.Base(de.Name) + } + nRemoved++ + case DiffStatusRenamed: + if nRenamed == 0 { + renamedFile = filepath.Base(de.Name) + } + nRenamed++ + case DiffStatusModified: + if nAddMod == 0 { + addModFile = filepath.Base(de.Name) + } + nAddMod++ + case DiffStatusDirAdded: + if nNewDir == 0 { + newDir = filepath.Base(de.Name) + } + nNewDir++ + case DiffStatusDirDeleted: + if nRemovedDir == 0 { + removedDir = filepath.Base(de.Name) + } + nRemovedDir++ + } + } + + if nAddMod == 1 { + desc = fmt.Sprintf("Added or modified \"%s\".\n", addModFile) + } else if nAddMod > 1 { + desc = fmt.Sprintf("Added or modified \"%s\" and %d more files.\n", addModFile, nAddMod-1) + } + + if nRemoved == 1 { + desc += fmt.Sprintf("Deleted \"%s\".\n", removedFile) + } else if nRemoved > 1 { + desc += fmt.Sprintf("Deleted \"%s\" and %d more files.\n", removedFile, nRemoved-1) + } + + if nRenamed == 1 { + desc += fmt.Sprintf("Renamed \"%s\".\n", 
renamedFile) + } else if nRenamed > 1 { + desc += fmt.Sprintf("Renamed \"%s\" and %d more files.\n", renamedFile, nRenamed-1) + } + + if nNewDir == 1 { + desc += fmt.Sprintf("Added directory \"%s\".\n", newDir) + } else if nNewDir > 1 { + desc += fmt.Sprintf("Added \"%s\" and %d more directories.\n", newDir, nNewDir-1) + } + + if nRemovedDir == 1 { + desc += fmt.Sprintf("Removed directory \"%s\".\n", removedDir) + } else if nRemovedDir > 1 { + desc += fmt.Sprintf("Removed \"%s\" and %d more directories.\n", removedDir, nRemovedDir-1) + } + + return desc +} diff --git a/fileserver/diff/diff_test.go b/fileserver/diff/diff_test.go new file mode 100644 index 0000000..0a51258 --- /dev/null +++ b/fileserver/diff/diff_test.go @@ -0,0 +1,281 @@ +package diff + +import ( + "fmt" + "os" + "syscall" + "testing" + + "github.com/haiwen/seafile-server/fileserver/fsmgr" +) + +const ( + emptySHA1 = "0000000000000000000000000000000000000000" + diffTestSeafileConfPath = "/tmp/conf" + diffTestSeafileDataDir = "/tmp/conf/seafile-data" + diffTestRepoID = "0d18a711-c988-4f7b-960c-211b34705ce3" +) + +var diffTestTree1 string +var diffTestTree2 string +var diffTestTree3 string +var diffTestTree4 string +var diffTestFileID string +var diffTestDirID1 string +var diffTestDirID2 string + +/* + test directory structure: + + tree1 + |-- + + tree2 + |--file + + tree3 + |--dir + + tree4 + |--dir + |-- file + +*/ + +func TestDiffTrees(t *testing.T) { + fsmgr.Init(diffTestSeafileConfPath, diffTestSeafileDataDir) + + err := diffTestCreateTestDir() + if err != nil { + fmt.Printf("failed to create test dir: %v", err) + os.Exit(1) + } + + t.Run("test1", testDiffTrees1) + t.Run("test2", testDiffTrees2) + t.Run("test3", testDiffTrees3) + t.Run("test4", testDiffTrees4) + t.Run("test5", testDiffTrees5) + + err = diffTestDelFile() + if err != nil { + fmt.Printf("failed to remove test file : %v", err) + } +} + +func diffTestCreateTestDir() error { + modeDir := uint32(syscall.S_IFDIR | 0644) + modeFile := uint32(syscall.S_IFREG | 0644) + + dir1, err := diffTestCreateSeafdir(nil) + if err != nil { + err := fmt.Errorf("failed to get seafdir: %v", err) + return err + } + diffTestTree1 = dir1 + file1, err := fsmgr.NewSeafile(1, 1, nil) + if err != nil { + err := fmt.Errorf("failed to new seafile: %v", err) + return err + } + diffTestFileID = file1.FileID + err = fsmgr.SaveSeafile(diffTestRepoID, file1) + if err != nil { + err := fmt.Errorf("failed to save seafile: %v", err) + return err + } + dent1 := fsmgr.SeafDirent{ID: file1.FileID, Name: "file", Mode: modeFile, Size: 1} + dir2, err := diffTestCreateSeafdir([]*fsmgr.SeafDirent{&dent1}) + if err != nil { + err := fmt.Errorf("failed to get seafdir: %v", err) + return err + } + diffTestTree2 = dir2 + + dent2 := fsmgr.SeafDirent{ID: dir1, Name: "dir", Mode: modeDir} + diffTestDirID1 = dir1 + dir3, err := diffTestCreateSeafdir([]*fsmgr.SeafDirent{&dent2}) + if err != nil { + err := fmt.Errorf("failed to get seafdir: %v", err) + return err + } + diffTestTree3 = dir3 + + dent3 := fsmgr.SeafDirent{ID: dir2, Name: "dir", Mode: modeDir} + diffTestDirID2 = dir2 + dir4, err := diffTestCreateSeafdir([]*fsmgr.SeafDirent{&dent3}) + if err != nil { + err := fmt.Errorf("failed to get seafdir: %v", err) + return err + } + diffTestTree4 = dir4 + + return nil +} + +func testDiffTrees1(t *testing.T) { + var results []interface{} + opt := &DiffOptions{ + FileCB: diffTestFileCB, + DirCB: diffTestDirCB, + RepoID: diffTestRepoID} + opt.Data = &results + DiffTrees([]string{diffTestTree2, diffTestTree1}, opt) + 
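+	// tree2 adds a single file relative to the empty tree1, so the diff
+	// callbacks should collect exactly one result: the new file's ID.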
if len(results) != 1 {
+		t.Errorf("data length is %d not 1", len(results))
+	}
+	var ret = make([]string, len(results))
+	for k, v := range results {
+		ret[k] = fmt.Sprintf("%s", v)
+	}
+	if ret[0] != diffTestFileID {
+		t.Errorf("result %s != %s", ret[0], diffTestFileID)
+	}
+}
+
+func testDiffTrees2(t *testing.T) {
+	var results []interface{}
+	opt := &DiffOptions{
+		FileCB: diffTestFileCB,
+		DirCB:  diffTestDirCB,
+		RepoID: diffTestRepoID}
+	opt.Data = &results
+	DiffTrees([]string{diffTestTree3, diffTestTree1}, opt)
+	if len(results) != 1 {
+		t.Errorf("data length is %d not 1", len(results))
+	}
+	var ret = make([]string, len(results))
+	for k, v := range results {
+		ret[k] = fmt.Sprintf("%s", v)
+	}
+	if ret[0] != diffTestDirID1 {
+		t.Errorf("result %s != %s", ret[0], diffTestDirID1)
+	}
+}
+
+func testDiffTrees3(t *testing.T) {
+	var results []interface{}
+	opt := &DiffOptions{
+		FileCB: diffTestFileCB,
+		DirCB:  diffTestDirCB,
+		RepoID: diffTestRepoID}
+	opt.Data = &results
+	DiffTrees([]string{diffTestTree4, diffTestTree1}, opt)
+	if len(results) != 2 {
+		t.Errorf("data length is %d not 2", len(results))
+	}
+
+	var ret = make([]string, len(results))
+	for k, v := range results {
+		ret[k] = fmt.Sprintf("%s", v)
+	}
+	if ret[0] != diffTestDirID2 {
+		t.Errorf("result %s != %s", ret[0], diffTestDirID2)
+	}
+	if ret[1] != diffTestFileID {
+		t.Errorf("result %s != %s", ret[1], diffTestFileID)
+	}
+}
+
+func testDiffTrees4(t *testing.T) {
+	var results []interface{}
+	opt := &DiffOptions{
+		FileCB: diffTestFileCB,
+		DirCB:  diffTestDirCB,
+		RepoID: diffTestRepoID}
+	opt.Data = &results
+	DiffTrees([]string{diffTestTree4, diffTestTree3}, opt)
+	if len(results) != 2 {
+		t.Errorf("data length is %d not 2", len(results))
+	}
+
+	var ret = make([]string, len(results))
+	for k, v := range results {
+		ret[k] = fmt.Sprintf("%s", v)
+	}
+	if ret[0] != diffTestDirID2 {
+		t.Errorf("result %s != %s", ret[0], diffTestDirID2)
+	}
+	if ret[1] != diffTestFileID {
+		t.Errorf("result %s != %s", ret[1], diffTestFileID)
+	}
+}
+
+func testDiffTrees5(t *testing.T) {
+	var results []interface{}
+	opt := &DiffOptions{
+		FileCB: diffTestFileCB,
+		DirCB:  diffTestDirCB,
+		RepoID: diffTestRepoID}
+	opt.Data = &results
+	DiffTrees([]string{diffTestTree3, diffTestTree2}, opt)
+	if len(results) != 1 {
+		t.Errorf("data length is %d not 1", len(results))
+	}
+	var ret = make([]string, len(results))
+	for k, v := range results {
+		ret[k] = fmt.Sprintf("%s", v)
+	}
+	if ret[0] != diffTestDirID1 {
+		t.Errorf("result %s != %s", ret[0], diffTestDirID1)
+	}
+}
+
+func diffTestCreateSeafdir(dents []*fsmgr.SeafDirent) (string, error) {
+	seafdir, err := fsmgr.NewSeafdir(1, dents)
+	if err != nil {
+		return "", err
+	}
+	err = fsmgr.SaveSeafdir(diffTestRepoID, seafdir)
+	if err != nil {
+		return "", err
+	}
+
+	return seafdir.DirID, nil
+}
+
+func diffTestDelFile() error {
+	err := os.RemoveAll(diffTestSeafileConfPath)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func diffTestFileCB(baseDir string, files []*fsmgr.SeafDirent, data interface{}) error {
+	file1 := files[0]
+	file2 := files[1]
+	results, ok := data.(*[]interface{})
+	if !ok {
+		err := fmt.Errorf("failed to assert results")
+		return err
+	}
+
+	if file1 != nil &&
+		(file2 == nil || file1.ID != file2.ID) &&
+		file1.ID != emptySHA1 {
+		*results = append(*results, file1.ID)
+	}
+
+	return nil
+}
+
+func diffTestDirCB(baseDir string, dirs []*fsmgr.SeafDirent, data interface{}, recurse *bool) error {
+	dir1 := dirs[0]
+	dir2 := dirs[1]
+	results, ok :=
data.(*[]interface{})
+	if !ok {
+		err := fmt.Errorf("failed to assert results")
+		return err
+	}
+
+	if dir1 != nil &&
+		(dir2 == nil || dir1.ID != dir2.ID) &&
+		dir1.ID != emptySHA1 {
+		*results = append(*results, dir1.ID)
+	}
+
+	return nil
+}
diff --git a/fileserver/fileop.go b/fileserver/fileop.go
new file mode 100644
index 0000000..875db19
--- /dev/null
+++ b/fileserver/fileop.go
@@ -0,0 +1,3069 @@
+package main
+
+import (
+	"archive/zip"
+	"bytes"
+	"crypto/sha1"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"mime"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+	"unicode/utf8"
+
+	"database/sql"
+	"math/rand"
+	"sort"
+	"syscall"
+
+	"github.com/haiwen/seafile-server/fileserver/blockmgr"
+	"github.com/haiwen/seafile-server/fileserver/commitmgr"
+	"github.com/haiwen/seafile-server/fileserver/diff"
+	"github.com/haiwen/seafile-server/fileserver/fsmgr"
+	"github.com/haiwen/seafile-server/fileserver/repomgr"
+)
+
+// Dirents is an alias for slice of SeafDirent.
+type Dirents []*fsmgr.SeafDirent
+
+// Less sorts dirents in descending order by name.
+func (d Dirents) Less(i, j int) bool {
+	return d[i].Name > d[j].Name
+}
+
+func (d Dirents) Swap(i, j int) {
+	d[i], d[j] = d[j], d[i]
+}
+
+func (d Dirents) Len() int {
+	return len(d)
+}
+
+func initUpload() {
+	objDir := filepath.Join(dataDir, "httptemp", "cluster-shared")
+	os.MkdirAll(objDir, os.ModePerm)
+}
+
+// parseContentType guesses the Content-Type from the file name suffix. It
+// returns an empty string for unknown suffixes; callers fall back to
+// "application/octet-stream".
+func parseContentType(fileName string) string {
+	var contentType string
+
+	parts := strings.Split(fileName, ".")
+	if len(parts) >= 2 {
+		suffix := parts[len(parts)-1]
+		switch suffix {
+		case "txt":
+			contentType = "text/plain"
+		case "doc":
+			contentType = "application/vnd.ms-word"
+		case "docx":
+			contentType = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
+		case "ppt":
+			contentType = "application/vnd.ms-powerpoint"
+		case "xls":
+			contentType = "application/vnd.ms-excel"
+		case "xlsx":
+			contentType = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
+		case "pdf":
+			contentType = "application/pdf"
+		case "zip":
+			contentType = "application/zip"
+		case "mp3":
+			contentType = "audio/mp3"
+		case "mpeg":
+			contentType = "video/mpeg"
+		case "mp4":
+			contentType = "video/mp4"
+		case "jpeg", "JPEG", "jpg", "JPG":
+			contentType = "image/jpeg"
+		case "png", "PNG":
+			contentType = "image/png"
+		case "gif", "GIF":
+			contentType = "image/gif"
+		case "svg", "SVG":
+			contentType = "image/svg+xml"
+		}
+	}
+
+	return contentType
+}
+
+func testFireFox(r *http.Request) bool {
+	userAgent, ok := r.Header["User-Agent"]
+	if !ok {
+		return false
+	}
+
+	// The User-Agent product token is "Firefox", so compare case-insensitively.
+	userAgentStr := strings.Join(userAgent, "")
+	return strings.Contains(strings.ToLower(userAgentStr), "firefox")
+}
+
+func accessCB(rsp http.ResponseWriter, r *http.Request) *appError {
+	parts := strings.Split(r.URL.Path[1:], "/")
+	if len(parts) < 3 {
+		msg := "Invalid URL"
+		return &appError{nil, msg, http.StatusBadRequest}
+	}
+	token := parts[1]
+	fileName := parts[2]
+	accessInfo, err := parseWebaccessInfo(token)
+	if err != nil {
+		return err
+	}
+
+	repoID := accessInfo.repoID
+	op := accessInfo.op
+	user := accessInfo.user
+	objID := accessInfo.objID
+
+	if op != "view" && op != "download" && op != "download-link" {
+		msg := "Bad access token"
+		return &appError{nil, msg, http.StatusBadRequest}
+	}
+
+	if _, ok := r.Header["If-Modified-Since"]; ok {
+		return &appError{nil, "", http.StatusNotModified}
+	}
+
+	now := time.Now()
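+	// Conditional requests were answered above with 304 Not Modified; for full
+	// responses, stamp Last-Modified with the current time and let clients
+	// cache the content for an hour.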
rsp.Header().Set("Last-Modified", now.Format("Mon, 2 Jan 2006 15:04:05 GMT")) + rsp.Header().Set("Cache-Control", "max-age=3600") + + ranges := r.Header["Range"] + byteRanges := strings.Join(ranges, "") + + repo := repomgr.Get(repoID) + if repo == nil { + msg := "Bad repo id" + return &appError{nil, msg, http.StatusBadRequest} + } + + var cryptKey *seafileCrypt + if repo.IsEncrypted { + key, err := parseCryptKey(rsp, repoID, user) + if err != nil { + return err + } + cryptKey = key + } + + exists, _ := fsmgr.Exists(repo.StoreID, objID) + if !exists { + msg := "Invalid file id" + return &appError{nil, msg, http.StatusBadRequest} + } + + if !repo.IsEncrypted && len(byteRanges) != 0 { + if err := doFileRange(rsp, r, repo, objID, fileName, op, byteRanges, user); err != nil { + return err + } + } else if err := doFile(rsp, r, repo, objID, fileName, op, cryptKey, user); err != nil { + return err + } + + return nil +} + +type seafileCrypt struct { + key []byte + iv []byte +} + +func parseCryptKey(rsp http.ResponseWriter, repoID string, user string) (*seafileCrypt, *appError) { + key, err := rpcclient.Call("seafile_get_decrypt_key", repoID, user) + if err != nil { + errMessage := "Repo is encrypted. Please provide password to view it." + return nil, &appError{nil, errMessage, http.StatusBadRequest} + } + + cryptKey, ok := key.(map[string]interface{}) + if !ok { + err := fmt.Errorf("failed to assert crypt key") + return nil, &appError{err, "", http.StatusInternalServerError} + } + + seafileKey := new(seafileCrypt) + + if cryptKey != nil { + key, ok := cryptKey["key"].(string) + if !ok { + err := fmt.Errorf("failed to parse crypt key") + return nil, &appError{err, "", http.StatusInternalServerError} + } + iv, ok := cryptKey["iv"].(string) + if !ok { + err := fmt.Errorf("failed to parse crypt iv") + return nil, &appError{err, "", http.StatusInternalServerError} + } + seafileKey.key, err = hex.DecodeString(key) + if err != nil { + err := fmt.Errorf("failed to decode key: %v", err) + return nil, &appError{err, "", http.StatusInternalServerError} + } + seafileKey.iv, err = hex.DecodeString(iv) + if err != nil { + err := fmt.Errorf("failed to decode iv: %v", err) + return nil, &appError{err, "", http.StatusInternalServerError} + } + } + + return seafileKey, nil +} + +func doFile(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, fileID string, + fileName string, operation string, cryptKey *seafileCrypt, user string) *appError { + file, err := fsmgr.GetSeafile(repo.StoreID, fileID) + if err != nil { + msg := "Failed to get seafile" + return &appError{nil, msg, http.StatusBadRequest} + } + + var encKey, encIv []byte + if cryptKey != nil { + encKey = cryptKey.key + encIv = cryptKey.iv + } + + rsp.Header().Set("Access-Control-Allow-Origin", "*") + + setCommonHeaders(rsp, r, operation, fileName) + + //filesize string + fileSize := fmt.Sprintf("%d", file.FileSize) + rsp.Header().Set("Content-Length", fileSize) + + if r.Method == "HEAD" { + rsp.WriteHeader(http.StatusOK) + return nil + } + if file.FileSize == 0 { + rsp.WriteHeader(http.StatusOK) + return nil + } + + if cryptKey != nil { + for _, blkID := range file.BlkIDs { + var buf bytes.Buffer + blockmgr.Read(repo.StoreID, blkID, &buf) + decoded, err := decrypt(buf.Bytes(), encKey, encIv) + if err != nil { + err := fmt.Errorf("failed to decrypt block %s: %v", blkID, err) + return &appError{err, "", http.StatusInternalServerError} + } + _, err = rsp.Write(decoded) + if err != nil { + log.Printf("failed to write block %s to response: %v", blkID, 
err)
+				return nil
+			}
+		}
+		return nil
+	}
+
+	for _, blkID := range file.BlkIDs {
+		err := blockmgr.Read(repo.StoreID, blkID, rsp)
+		if err != nil {
+			log.Printf("failed to write block %s to response: %v", blkID, err)
+			return nil
+		}
+	}
+
+	return nil
+}
+
+func doFileRange(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, fileID string,
+	fileName string, operation string, byteRanges string, user string) *appError {
+
+	file, err := fsmgr.GetSeafile(repo.StoreID, fileID)
+	if err != nil {
+		msg := "Failed to get seafile"
+		return &appError{nil, msg, http.StatusBadRequest}
+	}
+
+	if file.FileSize == 0 {
+		rsp.WriteHeader(http.StatusOK)
+		return nil
+	}
+
+	start, end, ok := parseRange(byteRanges, file.FileSize)
+	if !ok {
+		conRange := fmt.Sprintf("bytes */%d", file.FileSize)
+		rsp.Header().Set("Content-Range", conRange)
+		return &appError{nil, "", http.StatusRequestedRangeNotSatisfiable}
+	}
+
+	rsp.Header().Set("Accept-Ranges", "bytes")
+
+	setCommonHeaders(rsp, r, operation, fileName)
+
+	conLen := fmt.Sprintf("%d", end-start+1)
+	rsp.Header().Set("Content-Length", conLen)
+
+	conRange := fmt.Sprintf("bytes %d-%d/%d", start, end, file.FileSize)
+	rsp.Header().Set("Content-Range", conRange)
+
+	var blkSize []uint64
+	for _, v := range file.BlkIDs {
+		size, err := blockmgr.Stat(repo.StoreID, v)
+		if err != nil {
+			err := fmt.Errorf("failed to stat block %s: %v", v, err)
+			return &appError{err, "", http.StatusInternalServerError}
+		}
+		blkSize = append(blkSize, uint64(size))
+	}
+
+	var off uint64
+	var pos uint64
+	var startBlock int
+	for i, v := range blkSize {
+		pos = start - off
+		off += v
+		if off > start {
+			startBlock = i
+			break
+		}
+	}
+
+	// Read the start block from the position computed above.
+	var i int
+	for ; i < len(file.BlkIDs); i++ {
+		if i < startBlock {
+			continue
+		}
+
+		blkID := file.BlkIDs[i]
+		var buf bytes.Buffer
+		if end-start+1 <= blkSize[i]-pos {
+			err := blockmgr.Read(repo.StoreID, blkID, &buf)
+			if err != nil {
+				log.Printf("failed to read block %s: %v", blkID, err)
+				return nil
+			}
+			recvBuf := buf.Bytes()
+			_, err = rsp.Write(recvBuf[pos : pos+end-start+1])
+			if err != nil {
+				log.Printf("failed to write block %s to response: %v", blkID, err)
+			}
+			return nil
+		}
+
+		err := blockmgr.Read(repo.StoreID, blkID, &buf)
+		if err != nil {
+			log.Printf("failed to read block %s: %v", blkID, err)
+			return nil
+		}
+		recvBuf := buf.Bytes()
+		_, err = rsp.Write(recvBuf[pos:])
+		if err != nil {
+			log.Printf("failed to write block %s to response: %v", blkID, err)
+			return nil
+		}
+		start += blkSize[i] - pos
+		i++
+		break
+	}
+
+	// Read the remaining blocks from position 0.
+	for ; i < len(file.BlkIDs); i++ {
+		blkID := file.BlkIDs[i]
+		var buf bytes.Buffer
+		if end-start+1 <= blkSize[i] {
+			err := blockmgr.Read(repo.StoreID, blkID, &buf)
+			if err != nil {
+				log.Printf("failed to read block %s: %v", blkID, err)
+				return nil
+			}
+			recvBuf := buf.Bytes()
+			_, err = rsp.Write(recvBuf[:end-start+1])
+			if err != nil {
+				log.Printf("failed to write block %s to response: %v", blkID, err)
+				return nil
+			}
+			break
+		} else {
+			err := blockmgr.Read(repo.StoreID, blkID, rsp)
+			if err != nil {
+				log.Printf("failed to write block %s to response: %v", blkID, err)
+				return nil
+			}
+			start += blkSize[i]
+		}
+	}
+
+	return nil
+}
+
+func parseRange(byteRanges string, fileSize uint64) (uint64, uint64, bool) {
+	start := strings.Index(byteRanges, "=")
+	end := strings.Index(byteRanges, "-")
+
+	if end < 0 {
+		return 0, 0, false
+	}
+
+	var
startByte, endByte uint64 + + if start+1 == end { + retByte, err := strconv.ParseUint(byteRanges[end+1:], 10, 64) + if err != nil || retByte == 0 { + return 0, 0, false + } + startByte = fileSize - retByte + endByte = fileSize - 1 + } else if end+1 == len(byteRanges) { + firstByte, err := strconv.ParseUint(byteRanges[start+1:end], 10, 64) + if err != nil { + return 0, 0, false + } + + startByte = firstByte + endByte = fileSize - 1 + } else { + firstByte, err := strconv.ParseUint(byteRanges[start+1:end], 10, 64) + if err != nil { + return 0, 0, false + } + lastByte, err := strconv.ParseUint(byteRanges[end+1:], 10, 64) + if err != nil { + return 0, 0, false + } + + if lastByte > fileSize-1 { + lastByte = fileSize - 1 + } + + startByte = firstByte + endByte = lastByte + } + + if startByte > endByte { + return 0, 0, false + } + + return startByte, endByte, true +} + +func setCommonHeaders(rsp http.ResponseWriter, r *http.Request, operation, fileName string) { + fileType := parseContentType(fileName) + if fileType != "" { + var contentType string + if strings.Index(fileType, "text") != -1 { + contentType = fileType + "; " + "charset=gbk" + } else { + contentType = fileType + } + rsp.Header().Set("Content-Type", contentType) + } else { + rsp.Header().Set("Content-Type", "application/octet-stream") + } + + var contFileName string + if operation == "download" || operation == "download-link" || + operation == "downloadblks" { + if testFireFox(r) { + contFileName = fmt.Sprintf("attachment;filename*=\"utf-8' '%s\"", fileName) + } else { + contFileName = fmt.Sprintf("attachment;filename*=\"%s\"", fileName) + } + } else { + if testFireFox(r) { + contFileName = fmt.Sprintf("inline;filename*=\"utf-8' '%s\"", fileName) + } else { + contFileName = fmt.Sprintf("inline;filename=\"%s\"", fileName) + } + } + rsp.Header().Set("Content-Disposition", contFileName) + + if fileType != "image/jpg" { + rsp.Header().Set("X-Content-Type-Options", "nosniff") + } +} + +func accessBlksCB(rsp http.ResponseWriter, r *http.Request) *appError { + parts := strings.Split(r.URL.Path[1:], "/") + if len(parts) < 3 { + msg := "Invalid URL" + return &appError{nil, msg, http.StatusBadRequest} + } + token := parts[1] + blkID := parts[2] + accessInfo, err := parseWebaccessInfo(token) + if err != nil { + return err + } + repoID := accessInfo.repoID + op := accessInfo.op + user := accessInfo.user + id := accessInfo.objID + + if _, ok := r.Header["If-Modified-Since"]; ok { + return &appError{nil, "", http.StatusNotModified} + } + + now := time.Now() + rsp.Header().Set("Last-Modified", now.Format("Mon, 2 Jan 2006 15:04:05 GMT")) + rsp.Header().Set("Cache-Control", "max-age=3600") + + repo := repomgr.Get(repoID) + if repo == nil { + msg := "Bad repo id" + return &appError{nil, msg, http.StatusBadRequest} + } + + exists, _ := fsmgr.Exists(repo.StoreID, id) + if !exists { + msg := "Invalid file id" + return &appError{nil, msg, http.StatusBadRequest} + } + + if op != "downloadblks" { + msg := "Bad access token" + return &appError{nil, msg, http.StatusBadRequest} + } + + if err := doBlock(rsp, r, repo, id, user, blkID); err != nil { + return err + } + + return nil +} + +func doBlock(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, fileID string, + user string, blkID string) *appError { + file, err := fsmgr.GetSeafile(repo.StoreID, fileID) + if err != nil { + msg := "Failed to get seafile" + return &appError{nil, msg, http.StatusBadRequest} + } + + var found bool + for _, id := range file.BlkIDs { + if id == blkID { + found = true + 
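+			// The requested block belongs to this file; stop scanning.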
break
+		}
+	}
+
+	if !found {
+		rsp.WriteHeader(http.StatusBadRequest)
+		return nil
+	}
+
+	exists := blockmgr.Exists(repo.StoreID, blkID)
+	if !exists {
+		rsp.WriteHeader(http.StatusBadRequest)
+		return nil
+	}
+
+	rsp.Header().Set("Access-Control-Allow-Origin", "*")
+	setCommonHeaders(rsp, r, "downloadblks", blkID)
+
+	size, err := blockmgr.Stat(repo.StoreID, blkID)
+	if err != nil {
+		msg := "Failed to stat block"
+		return &appError{nil, msg, http.StatusBadRequest}
+	}
+	if size == 0 {
+		rsp.WriteHeader(http.StatusOK)
+		return nil
+	}
+
+	fileSize := fmt.Sprintf("%d", size)
+	rsp.Header().Set("Content-Length", fileSize)
+
+	err = blockmgr.Read(repo.StoreID, blkID, rsp)
+	if err != nil {
+		log.Printf("failed to write block %s to response: %v", blkID, err)
+	}
+
+	return nil
+}
+
+func accessZipCB(rsp http.ResponseWriter, r *http.Request) *appError {
+	parts := strings.Split(r.URL.Path[1:], "/")
+	if len(parts) != 2 {
+		msg := "Invalid URL"
+		return &appError{nil, msg, http.StatusBadRequest}
+	}
+	token := parts[1]
+
+	accessInfo, err := parseWebaccessInfo(token)
+	if err != nil {
+		return err
+	}
+
+	repoID := accessInfo.repoID
+	op := accessInfo.op
+	user := accessInfo.user
+	data := accessInfo.objID
+
+	if op != "download-dir" && op != "download-dir-link" &&
+		op != "download-multi" && op != "download-multi-link" {
+		err := fmt.Errorf("wrong operation of token: %s", op)
+		return &appError{err, "", http.StatusInternalServerError}
+	}
+
+	if _, ok := r.Header["If-Modified-Since"]; ok {
+		return &appError{nil, "", http.StatusNotModified}
+	}
+
+	now := time.Now()
+	rsp.Header().Set("Last-Modified", now.Format("Mon, 2 Jan 2006 15:04:05 GMT"))
+	rsp.Header().Set("Cache-Control", "max-age=3600")
+
+	if err := downloadZipFile(rsp, r, data, repoID, user, op); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func downloadZipFile(rsp http.ResponseWriter, r *http.Request, data, repoID, user, op string) *appError {
+	repo := repomgr.Get(repoID)
+	if repo == nil {
+		msg := "Failed to get repo"
+		return &appError{nil, msg, http.StatusBadRequest}
+	}
+
+	obj := make(map[string]interface{})
+	err := json.Unmarshal([]byte(data), &obj)
+	if err != nil {
+		err := fmt.Errorf("failed to parse obj data for zip: %v", err)
+		return &appError{err, "", http.StatusInternalServerError}
+	}
+
+	ar := zip.NewWriter(rsp)
+	defer ar.Close()
+
+	if op == "download-dir" || op == "download-dir-link" {
+		dirName, ok := obj["dir_name"].(string)
+		if !ok || dirName == "" {
+			err := fmt.Errorf("invalid download dir data: missing dir_name field")
+			return &appError{err, "", http.StatusInternalServerError}
+		}
+
+		objID, ok := obj["obj_id"].(string)
+		if !ok || objID == "" {
+			err := fmt.Errorf("invalid download dir data: missing obj_id field")
+			return &appError{err, "", http.StatusInternalServerError}
+		}
+
+		setCommonHeaders(rsp, r, "download", dirName)
+
+		err := packDir(ar, repo, objID, dirName)
+		if err != nil {
+			log.Printf("failed to pack dir %s: %v", dirName, err)
+			return nil
+		}
+	} else {
+		dirList, err := parseDirFilelist(repo, obj)
+		if err != nil {
+			return &appError{err, "", http.StatusInternalServerError}
+		}
+
+		now := time.Now()
+		zipName := fmt.Sprintf("documents-export-%d-%d-%d.zip", now.Year(), now.Month(), now.Day())
+
+		setCommonHeaders(rsp, r, "download", zipName)
+
+		for _, v := range dirList {
+			if fsmgr.IsDir(v.Mode) {
+				if err := packDir(ar, repo, v.ID, v.Name); err != nil {
+					log.Printf("failed to pack dir %s: %v", v.Name, err)
+					return nil
+				}
+			} else {
+				if err := packFiles(ar, &v, repo, ""); err != nil {
+					log.Printf("failed to pack file %s: %v", v.Name, err)
+					return nil
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+func parseDirFilelist(repo *repomgr.Repo, obj map[string]interface{}) ([]fsmgr.SeafDirent, error) {
+	parentDir, ok := obj["parent_dir"].(string)
+	if !ok || parentDir == "" {
+		err := fmt.Errorf("invalid download multi data: missing parent_dir field")
+		return nil, err
+	}
+
+	dir, err := fsmgr.GetSeafdirByPath(repo.StoreID, repo.RootID, parentDir)
+	if err != nil {
+		err := fmt.Errorf("failed to get dir %s repo %s", parentDir, repo.StoreID)
+		return nil, err
+	}
+
+	fileList, ok := obj["file_list"].([]interface{})
+	if !ok || fileList == nil {
+		err := fmt.Errorf("invalid download multi data: missing file_list field")
+		return nil, err
+	}
+
+	direntHash := make(map[string]fsmgr.SeafDirent)
+	for _, v := range dir.Entries {
+		direntHash[v.Name] = *v
+	}
+
+	direntList := make([]fsmgr.SeafDirent, 0)
+
+	for _, fileName := range fileList {
+		name, ok := fileName.(string)
+		if !ok {
+			err := fmt.Errorf("invalid download multi data")
+			return nil, err
+		}
+
+		v, ok := direntHash[name]
+		if !ok {
+			err := fmt.Errorf("invalid download multi data")
+			return nil, err
+		}
+
+		direntList = append(direntList, v)
+	}
+
+	return direntList, nil
+}
+
+func packDir(ar *zip.Writer, repo *repomgr.Repo, dirID, dirPath string) error {
+	dirent, err := fsmgr.GetSeafdir(repo.StoreID, dirID)
+	if err != nil {
+		err := fmt.Errorf("failed to get dir for zip: %v", err)
+		return err
+	}
+
+	if dirent.Entries == nil {
+		fileDir := filepath.Join(dirPath)
+		fileDir = strings.TrimLeft(fileDir, "/")
+		_, err := ar.Create(fileDir + "/")
+		if err != nil {
+			err := fmt.Errorf("failed to create zip dir: %v", err)
+			return err
+		}
+
+		return nil
+	}
+
+	entries := dirent.Entries
+
+	for _, v := range entries {
+		fileDir := filepath.Join(dirPath, v.Name)
+		fileDir = strings.TrimLeft(fileDir, "/")
+		if fsmgr.IsDir(v.Mode) {
+			if err := packDir(ar, repo, v.ID, fileDir); err != nil {
+				return err
+			}
+		} else {
+			if err := packFiles(ar, v, repo, dirPath); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func packFiles(ar *zip.Writer, dirent *fsmgr.SeafDirent, repo *repomgr.Repo, parentPath string) error {
+	file, err := fsmgr.GetSeafile(repo.StoreID, dirent.ID)
+	if err != nil {
+		err := fmt.Errorf("failed to get seafile: %v", err)
+		return err
+	}
+
+	filePath := filepath.Join(parentPath, dirent.Name)
+	filePath = strings.TrimLeft(filePath, "/")
+
+	fileHeader := new(zip.FileHeader)
+	fileHeader.Name = filePath
+	fileHeader.Modified = time.Unix(dirent.Mtime, 0)
+	fileHeader.Method = zip.Deflate
+	zipFile, err := ar.CreateHeader(fileHeader)
+	if err != nil {
+		err := fmt.Errorf("failed to create zip file: %v", err)
+		return err
+	}
+
+	for _, blkID := range file.BlkIDs {
+		err := blockmgr.Read(repo.StoreID, blkID, zipFile)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type recvData struct {
+	parentDir   string
+	tokenType   string
+	repoID      string
+	user        string
+	rstart      int64
+	rend        int64
+	fsize       int64
+	fileNames   []string
+	files       []string
+	fileHeaders []*multipart.FileHeader
+}
+
+func uploadAPICB(rsp http.ResponseWriter, r *http.Request) *appError {
+	fsm, err := parseUploadHeaders(r)
+	if err != nil {
+		return err
+	}
+
+	if err := doUpload(rsp, r, fsm, false); err != nil {
+		formatJSONError(rsp, err)
+		return err
+	}
+
+	return nil
+}
+
+func uploadAjaxCB(rsp http.ResponseWriter, r *http.Request) *appError {
+	fsm, err := parseUploadHeaders(r)
+	if err != nil {
+		return err
+	}
+
+	if err :=
doUpload(rsp, r, fsm, true); err != nil {
+		formatJSONError(rsp, err)
+		return err
+	}
+
+	return nil
+}
+
+func formatJSONError(rsp http.ResponseWriter, err *appError) {
+	if err.Message != "" {
+		rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
+		err.Message = fmt.Sprintf("{\"error\": \"%s\"}", err.Message)
+	}
+}
+
+func doUpload(rsp http.ResponseWriter, r *http.Request, fsm *recvData, isAjax bool) *appError {
+	rsp.Header().Set("Access-Control-Allow-Origin", "*")
+	rsp.Header().Set("Access-Control-Allow-Headers", "x-requested-with, content-type, content-range, content-disposition, accept, origin, authorization")
+	rsp.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, PATCH, DELETE, OPTIONS")
+	rsp.Header().Set("Access-Control-Max-Age", "86400")
+
+	if r.Method == "OPTIONS" {
+		rsp.WriteHeader(http.StatusOK)
+		return nil
+	}
+
+	if err := r.ParseMultipartForm(1 << 20); err != nil {
+		return &appError{nil, "", http.StatusBadRequest}
+	}
+	defer r.MultipartForm.RemoveAll()
+
+	repoID := fsm.repoID
+	user := fsm.user
+
+	replaceStr := r.FormValue("replace")
+	var replaceExisted bool
+	if replaceStr != "" {
+		replace, err := strconv.ParseInt(replaceStr, 10, 64)
+		if err != nil || (replace != 0 && replace != 1) {
+			msg := "Invalid argument.\n"
+			return &appError{nil, msg, http.StatusBadRequest}
+		}
+		if replace == 1 {
+			replaceExisted = true
+		}
+	}
+
+	parentDir := r.FormValue("parent_dir")
+	if parentDir == "" {
+		msg := "Invalid URL.\n"
+		return &appError{nil, msg, http.StatusBadRequest}
+	}
+
+	relativePath := r.FormValue("relative_path")
+	if relativePath != "" {
+		if relativePath[0] == '/' || relativePath[0] == '\\' {
+			msg := "Invalid relative path"
+			return &appError{nil, msg, http.StatusBadRequest}
+		}
+	}
+
+	newParentDir := filepath.Join("/", parentDir, relativePath)
+	defer clearTmpFile(fsm, newParentDir)
+
+	if fsm.rstart >= 0 {
+		if parentDir[0] != '/' {
+			msg := "Invalid parent dir"
+			return &appError{nil, msg, http.StatusBadRequest}
+		}
+
+		formFiles := r.MultipartForm.File
+		files, ok := formFiles["file"]
+		if !ok {
+			msg := "No file in multipart form.\n"
+			return &appError{nil, msg, http.StatusBadRequest}
+		}
+
+		if len(files) > 1 {
+			msg := "More than one file in one request.\n"
+			return &appError{nil, msg, http.StatusBadRequest}
+		}
+
+		err := writeBlockDataToTmpFile(r, fsm, formFiles, repoID, newParentDir)
+		if err != nil {
+			msg := "Internal error.\n"
+			err := fmt.Errorf("failed to write block data to tmp file: %v", err)
+			return &appError{err, msg, http.StatusInternalServerError}
+		}
+
+		if fsm.rend != fsm.fsize-1 {
+			// Headers must be set before the first Write on the response.
+			rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
+			success := "{\"success\": true}"
+			_, err := rsp.Write([]byte(success))
+			if err != nil {
+				log.Printf("failed to write data to response")
+			}
+
+			return nil
+		}
+	} else {
+		formFiles := r.MultipartForm.File
+		fileHeaders, ok := formFiles["file"]
+		if !ok {
+			msg := "No file in multipart form.\n"
+			return &appError{nil, msg, http.StatusBadRequest}
+		}
+		for _, handler := range fileHeaders {
+			fileName := filepath.Base(handler.Filename)
+			fsm.fileNames = append(fsm.fileNames, fileName)
+			fsm.fileHeaders = append(fsm.fileHeaders, handler)
+		}
+	}
+
+	if fsm.fileNames == nil {
+		msg := "No file.\n"
+		return &appError{nil, msg, http.StatusBadRequest}
+	}
+
+	if err := checkParentDir(repoID, parentDir); err != nil {
+		return err
+	}
+
+	if !isParentMatched(fsm.parentDir, parentDir) {
+		msg := "Permission denied."
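+		// The upload token is bound to fsm.parentDir; requests whose
+		// parent_dir falls outside it are rejected.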
+		return &appError{nil, msg, http.StatusForbidden}
+	}
+
+	if err := checkTmpFileList(fsm); err != nil {
+		return err
+	}
+
+	var contentLen int64
+	if fsm.fsize > 0 {
+		contentLen = fsm.fsize
+	} else {
+		lenstr := r.Header.Get("Content-Length")
+		if lenstr == "" {
+			contentLen = -1
+		} else {
+			tmpLen, err := strconv.ParseInt(lenstr, 10, 64)
+			if err != nil {
+				msg := "Internal error.\n"
+				err := fmt.Errorf("failed to parse content len: %v", err)
+				return &appError{err, msg, http.StatusInternalServerError}
+			}
+			contentLen = tmpLen
+		}
+	}
+
+	ret, err := checkQuota(repoID, contentLen)
+	if err != nil {
+		msg := "Internal error.\n"
+		err := fmt.Errorf("failed to check quota: %v", err)
+		return &appError{err, msg, http.StatusInternalServerError}
+	}
+	if ret == 1 {
+		msg := "Out of quota.\n"
+		return &appError{nil, msg, seafHTTPResNoQuota}
+	}
+
+	if err := createRelativePath(repoID, parentDir, relativePath, user); err != nil {
+		return err
+	}
+
+	rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
+
+	if err := postMultiFiles(rsp, r, repoID, newParentDir, user, fsm,
+		replaceExisted, isAjax); err != nil {
+		return err
+	}
+
+	oper := "web-file-upload"
+	if fsm.tokenType == "upload-link" {
+		oper = "link-file-upload"
+	}
+
+	sendStatisticMsg(repoID, user, oper, uint64(contentLen))
+
+	return nil
+}
+
+func writeBlockDataToTmpFile(r *http.Request, fsm *recvData, formFiles map[string][]*multipart.FileHeader,
+	repoID, parentDir string) error {
+	httpTempDir := filepath.Join(absDataDir, "httptemp")
+
+	fileHeaders, ok := formFiles["file"]
+	if !ok {
+		err := fmt.Errorf("failed to get file from multipart form")
+		return err
+	}
+
+	disposition := r.Header.Get("Content-Disposition")
+	if disposition == "" {
+		err := fmt.Errorf("missing content disposition")
+		return err
+	}
+
+	_, params, err := mime.ParseMediaType(disposition)
+	if err != nil {
+		err := fmt.Errorf("failed to parse Content-Disposition: %v", err)
+		return err
+	}
+	filename, err := url.QueryUnescape(params["filename"])
+	if err != nil {
+		err := fmt.Errorf("failed to get filename: %v", err)
+		return err
+	}
+
+	handler := fileHeaders[0]
+	file, err := handler.Open()
+	if err != nil {
+		err := fmt.Errorf("failed to open file for read: %v", err)
+		return err
+	}
+	defer file.Close()
+
+	var f *os.File
+	//filename := handler.Filename
+	filePath := filepath.Join("/", parentDir, filename)
+	tmpFile, err := repomgr.GetUploadTmpFile(repoID, filePath)
+	if err != nil || tmpFile == "" {
+		tmpDir := filepath.Join(httpTempDir, "cluster-shared")
+		f, err = ioutil.TempFile(tmpDir, filename)
+		if err != nil {
+			return err
+		}
+		repomgr.AddUploadTmpFile(repoID, filePath, f.Name())
+		tmpFile = f.Name()
+	} else {
+		f, err = os.OpenFile(tmpFile, os.O_WRONLY|os.O_CREATE, 0666)
+		if err != nil {
+			return err
+		}
+	}
+
+	if fsm.rend == fsm.fsize-1 {
+		fileName := filepath.Base(filename)
+		fsm.fileNames = append(fsm.fileNames, fileName)
+		fsm.files = append(fsm.files, tmpFile)
+	}
+
+	f.Seek(fsm.rstart, 0)
+	io.Copy(f, file)
+	f.Close()
+
+	return nil
+}
+
+func createRelativePath(repoID, parentDir, relativePath, user string) *appError {
+	if relativePath == "" {
+		return nil
+	}
+
+	err := mkdirWithParents(repoID, parentDir, relativePath, user)
+	if err != nil {
+		msg := "Internal error.\n"
+		err := fmt.Errorf("Failed to create parent directory: %v", err)
+		return &appError{err, msg, http.StatusInternalServerError}
+	}
+
+	return nil
+}
+
+func mkdirWithParents(repoID, parentDir, newDirPath, user string) error {
+	repo :=
repomgr.Get(repoID) + if repo == nil { + err := fmt.Errorf("failed to get repo %s", repoID) + return err + } + + headCommit, err := commitmgr.Load(repo.ID, repo.HeadCommitID) + if err != nil { + err := fmt.Errorf("failed to get head commit for repo %s", repo.ID) + return err + } + + relativeDirCan := getCanonPath(newDirPath) + + subFolders := strings.Split(relativeDirCan, "/") + + for _, name := range subFolders { + if name == "" { + continue + } + if shouldIgnoreFile(name) { + err := fmt.Errorf("invalid dir name %s", name) + return err + } + } + + var rootID string + var parentDirCan string + if parentDir == "/" || parentDir == "\\" { + parentDirCan = "/" + } else { + parentDirCan = getCanonPath(parentDir) + } + + absPath, dirID, err := checkAndCreateDir(repo, headCommit.RootID, parentDirCan, subFolders) + if err != nil { + err := fmt.Errorf("failed to check and create dir: %v", err) + return err + } + if absPath == "" { + return nil + } + newRootID := headCommit.RootID + mtime := time.Now().Unix() + mode := (syscall.S_IFDIR | 0644) + dent := fsmgr.NewDirent(dirID, filepath.Base(absPath), uint32(mode), mtime, "", 0) + + var names []string + rootID, _ = doPostMultiFiles(repo, newRootID, filepath.Dir(absPath), []*fsmgr.SeafDirent{dent}, user, false, &names) + if rootID == "" { + err := fmt.Errorf("failed to put dir") + return err + } + + buf := fmt.Sprintf("Added directory \"%s\"", relativeDirCan) + _, err = genNewCommit(repo, headCommit, rootID, user, buf) + if err != nil { + err := fmt.Errorf("failed to generate new commit: %v", err) + return err + } + + go mergeVirtualRepo(repo.ID, "") + + return nil +} + +func checkAndCreateDir(repo *repomgr.Repo, rootID, parentDir string, subFolders []string) (string, string, error) { + storeID := repo.StoreID + dir, err := fsmgr.GetSeafdirByPath(storeID, rootID, parentDir) + if err != nil { + err := fmt.Errorf("parent_dir %s doesn't exist in repo %s", parentDir, storeID) + return "", "", err + } + + entries := dir.Entries + var exists bool + var absPath string + var dirList []string + for i, dirName := range subFolders { + for _, de := range entries { + if de.Name == dirName { + exists = true + subDir, err := fsmgr.GetSeafdir(storeID, de.ID) + if err != nil { + err := fmt.Errorf("failed to get seaf dir: %v", err) + return "", "", err + } + entries = subDir.Entries + break + } + } + + if !exists { + relativePath := filepath.Join(subFolders[:i+1]...) 
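+			// Example (hypothetical values): with parentDir "/a" and subFolders
+			// ["b", "c", "d"] where only "b" already exists, the loop breaks at
+			// i == 1: relativePath is "b/c", absPath is "/a/b/c" and dirList is
+			// ["c", "d"]; after dropping its head below, the remaining ["d"] is
+			// created bottom-up by genDirRecursive.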
+ absPath = filepath.Join(parentDir, relativePath) + dirList = subFolders[i:] + break + } + exists = false + } + if dirList != nil { + dirList = dirList[1:] + } + if len(dirList) == 0 { + return absPath, "", nil + } + + dirID, err := genDirRecursive(repo, dirList) + if err != nil { + err := fmt.Errorf("failed to generate dir recursive: %v", err) + return "", "", err + } + + return absPath, dirID, nil +} + +func genDirRecursive(repo *repomgr.Repo, toPath []string) (string, error) { + if len(toPath) == 1 { + uniqueName := toPath[0] + mode := (syscall.S_IFDIR | 0644) + mtime := time.Now().Unix() + dent := fsmgr.NewDirent("", uniqueName, uint32(mode), mtime, "", 0) + newdir, err := fsmgr.NewSeafdir(1, []*fsmgr.SeafDirent{dent}) + if err != nil { + err := fmt.Errorf("failed to new seafdir: %v", err) + return "", err + } + err = fsmgr.SaveSeafdir(repo.StoreID, newdir) + if err != nil { + err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID) + return "", err + } + + return newdir.DirID, nil + } + + ret, err := genDirRecursive(repo, toPath[1:]) + if err != nil { + err := fmt.Errorf("failed to generate dir recursive: %v", err) + return "", err + } + if ret != "" { + uniqueName := toPath[0] + mode := (syscall.S_IFDIR | 0644) + mtime := time.Now().Unix() + dent := fsmgr.NewDirent(ret, uniqueName, uint32(mode), mtime, "", 0) + newdir, err := fsmgr.NewSeafdir(1, []*fsmgr.SeafDirent{dent}) + if err != nil { + err := fmt.Errorf("failed to new seafdir: %v", err) + return "", err + } + err = fsmgr.SaveSeafdir(repo.StoreID, newdir) + if err != nil { + err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID) + return "", err + } + ret = newdir.DirID + } + + return ret, nil +} + +func clearTmpFile(fsm *recvData, parentDir string) { + if fsm.rstart >= 0 && fsm.rend == fsm.fsize-1 { + filePath := filepath.Join("/", parentDir, fsm.fileNames[0]) + tmpFile, err := repomgr.GetUploadTmpFile(fsm.repoID, filePath) + if err == nil && tmpFile != "" { + os.Remove(tmpFile) + } + repomgr.DelUploadTmpFile(fsm.repoID, filePath) + } + + return +} + +func parseUploadHeaders(r *http.Request) (*recvData, *appError) { + tokenLen := 36 + parts := strings.Split(r.URL.Path[1:], "/") + if len(parts) < 2 { + msg := "Invalid URL" + return nil, &appError{nil, msg, http.StatusBadRequest} + } + urlOp := parts[0] + if len(parts[1]) < tokenLen { + msg := "Invalid URL" + return nil, &appError{nil, msg, http.StatusBadRequest} + } + token := parts[1][:tokenLen] + + accessInfo, appErr := parseWebaccessInfo(token) + if appErr != nil { + msg := "Access denied" + return nil, &appError{nil, msg, http.StatusBadRequest} + } + + repoID := accessInfo.repoID + op := accessInfo.op + user := accessInfo.user + id := accessInfo.objID + + status, err := repomgr.GetRepoStatus(repoID) + if err != nil { + msg := "Internal error.\n" + return nil, &appError{nil, msg, http.StatusInternalServerError} + } + if status != repomgr.RepoStatusNormal && status != -1 { + msg := "Access denied" + return nil, &appError{nil, msg, http.StatusBadRequest} + } + + if op == "upload-link" { + op = "upload" + } + if strings.Index(urlOp, op) != 0 { + msg := "Access denied" + return nil, &appError{nil, msg, http.StatusBadRequest} + } + + fsm := new(recvData) + + if op != "update" { + obj := make(map[string]interface{}) + if err := json.Unmarshal([]byte(id), &obj); err != nil { + err := fmt.Errorf("failed to decode obj data : %v", err) + return nil, &appError{err, "", http.StatusBadRequest} + } + + parentDir, ok := obj["parent_dir"].(string) + if !ok 
|| parentDir == "" { + msg := "Invalid URL" + return nil, &appError{nil, msg, http.StatusBadRequest} + } + fsm.parentDir = parentDir + } + + fsm.tokenType = accessInfo.op + fsm.repoID = repoID + fsm.user = user + fsm.rstart = -1 + fsm.rend = -1 + fsm.fsize = -1 + + ranges := r.Header.Get("Content-Range") + if ranges != "" { + parseContentRange(ranges, fsm) + } + + return fsm, nil +} + +func postMultiFiles(rsp http.ResponseWriter, r *http.Request, repoID, parentDir, user string, fsm *recvData, replace bool, isAjax bool) *appError { + + fileNames := fsm.fileNames + files := fsm.files + repo := repomgr.Get(repoID) + if repo == nil { + msg := "Failed to get repo.\n" + err := fmt.Errorf("Failed to get repo %s", repoID) + return &appError{err, msg, http.StatusInternalServerError} + } + + canonPath := getCanonPath(parentDir) + + for _, fileName := range fileNames { + if shouldIgnoreFile(fileName) { + msg := fmt.Sprintf("invalid fileName: %s.\n", fileName) + return &appError{nil, msg, http.StatusBadRequest} + } + } + if strings.Index(parentDir, "//") != -1 { + msg := "parent_dir contains // sequence.\n" + return &appError{nil, msg, http.StatusBadRequest} + } + + var cryptKey *seafileCrypt + if repo.IsEncrypted { + key, err := parseCryptKey(rsp, repoID, user) + if err != nil { + return err + } + cryptKey = key + } + + var ids []string + var sizes []int64 + if fsm.rstart >= 0 { + for _, filePath := range files { + id, size, err := indexBlocks(repo.StoreID, repo.Version, filePath, nil, cryptKey) + if err != nil { + err := fmt.Errorf("failed to index blocks: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + ids = append(ids, id) + sizes = append(sizes, size) + } + } else { + for _, handler := range fsm.fileHeaders { + id, size, err := indexBlocks(repo.StoreID, repo.Version, "", handler, cryptKey) + if err != nil { + err := fmt.Errorf("failed to index blocks: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + ids = append(ids, id) + sizes = append(sizes, size) + } + } + + retStr, err := postFilesAndGenCommit(fileNames, repo, user, canonPath, replace, ids, sizes) + if err != nil { + err := fmt.Errorf("failed to post files and gen commit: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + + _, ok := r.Form["ret-json"] + if ok || isAjax { + rsp.Write([]byte(retStr)) + } else { + var array []map[string]interface{} + err := json.Unmarshal([]byte(retStr), &array) + if err != nil { + msg := "Internal error.\n" + err := fmt.Errorf("failed to decode data to json: %v", err) + return &appError{err, msg, http.StatusInternalServerError} + } + + var ids []string + for _, v := range array { + id, ok := v["id"].(string) + if !ok { + msg := "Internal error.\n" + err := fmt.Errorf("failed to assert") + return &appError{err, msg, http.StatusInternalServerError} + } + ids = append(ids, id) + } + newIDs := strings.Join(ids, "\t") + rsp.Write([]byte(newIDs)) + } + + return nil +} + +func postFilesAndGenCommit(fileNames []string, repo *repomgr.Repo, user, canonPath string, replace bool, ids []string, sizes []int64) (string, error) { + headCommit, err := commitmgr.Load(repo.ID, repo.HeadCommitID) + if err != nil { + err := fmt.Errorf("failed to get head commit for repo %s", repo.ID) + return "", err + } + var names []string + + var dents []*fsmgr.SeafDirent + for i, name := range fileNames { + if i > len(ids)-1 || i > len(sizes)-1 { + break + } + mode := (syscall.S_IFREG | 0644) + mtime := time.Now().Unix() + dent := fsmgr.NewDirent(ids[i], name, 
uint32(mode), mtime, "", sizes[i])
+		dents = append(dents, dent)
+	}
+
+	rootID, err := doPostMultiFiles(repo, headCommit.RootID, canonPath, dents, user, replace, &names)
+	if err != nil {
+		err := fmt.Errorf("failed to post files to %s in repo %s", canonPath, repo.ID)
+		return "", err
+	}
+
+	var buf string
+	if len(fileNames) > 1 {
+		buf = fmt.Sprintf("Added \"%s\" and %d more files.", fileNames[0], len(fileNames)-1)
+	} else {
+		buf = fmt.Sprintf("Added \"%s\".", fileNames[0])
+	}
+
+	_, err = genNewCommit(repo, headCommit, rootID, user, buf)
+	if err != nil {
+		err := fmt.Errorf("failed to generate new commit: %v", err)
+		return "", err
+	}
+
+	go mergeVirtualRepo(repo.ID, "")
+
+	go updateRepoSize(repo.ID)
+
+	retJSON, err := formatJSONRet(names, ids, sizes)
+	if err != nil {
+		err := fmt.Errorf("failed to format json data")
+		return "", err
+	}
+
+	return string(retJSON), nil
+}
+
+func formatJSONRet(nameList, idList []string, sizeList []int64) ([]byte, error) {
+	var array []map[string]interface{}
+	for i := range nameList {
+		if i >= len(idList) || i >= len(sizeList) {
+			break
+		}
+		obj := make(map[string]interface{})
+		obj["name"] = nameList[i]
+		obj["id"] = idList[i]
+		obj["size"] = sizeList[i]
+		array = append(array, obj)
+	}
+
+	jsonstr, err := json.Marshal(array)
+	if err != nil {
+		err := fmt.Errorf("failed to convert array to json")
+		return nil, err
+	}
+
+	return jsonstr, nil
+}
+
+func getCanonPath(p string) string {
+	formatPath := strings.Replace(p, "\\", "/", -1)
+	return filepath.Join(formatPath)
+}
+
+func genNewCommit(repo *repomgr.Repo, base *commitmgr.Commit, newRoot, user, desc string) (string, error) {
+	var retryCnt int
+	repoID := repo.ID
+	commit := commitmgr.NewCommit(repoID, base.CommitID, newRoot, user, desc)
+	repomgr.RepoToCommit(repo, commit)
+	err := commitmgr.Save(commit)
+	if err != nil {
+		err := fmt.Errorf("failed to add commit: %v", err)
+		return "", err
+	}
+	var commitID string
+
+	for {
+		retry, err := genCommitNeedRetry(repo, base, commit, newRoot, user, &commitID)
+		if err != nil {
+			return "", err
+		}
+		if !retry {
+			break
+		}
+
+		if retryCnt < 3 {
+			random := rand.Intn(10) + 1
+			time.Sleep(time.Duration(random*100) * time.Millisecond)
+			repo = repomgr.Get(repoID)
+			if repo == nil {
+				err := fmt.Errorf("repo %s doesn't exist", repoID)
+				return "", err
+			}
+			retryCnt++
+		} else {
+			err := fmt.Errorf("stop updating repo %s after 3 retries", repoID)
+			return "", err
+		}
+	}
+
+	return commitID, nil
+}
+
+func fastForwardOrMerge(user string, repo *repomgr.Repo, base, newCommit *commitmgr.Commit) error {
+	var retryCnt int
+	for {
+		retry, err := genCommitNeedRetry(repo, base, newCommit, newCommit.RootID, user, nil)
+		if err != nil {
+			return err
+		}
+		if !retry {
+			break
+		}
+
+		if retryCnt < 3 {
+			random := rand.Intn(10) + 1
+			time.Sleep(time.Duration(random*100) * time.Millisecond)
+			retryCnt++
+		} else {
+			err := fmt.Errorf("stop updating repo %s after 3 retries", repo.ID)
+			return err
+		}
+	}
+	return nil
+}
+
+func genCommitNeedRetry(repo *repomgr.Repo, base *commitmgr.Commit, commit *commitmgr.Commit, newRoot, user string, commitID *string) (bool, error) {
+	repoID := repo.ID
+	var mergeDesc string
+	var mergedCommit *commitmgr.Commit
+	currentHead, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
+	if err != nil {
+		err := fmt.Errorf("failed to get head commit for repo %s", repoID)
+		return false, err
+	}
+
+	if base.CommitID != currentHead.CommitID {
+		roots := []string{base.RootID, currentHead.RootID, newRoot}
+		opt := new(mergeOptions)
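+		// Concurrent-update path: the branch head moved while this commit was
+		// being prepared, so merge the three roots (base = common ancestor,
+		// currentHead = concurrent changes, newRoot = our changes) and record
+		// the result as a merge commit with two parents.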
opt.remoteRepoID = repoID + opt.remoteHead = commit.CommitID + + err := mergeTrees(repo.StoreID, roots, opt) + if err != nil { + err := fmt.Errorf("failed to merge") + return false, err + } + + if !opt.conflict { + mergeDesc = fmt.Sprintf("Auto merge by system") + } else { + mergeDesc = genMergeDesc(repo, opt.mergedRoot, currentHead.RootID, newRoot) + if mergeDesc == "" { + mergeDesc = fmt.Sprintf("Auto merge by system") + } + } + + mergedCommit = commitmgr.NewCommit(repoID, currentHead.CommitID, opt.mergedRoot, user, mergeDesc) + repomgr.RepoToCommit(repo, mergedCommit) + mergedCommit.SecondParentID = commit.CommitID + mergedCommit.NewMerge = 1 + if opt.conflict { + mergedCommit.Conflict = 1 + } + + err = commitmgr.Save(mergedCommit) + if err != nil { + err := fmt.Errorf("failed to add commit: %v", err) + return false, err + } + } else { + mergedCommit = commit + } + + err = updateBranch(repoID, mergedCommit.CommitID, currentHead.CommitID) + if err != nil { + return true, nil + } + + if commitID != nil { + *commitID = mergedCommit.CommitID + } + return false, nil +} + +func genMergeDesc(repo *repomgr.Repo, mergedRoot, p1Root, p2Root string) string { + var results []*diff.DiffEntry + err := diff.DiffMergeRoots(repo.StoreID, mergedRoot, p1Root, p2Root, &results, true) + if err != nil { + return "" + } + + desc := diff.DiffResultsToDesc(results) + + return desc +} + +func updateBranch(repoID, newCommitID, oldCommitID string) error { + var commitID string + name := "master" + var sqlStr string + if strings.EqualFold(dbType, "mysql") { + sqlStr = "SELECT commit_id FROM Branch WHERE name = ? AND repo_id = ? FOR UPDATE" + } else { + sqlStr = "SELECT commit_id FROM Branch WHERE name = ? AND repo_id = ?" + } + + trans, err := seafileDB.Begin() + if err != nil { + err := fmt.Errorf("failed to start transaction: %v", err) + return err + } + row := trans.QueryRow(sqlStr, name, repoID) + if err := row.Scan(&commitID); err != nil { + if err != sql.ErrNoRows { + trans.Rollback() + return err + } + } + if oldCommitID != commitID { + trans.Rollback() + err := fmt.Errorf("head commit id has changed") + return err + } + + sqlStr = "UPDATE Branch SET commit_id = ? WHERE name = ? AND repo_id = ?" 
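+	// Optimistic concurrency: the SELECT above (FOR UPDATE on MySQL) read the
+	// head inside this transaction, and the UPDATE is only reached when it
+	// still equals oldCommitID; on a mismatch the transaction was rolled back
+	// and genCommitNeedRetry reports retry to the caller.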
+ _, err = trans.Exec(sqlStr, newCommitID, name, repoID) + if err != nil { + trans.Rollback() + return err + } + + trans.Commit() + + return nil +} + +func doPostMultiFiles(repo *repomgr.Repo, rootID, parentDir string, dents []*fsmgr.SeafDirent, user string, replace bool, names *[]string) (string, error) { + if parentDir[0] == '/' { + parentDir = parentDir[1:] + } + + id, err := postMultiFilesRecursive(repo, rootID, parentDir, user, dents, replace, names) + if err != nil { + err := fmt.Errorf("failed to post multi files: %v", err) + return "", err + } + + return id, nil +} + +func postMultiFilesRecursive(repo *repomgr.Repo, dirID, toPath, user string, dents []*fsmgr.SeafDirent, replace bool, names *[]string) (string, error) { + olddir, err := fsmgr.GetSeafdir(repo.StoreID, dirID) + if err != nil { + err := fmt.Errorf("failed to get dir") + return "", err + } + + var ret string + + if toPath == "" { + err := addNewEntries(repo, user, &olddir.Entries, dents, replace, names) + if err != nil { + err := fmt.Errorf("failed to add new entries: %v", err) + return "", err + } + newdir, err := fsmgr.NewSeafdir(1, olddir.Entries) + if err != nil { + err := fmt.Errorf("failed to new seafdir: %v", err) + return "", err + } + err = fsmgr.SaveSeafdir(repo.StoreID, newdir) + if err != nil { + err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID) + return "", err + } + + return newdir.DirID, nil + } + + var remain string + firstName := toPath + if slash := strings.Index(toPath, "/"); slash >= 0 { + remain = toPath[slash+1:] + firstName = toPath[:slash] + } + + entries := olddir.Entries + for i, dent := range entries { + if dent.Name != firstName { + continue + } + + id, err := postMultiFilesRecursive(repo, dent.ID, remain, user, dents, replace, names) + if err != nil { + err := fmt.Errorf("failed to post dirent %s: %v", dent.Name, err) + return "", err + } + ret = id + if id != "" { + entries[i].ID = id + entries[i].Mtime = time.Now().Unix() + } + break + } + + if ret != "" { + newdir, err := fsmgr.NewSeafdir(1, entries) + if err != nil { + err := fmt.Errorf("failed to new seafdir: %v", err) + return "", err + } + err = fsmgr.SaveSeafdir(repo.StoreID, newdir) + if err != nil { + err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID) + return "", err + } + ret = newdir.DirID + } + + return ret, nil +} + +func addNewEntries(repo *repomgr.Repo, user string, oldDents *[]*fsmgr.SeafDirent, newDents []*fsmgr.SeafDirent, replaceExisted bool, names *[]string) error { + for _, dent := range newDents { + var replace bool + var uniqueName string + if replaceExisted { + for i, entry := range *oldDents { + if entry.Name == dent.Name { + replace = true + *oldDents = append((*oldDents)[:i], (*oldDents)[i+1:]...) 
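+					// Replace semantics: the old dirent with the same name is
+					// spliced out here and a fresh one is appended below; without
+					// replaceExisted, genUniqueName renames instead, e.g.
+					// "a.txt" -> "a (1).txt" (illustrative name).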
+					break
+				}
+			}
+		}
+
+		if replace {
+			uniqueName = dent.Name
+		} else {
+			uniqueName = genUniqueName(dent.Name, *oldDents)
+		}
+		if uniqueName != "" {
+			newDent := fsmgr.NewDirent(dent.ID, uniqueName, dent.Mode, dent.Mtime, user, dent.Size)
+			*oldDents = append(*oldDents, newDent)
+			*names = append(*names, uniqueName)
+		} else {
+			err := fmt.Errorf("failed to generate unique name for %s", dent.Name)
+			return err
+		}
+	}
+
+	sort.Sort(Dirents(*oldDents))
+
+	return nil
+}
+
+func genUniqueName(fileName string, entries []*fsmgr.SeafDirent) string {
+	var uniqueName string
+	var name string
+	i := 1
+	dot := strings.Index(fileName, ".")
+	if dot < 0 {
+		name = fileName
+	} else {
+		name = fileName[:dot]
+	}
+	uniqueName = fileName
+	for nameExists(entries, uniqueName) && i <= 100 {
+		if dot < 0 {
+			uniqueName = fmt.Sprintf("%s (%d)", name, i)
+		} else {
+			uniqueName = fmt.Sprintf("%s (%d).%s", name, i, fileName[dot+1:])
+		}
+		i++
+	}
+
+	if i <= 100 {
+		return uniqueName
+	}
+
+	return ""
+}
+
+func nameExists(entries []*fsmgr.SeafDirent, fileName string) bool {
+	for _, entry := range entries {
+		if entry.Name == fileName {
+			return true
+		}
+	}
+
+	return false
+}
+
+func shouldIgnoreFile(fileName string) bool {
+	if !utf8.ValidString(fileName) {
+		log.Printf("file name %s contains non-UTF8 characters, skip", fileName)
+		return true
+	}
+
+	if len(fileName) >= 256 {
+		return true
+	}
+
+	if strings.Index(fileName, "/") != -1 {
+		return true
+	}
+
+	return false
+}
+
+func indexBlocks(repoID string, version int, filePath string, handler *multipart.FileHeader, cryptKey *seafileCrypt) (string, int64, error) {
+	var size int64
+	if handler != nil {
+		size = handler.Size
+	} else {
+		f, err := os.Open(filePath)
+		if err != nil {
+			err := fmt.Errorf("failed to open file: %s: %v", filePath, err)
+			return "", -1, err
+		}
+		defer f.Close()
+		fileInfo, err := f.Stat()
+		if err != nil {
+			err := fmt.Errorf("failed to stat file %s: %v", filePath, err)
+			return "", -1, err
+		}
+		size = fileInfo.Size()
+	}
+
+	if size == 0 {
+		return fsmgr.EmptySha1, 0, nil
+	}
+
+	chunkJobs := make(chan chunkingData, 10)
+	results := make(chan chunkingResult, 10)
+	go createChunkPool(int(options.maxIndexingThreads), chunkJobs, results)
+
+	var blkSize int64
+	var offset int64
+
+	jobNum := (uint64(size) + options.fixedBlockSize - 1) / options.fixedBlockSize
+	blkIDs := make([]string, jobNum)
+
+	left := size
+	for {
+		if uint64(left) >= options.fixedBlockSize {
+			blkSize = int64(options.fixedBlockSize)
+		} else {
+			blkSize = left
+		}
+		if left > 0 {
+			job := chunkingData{repoID, filePath, handler, offset, cryptKey}
+			select {
+			case chunkJobs <- job:
+				left -= blkSize
+				offset += blkSize
+			case result := <-results:
+				if result.err != nil {
+					close(chunkJobs)
+					go func() {
+						for result := range results {
+							_ = result
+						}
+					}()
+					return "", -1, result.err
+				}
+				blkIDs[result.idx] = result.blkID
+			}
+		} else {
+			close(chunkJobs)
+			for result := range results {
+				if result.err != nil {
+					go func() {
+						for result := range results {
+							_ = result
+						}
+					}()
+					return "", -1, result.err
+				}
+				blkIDs[result.idx] = result.blkID
+			}
+			break
+		}
+	}
+
+	fileID, err := writeSeafile(repoID, version, size, blkIDs)
+	if err != nil {
+		err := fmt.Errorf("failed to write seafile: %v", err)
+		return "", -1, err
+	}
+
+	return fileID, size, nil
+}
+
+func writeSeafile(repoID string, version int, fileSize int64, blkIDs []string) (string, error) {
+	seafile, err := fsmgr.NewSeafile(version, fileSize, blkIDs)
+	if err != nil {
+		err := fmt.Errorf("failed to new seafile: %v", err)
%v", err) + return "", err + } + + err = fsmgr.SaveSeafile(repoID, seafile) + if err != nil { + err := fmt.Errorf("failed to save seafile %s/%s", repoID, seafile.FileID) + return "", err + } + + return seafile.FileID, nil +} + +type chunkingData struct { + repoID string + filePath string + handler *multipart.FileHeader + offset int64 + cryptKey *seafileCrypt +} + +type chunkingResult struct { + idx int64 + blkID string + err error +} + +func createChunkPool(n int, chunkJobs chan chunkingData, res chan chunkingResult) { + var wg sync.WaitGroup + for i := 0; i < n; i++ { + wg.Add(1) + go chunkingWorker(&wg, chunkJobs, res) + } + wg.Wait() + close(res) +} + +func chunkingWorker(wg *sync.WaitGroup, chunkJobs chan chunkingData, res chan chunkingResult) { + for job := range chunkJobs { + job := job + blkID, err := chunkFile(job) + idx := job.offset / int64(options.fixedBlockSize) + result := chunkingResult{idx, blkID, err} + res <- result + } + wg.Done() +} + +func chunkFile(job chunkingData) (string, error) { + repoID := job.repoID + offset := job.offset + filePath := job.filePath + handler := job.handler + blkSize := options.fixedBlockSize + cryptKey := job.cryptKey + var file multipart.File + if handler != nil { + f, err := handler.Open() + if err != nil { + err := fmt.Errorf("failed to open file for read: %v", err) + return "", err + } + defer f.Close() + file = f + } else { + f, err := os.Open(filePath) + if err != nil { + err := fmt.Errorf("failed to open file for read: %v", err) + return "", err + } + defer f.Close() + file = f + } + _, err := file.Seek(offset, os.SEEK_SET) + if err != nil { + err := fmt.Errorf("failed to seek file: %v", err) + return "", err + } + buf := make([]byte, blkSize) + n, err := file.Read(buf) + if err != nil { + err := fmt.Errorf("failed to seek file: %v", err) + return "", err + } + buf = buf[:n] + + blkID, err := writeChunk(repoID, buf, int64(n), cryptKey) + if err != nil { + err := fmt.Errorf("failed to write chunk: %v", err) + return "", err + } + + return blkID, nil +} + +func writeChunk(repoID string, input []byte, blkSize int64, cryptKey *seafileCrypt) (string, error) { + var blkID string + if cryptKey != nil && blkSize > 0 { + encKey := cryptKey.key + encIv := cryptKey.iv + encoded, err := encrypt(input, encKey, encIv) + if err != nil { + err := fmt.Errorf("failed to encrypt block: %v", err) + return "", err + } + checkSum := sha1.Sum(encoded) + blkID = hex.EncodeToString(checkSum[:]) + reader := bytes.NewReader(encoded) + err = blockmgr.Write(repoID, blkID, reader) + if err != nil { + err := fmt.Errorf("failed to write block: %v", err) + return "", err + } + } else { + checkSum := sha1.Sum(input) + blkID = hex.EncodeToString(checkSum[:]) + reader := bytes.NewReader(input) + err := blockmgr.Write(repoID, blkID, reader) + if err != nil { + err := fmt.Errorf("failed to write block: %v", err) + return "", err + } + } + + return blkID, nil +} + +func checkTmpFileList(fsm *recvData) *appError { + var totalSize int64 + if fsm.rstart >= 0 { + for _, tmpFile := range fsm.files { + fileInfo, err := os.Stat(tmpFile) + if err != nil { + msg := "Internal error.\n" + err := fmt.Errorf("[upload] Failed to stat temp file %s", tmpFile) + return &appError{err, msg, http.StatusInternalServerError} + } + totalSize += fileInfo.Size() + } + } else { + for _, handler := range fsm.fileHeaders { + totalSize += handler.Size + } + } + + if options.maxUploadSize > 0 && uint64(totalSize) > options.maxUploadSize { + msg := "File size is too large.\n" + return &appError{nil, msg, 
seafHTTPResTooLarge} + } + + return nil +} + +func checkParentDir(repoID string, parentDir string) *appError { + repo := repomgr.Get(repoID) + if repo == nil { + msg := "Failed to get repo.\n" + err := fmt.Errorf("Failed to get repo %s", repoID) + return &appError{err, msg, http.StatusInternalServerError} + } + + commit, err := commitmgr.Load(repoID, repo.HeadCommitID) + if err != nil { + msg := "Failed to get head commit.\n" + err := fmt.Errorf("Failed to get head commit for repo %s", repoID) + return &appError{err, msg, http.StatusInternalServerError} + } + + canonPath := getCanonPath(parentDir) + + _, err = fsmgr.GetSeafdirByPath(repo.StoreID, commit.RootID, canonPath) + if err != nil { + msg := "Parent dir doesn't exist.\n" + return &appError{nil, msg, http.StatusBadRequest} + } + + return nil +} + +func isParentMatched(uploadDir, parentDir string) bool { + uploadCanon := filepath.Join("/", uploadDir) + parentCanon := filepath.Join("/", parentDir) + if uploadCanon != parentCanon { + return false + } + + return true +} + +func parseContentRange(ranges string, fsm *recvData) bool { + start := strings.Index(ranges, "bytes") + end := strings.Index(ranges, "-") + slash := strings.Index(ranges, "/") + + if start < 0 || end < 0 || slash < 0 { + return false + } + + startStr := strings.TrimLeft(ranges[start+len("bytes"):end], " ") + firstByte, err := strconv.ParseInt(startStr, 10, 64) + if err != nil { + return false + } + + lastByte, err := strconv.ParseInt(ranges[end+1:slash], 10, 64) + if err != nil { + return false + } + + fileSize, err := strconv.ParseInt(ranges[slash+1:], 10, 64) + if err != nil { + return false + } + + if firstByte > lastByte || lastByte >= fileSize { + return false + } + + fsm.rstart = firstByte + fsm.rend = lastByte + fsm.fsize = fileSize + + return true +} + +type webaccessInfo struct { + repoID string + objID string + op string + user string +} + +func parseWebaccessInfo(token string) (*webaccessInfo, *appError) { + webaccess, err := rpcclient.Call("seafile_web_query_access_token", token) + if err != nil { + err := fmt.Errorf("failed to get web access token: %v", err) + return nil, &appError{err, "", http.StatusInternalServerError} + } + if webaccess == nil { + msg := "Bad access token" + return nil, &appError{err, msg, http.StatusBadRequest} + } + + webaccessMap, ok := webaccess.(map[string]interface{}) + if !ok { + return nil, &appError{nil, "", http.StatusInternalServerError} + } + + accessInfo := new(webaccessInfo) + repoID, ok := webaccessMap["repo-id"].(string) + if !ok { + return nil, &appError{nil, "", http.StatusInternalServerError} + } + accessInfo.repoID = repoID + + id, ok := webaccessMap["obj-id"].(string) + if !ok { + return nil, &appError{nil, "", http.StatusInternalServerError} + } + accessInfo.objID = id + + op, ok := webaccessMap["op"].(string) + if !ok { + return nil, &appError{nil, "", http.StatusInternalServerError} + } + accessInfo.op = op + + user, ok := webaccessMap["username"].(string) + if !ok { + return nil, &appError{nil, "", http.StatusInternalServerError} + } + accessInfo.user = user + + return accessInfo, nil +} + +func updateDir(repoID, dirPath, newDirID, user, headID string) (string, error) { + repo := repomgr.Get(repoID) + if repo == nil { + err := fmt.Errorf("failed to get repo %.10s", repoID) + return "", err + } + + var base string + if headID == "" { + base = repo.HeadCommitID + } else { + base = headID + } + + headCommit, err := commitmgr.Load(repo.ID, base) + if err != nil { + err := fmt.Errorf("failed to get head commit for 
repo %s", repo.ID) + return "", err + } + + if dirPath == "/" { + commitDesc := genCommitDesc(repo, newDirID, headCommit.RootID) + if commitDesc == "" { + commitDesc = fmt.Sprintf("Auto merge by system") + } + newCommitID, err := genNewCommit(repo, headCommit, newDirID, user, commitDesc) + if err != nil { + err := fmt.Errorf("failed to generate new commit: %v", err) + return "", err + } + return newCommitID, nil + } + + parent := filepath.Dir(dirPath) + canonPath := getCanonPath(parent) + dirName := filepath.Base(dirPath) + + dir, err := fsmgr.GetSeafdirByPath(repo.StoreID, headCommit.RootID, canonPath) + if err != nil { + err := fmt.Errorf("dir %s doesn't exist in repo %s", canonPath, repo.StoreID) + return "", err + } + var exists bool + for _, de := range dir.Entries { + if de.Name == dirName { + exists = true + } + } + if !exists { + err := fmt.Errorf("directory %s doesn't exist in repo %s", dirName, repo.StoreID) + return "", err + } + newDent := new(fsmgr.SeafDirent) + newDent.ID = newDirID + newDent.Mode = (syscall.S_IFDIR | 0644) + newDent.Mtime = time.Now().Unix() + newDent.Name = dirName + + rootID, err := doPutFile(repo, headCommit.RootID, canonPath, newDent) + if err != nil || rootID == "" { + err := fmt.Errorf("failed to put file") + return "", err + } + + commitDesc := genCommitDesc(repo, rootID, headCommit.RootID) + if commitDesc == "" { + commitDesc = fmt.Sprintf("Auto merge by system") + } + + newCommitID, err := genNewCommit(repo, headCommit, rootID, user, commitDesc) + if err != nil { + err := fmt.Errorf("failed to generate new commit: %v", err) + return "", err + } + + go updateRepoSize(repoID) + + return newCommitID, nil +} + +func genCommitDesc(repo *repomgr.Repo, root, parentRoot string) string { + var results []*diff.DiffEntry + err := diff.DiffCommitRoots(repo.StoreID, parentRoot, root, &results, true) + if err != nil { + return "" + } + + desc := diff.DiffResultsToDesc(results) + + return desc +} + +func doPutFile(repo *repomgr.Repo, rootID, parentDir string, dent *fsmgr.SeafDirent) (string, error) { + if strings.Index(parentDir, "/") == 0 { + parentDir = parentDir[1:] + } + + return putFileRecursive(repo, rootID, parentDir, dent) +} + +func putFileRecursive(repo *repomgr.Repo, dirID, toPath string, newDent *fsmgr.SeafDirent) (string, error) { + olddir, err := fsmgr.GetSeafdir(repo.StoreID, dirID) + if err != nil { + err := fmt.Errorf("failed to get dir") + return "", err + } + entries := olddir.Entries + + var ret string + + if toPath == "" { + var newEntries []*fsmgr.SeafDirent + for _, dent := range entries { + if dent.Name == newDent.Name { + newEntries = append(newEntries, newDent) + } else { + newEntries = append(newEntries, dent) + } + } + + newdir, err := fsmgr.NewSeafdir(1, newEntries) + if err != nil { + err := fmt.Errorf("failed to new seafdir: %v", err) + return "", err + } + err = fsmgr.SaveSeafdir(repo.StoreID, newdir) + if err != nil { + err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID) + return "", err + } + + return newdir.DirID, nil + } + + var remain string + firstName := toPath + if slash := strings.Index(toPath, "/"); slash >= 0 { + remain = toPath[slash+1:] + firstName = toPath[:slash] + } + + for _, dent := range entries { + if dent.Name != firstName { + continue + } + id, err := putFileRecursive(repo, dent.ID, remain, newDent) + if err != nil { + err := fmt.Errorf("failed to put dirent %s: %v", dent.Name, err) + return "", err + } + if id != "" { + dent.ID = id + dent.Mtime = time.Now().Unix() + } + ret = id + break + 
+	}
+
+	if ret != "" {
+		newdir, err := fsmgr.NewSeafdir(1, entries)
+		if err != nil {
+			err := fmt.Errorf("failed to new seafdir: %v", err)
+			return "", err
+		}
+		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
+		if err != nil {
+			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
+			return "", err
+		}
+		ret = newdir.DirID
+	}
+
+	return ret, nil
+}
+
+func updateAPICB(rsp http.ResponseWriter, r *http.Request) *appError {
+	fsm, err := parseUploadHeaders(r)
+	if err != nil {
+		return err
+	}
+
+	if err := doUpdate(rsp, r, fsm, false); err != nil {
+		formatJSONError(rsp, err)
+		return err
+	}
+
+	return nil
+}
+
+func updateAjaxCB(rsp http.ResponseWriter, r *http.Request) *appError {
+	fsm, err := parseUploadHeaders(r)
+	if err != nil {
+		return err
+	}
+
+	if err := doUpdate(rsp, r, fsm, true); err != nil {
+		formatJSONError(rsp, err)
+		return err
+	}
+
+	return nil
+}
+
+func doUpdate(rsp http.ResponseWriter, r *http.Request, fsm *recvData, isAjax bool) *appError {
+	rsp.Header().Set("Access-Control-Allow-Origin", "*")
+	rsp.Header().Set("Access-Control-Allow-Headers", "x-requested-with, content-type, content-range, content-disposition, accept, origin, authorization")
+	rsp.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, PATCH, DELETE, OPTIONS")
+	rsp.Header().Set("Access-Control-Max-Age", "86400")
+
+	if r.Method == "OPTIONS" {
+		rsp.WriteHeader(http.StatusOK)
+		return nil
+	}
+
+	if err := r.ParseMultipartForm(1 << 20); err != nil {
+		return &appError{nil, "", http.StatusBadRequest}
+	}
+	defer r.MultipartForm.RemoveAll()
+
+	repoID := fsm.repoID
+	user := fsm.user
+
+	targetFile := r.FormValue("target_file")
+	if targetFile == "" {
+		msg := "Invalid URL.\n"
+		return &appError{nil, msg, http.StatusBadRequest}
+	}
+
+	parentDir := filepath.Dir(targetFile)
+	fileName := filepath.Base(targetFile)
+
+	defer clearTmpFile(fsm, parentDir)
+
+	if fsm.rstart >= 0 {
+		if parentDir[0] != '/' {
+			msg := "Invalid parent dir"
+			return &appError{nil, msg, http.StatusBadRequest}
+		}
+
+		formFiles := r.MultipartForm.File
+		files, ok := formFiles["file"]
+		if !ok {
+			msg := "No file in multipart form.\n"
+			return &appError{nil, msg, http.StatusBadRequest}
+		}
+
+		if len(files) > 1 {
+			msg := "More than one file in one request.\n"
+			return &appError{nil, msg, http.StatusBadRequest}
+		}
+
+		err := writeBlockDataToTmpFile(r, fsm, formFiles, repoID, parentDir)
+		if err != nil {
+			msg := "Internal error.\n"
+			err := fmt.Errorf("failed to write block data to tmp file: %v", err)
+			return &appError{err, msg, http.StatusInternalServerError}
+		}
+
+		if fsm.rend != fsm.fsize-1 {
+			rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
+			success := "{\"success\": true}"
+			_, err := rsp.Write([]byte(success))
+			if err != nil {
+				log.Printf("failed to write data to response")
+			}
+
+			return nil
+		}
+	} else {
+		formFiles := r.MultipartForm.File
+		fileHeaders, ok := formFiles["file"]
+		if !ok {
+			msg := "No file in multipart form.\n"
+			return &appError{nil, msg, http.StatusBadRequest}
+		}
+		if len(fileHeaders) > 1 {
+			msg := "More than one file in one request.\n"
+			return &appError{nil, msg, http.StatusBadRequest}
+		}
+		for _, handler := range fileHeaders {
+			fileName := filepath.Base(handler.Filename)
+			fsm.fileNames = append(fsm.fileNames, fileName)
+			fsm.fileHeaders = append(fsm.fileHeaders, handler)
+		}
+	}
+
+	if fsm.fileNames == nil {
+		msg := "No file.\n"
+		return &appError{nil, msg, http.StatusBadRequest}
+	}
+
+	if err := checkParentDir(repoID, parentDir); err != nil {
+		return err
+	}
+
+	if err := checkTmpFileList(fsm); err != nil {
+		return err
+	}
+
+	var contentLen int64
+	if fsm.fsize > 0 {
+		contentLen = fsm.fsize
+	} else {
+		lenstr := r.Header.Get("Content-Length")
+		if lenstr == "" {
+			contentLen = -1
+		} else {
+			tmpLen, err := strconv.ParseInt(lenstr, 10, 64)
+			if err != nil {
+				msg := "Internal error.\n"
+				err := fmt.Errorf("failed to parse content len: %v", err)
+				return &appError{err, msg, http.StatusInternalServerError}
+			}
+			contentLen = tmpLen
+		}
+	}
+
+	ret, err := checkQuota(repoID, contentLen)
+	if err != nil {
+		msg := "Internal error.\n"
+		err := fmt.Errorf("failed to check quota: %v", err)
+		return &appError{err, msg, http.StatusInternalServerError}
+	}
+	if ret == 1 {
+		msg := "Out of quota.\n"
+		return &appError{nil, msg, seafHTTPResNoQuota}
+	}
+
+	headIDs, ok := r.Form["head"]
+	var headID string
+	if ok {
+		headID = headIDs[0]
+	}
+
+	rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
+
+	if err := putFile(rsp, r, repoID, parentDir, user, fileName, fsm, headID, isAjax); err != nil {
+		return err
+	}
+
+	oper := "web-file-upload"
+	sendStatisticMsg(repoID, user, oper, uint64(contentLen))
+
+	return nil
+}
+
+func putFile(rsp http.ResponseWriter, r *http.Request, repoID, parentDir, user, fileName string, fsm *recvData, headID string, isAjax bool) *appError {
+	files := fsm.files
+	repo := repomgr.Get(repoID)
+	if repo == nil {
+		msg := "Failed to get repo.\n"
+		err := fmt.Errorf("Failed to get repo %s", repoID)
+		return &appError{err, msg, http.StatusInternalServerError}
+	}
+
+	var base string
+	if headID != "" {
+		base = headID
+	} else {
+		base = repo.HeadCommitID
+	}
+
+	headCommit, err := commitmgr.Load(repo.ID, base)
+	if err != nil {
+		msg := "Failed to get head commit.\n"
+		err := fmt.Errorf("failed to get head commit for repo %s", repo.ID)
+		return &appError{err, msg, http.StatusInternalServerError}
+	}
+
+	canonPath := getCanonPath(parentDir)
+
+	if shouldIgnoreFile(fileName) {
+		msg := fmt.Sprintf("invalid fileName: %s.\n", fileName)
+		return &appError{nil, msg, http.StatusBadRequest}
+	}
+
+	if strings.Index(parentDir, "//") != -1 {
+		msg := "parent_dir contains // sequence.\n"
+		return &appError{nil, msg, http.StatusBadRequest}
+	}
+
+	exist, _ := checkFileExists(repo.StoreID, headCommit.RootID, canonPath, fileName)
+	if !exist {
+		msg := "File does not exist.\n"
+		return &appError{nil, msg, seafHTTPResNotExists}
+	}
+
+	var cryptKey *seafileCrypt
+	if repo.IsEncrypted {
+		key, err := parseCryptKey(rsp, repoID, user)
+		if err != nil {
+			return err
+		}
+		cryptKey = key
+	}
+
+	var fileID string
+	var size int64
+	if fsm.rstart >= 0 {
+		filePath := files[0]
+		id, fileSize, err := indexBlocks(repo.StoreID, repo.Version, filePath, nil, cryptKey)
+		if err != nil {
+			err := fmt.Errorf("failed to index blocks: %v", err)
+			return &appError{err, "", http.StatusInternalServerError}
+		}
+		fileID = id
+		size = fileSize
+	} else {
+		handler := fsm.fileHeaders[0]
+		id, fileSize, err := indexBlocks(repo.StoreID, repo.Version, "", handler, cryptKey)
+		if err != nil {
+			err := fmt.Errorf("failed to index blocks: %v", err)
+			return &appError{err, "", http.StatusInternalServerError}
+		}
+		fileID = id
+		size = fileSize
+	}
+
+	fullPath := filepath.Join(parentDir, fileName)
+	oldFileID, _, _ := fsmgr.GetObjIDByPath(repo.StoreID, headCommit.RootID, fullPath)
+	if fileID == oldFileID {
+		if isAjax {
+			retJSON, err := formatUpdateJSONRet(fileName, fileID, size)
+			if err != nil {
+				err := fmt.Errorf("failed to format json data")
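+				// The ajax reply mirrors the multi-file upload API: a
+				// one-element array like [{"name": ..., "id": ..., "size": ...}]
+				// built by formatUpdateJSONRet below.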
return &appError{err, "", http.StatusInternalServerError} + } + rsp.Write(retJSON) + } else { + rsp.Write([]byte(fileID)) + } + return nil + } + + mtime := time.Now().Unix() + mode := (syscall.S_IFREG | 0644) + newDent := fsmgr.NewDirent(fileID, fileName, uint32(mode), mtime, user, size) + + var names []string + rootID, err := doPostMultiFiles(repo, headCommit.RootID, canonPath, []*fsmgr.SeafDirent{newDent}, user, true, &names) + if err != nil { + err := fmt.Errorf("failed to put file %s to %s in repo %s: %v", fileName, canonPath, repo.ID, err) + return &appError{err, "", http.StatusInternalServerError} + } + + desc := fmt.Sprintf("Modified \"%s\"", fileName) + _, err = genNewCommit(repo, headCommit, rootID, user, desc) + if err != nil { + err := fmt.Errorf("failed to generate new commit: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + + if isAjax { + retJSON, err := formatUpdateJSONRet(fileName, fileID, size) + if err != nil { + err := fmt.Errorf("failed to format json data") + return &appError{err, "", http.StatusInternalServerError} + } + rsp.Write(retJSON) + } else { + rsp.Write([]byte(fileID)) + } + + go mergeVirtualRepo(repo.ID, "") + go updateRepoSize(repo.ID) + + return nil +} + +func formatUpdateJSONRet(fileName, fileID string, size int64) ([]byte, error) { + var array []map[string]interface{} + obj := make(map[string]interface{}) + obj["name"] = fileName + obj["id"] = fileID + obj["size"] = size + array = append(array, obj) + + jsonstr, err := json.Marshal(array) + if err != nil { + err := fmt.Errorf("failed to convert array to json") + return nil, err + } + + return jsonstr, nil +} + +func checkFileExists(storeID, rootID, parentDir, fileName string) (bool, error) { + dir, err := fsmgr.GetSeafdirByPath(storeID, rootID, parentDir) + if err != nil { + err := fmt.Errorf("parent_dir %s doesn't exist in repo %s: %v", parentDir, storeID, err) + return false, err + } + + var ret bool + entries := dir.Entries + for _, de := range entries { + if de.Name == fileName { + ret = true + break + } + } + + return ret, nil +} + +func uploadBlksAPICB(rsp http.ResponseWriter, r *http.Request) *appError { + fsm, err := parseUploadHeaders(r) + if err != nil { + return err + } + + if err := doUploadBlks(rsp, r, fsm); err != nil { + formatJSONError(rsp, err) + return err + } + + return nil +} + +func doUploadBlks(rsp http.ResponseWriter, r *http.Request, fsm *recvData) *appError { + if err := r.ParseMultipartForm(1 << 20); err != nil { + return &appError{nil, "", http.StatusBadRequest} + } + defer r.MultipartForm.RemoveAll() + + repoID := fsm.repoID + user := fsm.user + + replaceStr := r.FormValue("replace") + var replaceExisted bool + if replaceStr != "" { + replace, err := strconv.ParseInt(replaceStr, 10, 64) + if err != nil || (replace != 0 && replace != 1) { + msg := "Invalid argument.\n" + return &appError{nil, msg, http.StatusBadRequest} + } + if replace == 1 { + replaceExisted = true + } + } + + parentDir := r.FormValue("parent_dir") + if parentDir == "" { + msg := "Invalid URL.\n" + return &appError{nil, msg, http.StatusBadRequest} + } + + fileName := r.FormValue("file_name") + if fileName == "" { + msg := "Invalid URL.\n" + return &appError{nil, msg, http.StatusBadRequest} + } + + fileSizeStr := r.FormValue("file_size") + var fileSize int64 = -1 + if fileSizeStr != "" { + size, err := strconv.ParseInt(fileSizeStr, 10, 64) + if err != nil { + msg := "Invalid argument.\n" + return &appError{nil, msg, http.StatusBadRequest} + } + fileSize = size + } + + if fileSize < 0 
{ + msg := "Invalid URL.\n" + return &appError{nil, msg, http.StatusBadRequest} + } + + commitOnlyStr, ok := r.Form["commitonly"] + if !ok || len(commitOnlyStr) == 0 { + msg := "Only commit supported.\n" + return &appError{nil, msg, http.StatusBadRequest} + } + + if err := checkParentDir(repoID, parentDir); err != nil { + return err + } + + blockIDsJSON := r.FormValue("blockids") + if blockIDsJSON == "" { + msg := "Invalid URL.\n" + return &appError{nil, msg, http.StatusBadRequest} + } + + fileID, appErr := commitFileBlocks(repoID, parentDir, fileName, blockIDsJSON, user, fileSize, replaceExisted) + if appErr != nil { + return appErr + } + _, ok = r.Form["ret-json"] + if ok { + obj := make(map[string]interface{}) + obj["id"] = fileID + + jsonstr, err := json.Marshal(obj) + if err != nil { + err := fmt.Errorf("failed to convert array to json: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + rsp.Write([]byte(jsonstr)) + } else { + rsp.Write([]byte("\"")) + rsp.Write([]byte(fileID)) + rsp.Write([]byte("\"")) + } + + rsp.Header().Set("Content-Type", "application/json; charset=utf-8") + + return nil +} + +func commitFileBlocks(repoID, parentDir, fileName, blockIDsJSON, user string, fileSize int64, replace bool) (string, *appError) { + repo := repomgr.Get(repoID) + if repo == nil { + msg := "Failed to get repo.\n" + err := fmt.Errorf("Failed to get repo %s", repoID) + return "", &appError{err, msg, http.StatusInternalServerError} + } + + headCommit, err := commitmgr.Load(repo.ID, repo.HeadCommitID) + if err != nil { + msg := "Failed to get head commit.\n" + err := fmt.Errorf("failed to get head commit for repo %s", repo.ID) + return "", &appError{err, msg, http.StatusInternalServerError} + } + + canonPath := getCanonPath(parentDir) + + if shouldIgnoreFile(fileName) { + msg := fmt.Sprintf("invalid fileName: %s.\n", fileName) + return "", &appError{nil, msg, http.StatusBadRequest} + } + + if strings.Index(parentDir, "//") != -1 { + msg := "parent_dir contains // sequence.\n" + return "", &appError{nil, msg, http.StatusBadRequest} + } + + var blkIDs []string + err = json.Unmarshal([]byte(blockIDsJSON), &blkIDs) + if err != nil { + err := fmt.Errorf("failed to decode data to json: %v", err) + return "", &appError{err, "", http.StatusInternalServerError} + } + + appErr := checkQuotaBeforeCommitBlocks(repo.StoreID, blkIDs) + if appErr != nil { + return "", appErr + } + + fileID, appErr := indexExistedFileBlocks(repoID, repo.Version, blkIDs, fileSize) + if appErr != nil { + return "", appErr + } + + mtime := time.Now().Unix() + mode := (syscall.S_IFREG | 0644) + newDent := fsmgr.NewDirent(fileID, fileName, uint32(mode), mtime, user, fileSize) + var names []string + rootID, err := doPostMultiFiles(repo, headCommit.RootID, canonPath, []*fsmgr.SeafDirent{newDent}, user, replace, &names) + if err != nil { + err := fmt.Errorf("failed to post file %s to %s in repo %s: %v", fileName, canonPath, repo.ID, err) + return "", &appError{err, "", http.StatusInternalServerError} + } + + desc := fmt.Sprintf("Added \"%s\"", fileName) + _, err = genNewCommit(repo, headCommit, rootID, user, desc) + if err != nil { + err := fmt.Errorf("failed to generate new commit: %v", err) + return "", &appError{err, "", http.StatusInternalServerError} + } + + return fileID, nil +} + +func checkQuotaBeforeCommitBlocks(storeID string, blockIDs []string) *appError { + var totalSize int64 + for _, blkID := range blockIDs { + size, err := blockmgr.Stat(storeID, blkID) + if err != nil { + err := fmt.Errorf("failed 
+			return &appError{err, "", http.StatusInternalServerError}
+		}
+		totalSize += size
+	}
+	ret, err := checkQuota(storeID, totalSize)
+	if err != nil {
+		msg := "Internal error.\n"
+		err := fmt.Errorf("failed to check quota: %v", err)
+		return &appError{err, msg, http.StatusInternalServerError}
+	}
+	if ret == 1 {
+		msg := "Out of quota.\n"
+		return &appError{nil, msg, seafHTTPResNoQuota}
+	}
+
+	return nil
+}
+
+func indexExistedFileBlocks(repoID string, version int, blkIDs []string, fileSize int64) (string, *appError) {
+	if len(blkIDs) == 0 {
+		return fsmgr.EmptySha1, nil
+	}
+
+	for _, blkID := range blkIDs {
+		if !blockmgr.Exists(repoID, blkID) {
+			err := fmt.Errorf("failed to check block: %s", blkID)
+			return "", &appError{err, "", seafHTTPResBlockMissing}
+		}
+	}
+
+	fileID, err := writeSeafile(repoID, version, fileSize, blkIDs)
+	if err != nil {
+		err := fmt.Errorf("failed to write seafile: %v", err)
+		return "", &appError{err, "", http.StatusInternalServerError}
+	}
+
+	return fileID, nil
+}
+
+func uploadRawBlksAPICB(rsp http.ResponseWriter, r *http.Request) *appError {
+	fsm, err := parseUploadHeaders(r)
+	if err != nil {
+		return err
+	}
+
+	if err := doUploadRawBlks(rsp, r, fsm); err != nil {
+		formatJSONError(rsp, err)
+		return err
+	}
+
+	return nil
+}
+
+func doUploadRawBlks(rsp http.ResponseWriter, r *http.Request, fsm *recvData) *appError {
+	if err := r.ParseMultipartForm(1 << 20); err != nil {
+		return &appError{nil, "", http.StatusBadRequest}
+	}
+	defer r.MultipartForm.RemoveAll()
+
+	repoID := fsm.repoID
+	user := fsm.user
+
+	formFiles := r.MultipartForm.File
+	fileHeaders, ok := formFiles["file"]
+	if !ok {
+		msg := "No file in multipart form.\n"
+		return &appError{nil, msg, http.StatusBadRequest}
+	}
+	for _, handler := range fileHeaders {
+		fileName := filepath.Base(handler.Filename)
+		fsm.fileNames = append(fsm.fileNames, fileName)
+		fsm.fileHeaders = append(fsm.fileHeaders, handler)
+	}
+
+	if fsm.fileNames == nil {
+		msg := "No file.\n"
+		return &appError{nil, msg, http.StatusBadRequest}
+	}
+
+	if err := checkTmpFileList(fsm); err != nil {
+		return err
+	}
+
+	if err := postBlocks(repoID, user, fsm); err != nil {
+		return err
+	}
+
+	var contentLen int64
+	lenstr := r.Header.Get("Content-Length")
+	if lenstr != "" {
+		conLen, err := strconv.ParseInt(lenstr, 10, 64)
+		if err != nil {
+			msg := "Internal error.\n"
+			err := fmt.Errorf("failed to parse content len: %v", err)
+			return &appError{err, msg, http.StatusInternalServerError}
+		}
+		contentLen = conLen
+	}
+
+	oper := "web-file-upload"
+	sendStatisticMsg(repoID, user, oper, uint64(contentLen))
+
+	rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
+	rsp.Write([]byte("\"OK\""))
+
+	return nil
+}
+
+func postBlocks(repoID, user string, fsm *recvData) *appError {
+	blockIDs := fsm.fileNames
+	fileHeaders := fsm.fileHeaders
+	repo := repomgr.Get(repoID)
+	if repo == nil {
+		msg := "Failed to get repo.\n"
+		err := fmt.Errorf("Failed to get repo %s", repoID)
+		return &appError{err, msg, http.StatusInternalServerError}
+	}
+
+	if err := indexRawBlocks(repo.StoreID, blockIDs, fileHeaders); err != nil {
+		err := fmt.Errorf("failed to index file blocks")
+		return &appError{err, "", http.StatusInternalServerError}
+	}
+
+	go updateRepoSize(repo.ID)
+
+	return nil
+}
+
+func indexRawBlocks(repoID string, blockIDs []string, fileHeaders []*multipart.FileHeader) error {
+	for i, handler := range fileHeaders {
+		var buf bytes.Buffer
+		f, err :=
handler.Open() + if err != nil { + err := fmt.Errorf("failed to open file for read: %v", err) + return err + } + _, err = buf.ReadFrom(f) + if err != nil { + err := fmt.Errorf("failed to read block: %v", err) + return err + } + checkSum := sha1.Sum(buf.Bytes()) + blkID := hex.EncodeToString(checkSum[:]) + if blkID != blockIDs[i] { + err := fmt.Errorf("block id %s:%s doesn't match content", blkID, blockIDs[i]) + return err + } + + err = blockmgr.Write(repoID, blkID, &buf) + if err != nil { + err := fmt.Errorf("failed to write block: %s/%s: %v", repoID, blkID, err) + return err + } + } + + return nil +} diff --git a/fileserver/fileserver.go b/fileserver/fileserver.go new file mode 100644 index 0000000..3c055b3 --- /dev/null +++ b/fileserver/fileserver.go @@ -0,0 +1,432 @@ +// Main package for Seafile file server. +package main + +import ( + "database/sql" + "flag" + "fmt" + "io" + "log" + "net/http" + "os" + "path/filepath" + "strings" + + _ "github.com/go-sql-driver/mysql" + "github.com/gorilla/mux" + "github.com/haiwen/seafile-server/fileserver/blockmgr" + "github.com/haiwen/seafile-server/fileserver/commitmgr" + "github.com/haiwen/seafile-server/fileserver/fsmgr" + "github.com/haiwen/seafile-server/fileserver/repomgr" + "github.com/haiwen/seafile-server/fileserver/searpc" + "github.com/haiwen/seafile-server/fileserver/share" + _ "github.com/mattn/go-sqlite3" + "gopkg.in/ini.v1" +) + +var dataDir, absDataDir string +var centralDir string +var logFile, absLogFile string +var rpcPipePath string + +var dbType string +var groupTableName string +var cloudMode bool +var seafileDB, ccnetDB *sql.DB + +// when SQLite is used, user and group db are separated. +var userDB, groupDB *sql.DB + +type fileServerOptions struct { + host string + port uint32 + maxUploadSize uint64 + maxDownloadDirSize uint64 + // Block size for indexing uploaded files + fixedBlockSize uint64 + // Maximum number of goroutines to index uploaded files + maxIndexingThreads uint32 + webTokenExpireTime uint32 + // File mode for temp files + clusterSharedTempFileMode uint32 + windowsEncoding string + // Timeout for fs-id-list requests. 
+ fsIDListRequestTimeout uint32 +} + +var options fileServerOptions + +func init() { + flag.StringVar(¢ralDir, "F", "", "central config directory") + flag.StringVar(&dataDir, "d", "", "seafile data directory") + flag.StringVar(&logFile, "l", "", "log file path") + flag.StringVar(&rpcPipePath, "p", "", "rpc pipe path") +} + +func loadCcnetDB() { + ccnetConfPath := filepath.Join(centralDir, "ccnet.conf") + config, err := ini.Load(ccnetConfPath) + if err != nil { + log.Fatalf("Failed to load ccnet.conf: %v", err) + } + + section, err := config.GetSection("Database") + if err != nil { + log.Fatal("No database section in ccnet.conf.") + } + + var dbEngine string = "sqlite" + key, err := section.GetKey("ENGINE") + if err == nil { + dbEngine = key.String() + } + + if strings.EqualFold(dbEngine, "mysql") { + if key, err = section.GetKey("HOST"); err != nil { + log.Fatal("No database host in ccnet.conf.") + } + host := key.String() + if key, err = section.GetKey("USER"); err != nil { + log.Fatal("No database user in ccnet.conf.") + } + user := key.String() + if key, err = section.GetKey("PASSWD"); err != nil { + log.Fatal("No database password in ccnet.conf.") + } + password := key.String() + if key, err = section.GetKey("DB"); err != nil { + log.Fatal("No database db_name in ccnet.conf.") + } + dbName := key.String() + port := 3306 + if key, err = section.GetKey("PORT"); err == nil { + port, _ = key.Int() + } + unixSocket := "" + if key, err = section.GetKey("UNIX_SOCKET"); err == nil { + unixSocket = key.String() + } + useTLS := false + if key, err = section.GetKey("USE_SSL"); err == nil { + useTLS, _ = key.Bool() + } + var dsn string + if unixSocket == "" { + dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=%t", user, password, host, port, dbName, useTLS) + } else { + dsn = fmt.Sprintf("%s:%s@unix(%s)/%s", user, password, unixSocket, dbName) + } + ccnetDB, err = sql.Open("mysql", dsn) + if err != nil { + log.Fatalf("Failed to open database: %v", err) + } + } else if strings.EqualFold(dbEngine, "sqlite") { + ccnetDBPath := filepath.Join(centralDir, "groupmgr.db") + ccnetDB, err = sql.Open("sqlite3", ccnetDBPath) + if err != nil { + log.Fatalf("Failed to open database %s: %v", ccnetDBPath, err) + } + } else { + log.Fatalf("Unsupported database %s.", dbEngine) + } +} + +func loadSeafileDB() { + var seafileConfPath string + seafileConfPath = filepath.Join(centralDir, "seafile.conf") + + config, err := ini.Load(seafileConfPath) + if err != nil { + log.Fatalf("Failed to load seafile.conf: %v", err) + } + + section, err := config.GetSection("database") + if err != nil { + log.Fatal("No database section in seafile.conf.") + } + + var dbEngine string = "sqlite" + key, err := section.GetKey("type") + if err == nil { + dbEngine = key.String() + } + if strings.EqualFold(dbEngine, "mysql") { + if key, err = section.GetKey("host"); err != nil { + log.Fatal("No database host in seafile.conf.") + } + host := key.String() + if key, err = section.GetKey("user"); err != nil { + log.Fatal("No database user in seafile.conf.") + } + user := key.String() + if key, err = section.GetKey("password"); err != nil { + log.Fatal("No database password in seafile.conf.") + } + password := key.String() + if key, err = section.GetKey("db_name"); err != nil { + log.Fatal("No database db_name in seafile.conf.") + } + dbName := key.String() + port := 3306 + if key, err = section.GetKey("port"); err == nil { + port, _ = key.Int() + } + unixSocket := "" + if key, err = section.GetKey("unix_socket"); err == nil { + unixSocket = key.String() 
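+		// The DSN assembled below follows go-sql-driver/mysql conventions,
+		// e.g. "seafile:secret@tcp(127.0.0.1:3306)/seafile-db?tls=false" for
+		// TCP or "seafile:secret@unix(/var/run/mysqld/mysqld.sock)/seafile-db"
+		// for a socket connection (credentials here are illustrative).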
+ } + useTLS := false + if key, err = section.GetKey("use_ssl"); err == nil { + useTLS, _ = key.Bool() + } + + var dsn string + if unixSocket == "" { + dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=%t", user, password, host, port, dbName, useTLS) + } else { + dsn = fmt.Sprintf("%s:%s@unix(%s)/%s", user, password, unixSocket, dbName) + } + + seafileDB, err = sql.Open("mysql", dsn) + if err != nil { + log.Fatalf("Failed to open database: %v", err) + } + } else if strings.EqualFold(dbEngine, "sqlite") { + seafileDBPath := filepath.Join(absDataDir, "seafile.db") + seafileDB, err = sql.Open("sqlite3", seafileDBPath) + if err != nil { + log.Fatalf("Failed to open database %s: %v", seafileDBPath, err) + } + } else { + log.Fatalf("Unsupported database %s.", dbEngine) + } + dbType = dbEngine +} + +func loadFileServerOptions() { + var seafileConfPath string + seafileConfPath = filepath.Join(centralDir, "seafile.conf") + + config, err := ini.Load(seafileConfPath) + if err != nil { + log.Fatalf("Failed to load seafile.conf: %v", err) + } + cloudMode = false + if section, err := config.GetSection("general"); err == nil { + if key, err := section.GetKey("cloud_mode"); err == nil { + cloudMode, _ = key.Bool() + } + } + + initDefaultOptions() + if section, err := config.GetSection("fileserver"); err == nil { + if key, err := section.GetKey("host"); err == nil { + options.host = key.String() + } + if key, err := section.GetKey("port"); err == nil { + port, err := key.Uint() + if err == nil { + options.port = uint32(port) + } + } + if key, err := section.GetKey("max_indexing_threads"); err == nil { + threads, err := key.Uint() + if err == nil { + options.maxIndexingThreads = uint32(threads) + } + } + if key, err := section.GetKey("fixed_block_size"); err == nil { + blkSize, err := key.Uint64() + if err == nil { + options.fixedBlockSize = blkSize + } + } + if key, err := section.GetKey("web_token_expire_time"); err == nil { + expire, err := key.Uint() + if err == nil { + options.webTokenExpireTime = uint32(expire) + } + } + if key, err := section.GetKey("cluster_shared_temp_file_mode"); err == nil { + fileMode, err := key.Uint() + if err == nil { + options.clusterSharedTempFileMode = uint32(fileMode) + } + } + } + + ccnetConfPath := filepath.Join(centralDir, "ccnet.conf") + config, err = ini.Load(ccnetConfPath) + if err != nil { + log.Fatalf("Failed to load ccnet.conf: %v", err) + } + groupTableName = "Group" + if section, err := config.GetSection("GROUP"); err == nil { + if key, err := section.GetKey("TABLE_NAME"); err == nil { + groupTableName = key.String() + } + } +} + +func initDefaultOptions() { + options.host = "0.0.0.0" + options.port = 8082 + options.maxDownloadDirSize = 100 * (1 << 20) + options.fixedBlockSize = 1 << 23 + options.maxIndexingThreads = 1 + options.webTokenExpireTime = 7200 + options.clusterSharedTempFileMode = 0600 +} + +func main() { + flag.Parse() + + if centralDir == "" { + log.Fatal("central config directory must be specified.") + } + _, err := os.Stat(centralDir) + if os.IsNotExist(err) { + log.Fatalf("central config directory %s doesn't exist: %v.", centralDir, err) + } + loadCcnetDB() + + if dataDir == "" { + log.Fatal("seafile data directory must be specified.") + } + _, err = os.Stat(dataDir) + if os.IsNotExist(err) { + log.Fatalf("seafile data directory %s doesn't exist: %v.", dataDir, err) + } + absDataDir, err = filepath.Abs(dataDir) + if err != nil { + log.Fatalf("Failed to convert seafile data dir to absolute path: %v.", err) + } + loadSeafileDB() + loadFileServerOptions() 
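+    // The [fileserver] keys recognized above are all optional. For example, a
+    // seafile.conf containing
+    //
+    //   [fileserver]
+    //   host = 127.0.0.1
+    //   port = 8082
+    //   max_indexing_threads = 4
+    //
+    // overrides just those three values; initDefaultOptions keeps the
+    // defaults for everything else.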
+ + if logFile == "" { + absLogFile = filepath.Join(absDataDir, "seafile.log") + fp, err := os.OpenFile(absLogFile, os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + log.Fatalf("Failed to open or create log file: %v", err) + } + log.SetOutput(fp) + } else if logFile != "-" { + absLogFile, err = filepath.Abs(logFile) + if err != nil { + log.Fatalf("Failed to convert log file path to absolute path: %v", err) + } + fp, err := os.OpenFile(absLogFile, os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + log.Fatalf("Failed to open or create log file: %v", err) + } + log.SetOutput(fp) + } + // When logFile is "-", use default output (StdOut) + + log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile) + + repomgr.Init(seafileDB) + + fsmgr.Init(centralDir, dataDir) + + blockmgr.Init(centralDir, dataDir) + + commitmgr.Init(centralDir, dataDir) + + share.Init(ccnetDB, seafileDB, groupTableName, cloudMode) + + rpcClientInit() + + syncAPIInit() + + sizeSchedulerInit() + + initUpload() + + router := newHTTPRouter() + + log.Print("Seafile file server started.") + + addr := fmt.Sprintf("%s:%d", options.host, options.port) + err = http.ListenAndServe(addr, router) + if err != nil { + log.Printf("File server exiting: %v", err) + } +} + +var rpcclient *searpc.Client + +func rpcClientInit() { + var pipePath string + if rpcPipePath != "" { + pipePath = filepath.Join(rpcPipePath, "seafile.sock") + } else { + pipePath = filepath.Join(absDataDir, "seafile.sock") + } + rpcclient = searpc.Init(pipePath, "seafserv-threaded-rpcserver") +} + +func newHTTPRouter() *mux.Router { + r := mux.NewRouter() + r.HandleFunc("/protocol-version", handleProtocolVersion) + r.Handle("/files/{.*}/{.*}", appHandler(accessCB)) + r.Handle("/blks/{.*}/{.*}", appHandler(accessBlksCB)) + r.Handle("/zip/{.*}", appHandler(accessZipCB)) + r.Handle("/upload-api/{.*}", appHandler(uploadAPICB)) + r.Handle("/upload-aj/{.*}", appHandler(uploadAjaxCB)) + r.Handle("/update-api/{.*}", appHandler(updateAPICB)) + r.Handle("/update-aj/{.*}", appHandler(updateAjaxCB)) + r.Handle("/upload-blks-api/{.*}", appHandler(uploadBlksAPICB)) + r.Handle("/upload-raw-blks-api/{.*}", appHandler(uploadRawBlksAPICB)) + // file syncing api + r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/permission-check/", + appHandler(permissionCheckCB)) + r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/commit/{HEAD:HEAD\\/?}", + appHandler(headCommitOperCB)) + r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/commit/{id:[\\da-z]{40}}", + appHandler(commitOperCB)) + r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/block/{id:[\\da-z]{40}}", + appHandler(blockOperCB)) + r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/fs-id-list/", + appHandler(getFsObjIDCB)) + r.Handle("/repo/head-commits-multi/", + appHandler(headCommitsMultiCB)) + r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/pack-fs/", + appHandler(packFSCB)) + r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/check-fs/", + appHandler(checkFSCB)) + r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/check-blocks/", + appHandler(checkBlockCB)) + r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/recv-fs/", + appHandler(recvFSCB)) + 
r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/quota-check/", + appHandler(getCheckQuotaCB)) + + // seadrive api + r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/block-map/{id:[\\da-z]{40}}", + appHandler(getBlockMapCB)) + r.Handle("/accessible-repos", appHandler(getAccessibleRepoListCB)) + return r +} + +func handleProtocolVersion(rsp http.ResponseWriter, r *http.Request) { + io.WriteString(rsp, "{\"version\": 2}") +} + +type appError struct { + Error error + Message string + Code int +} + +type appHandler func(http.ResponseWriter, *http.Request) *appError + +func (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if e := fn(w, r); e != nil { + if e.Error != nil && e.Code == http.StatusInternalServerError { + log.Printf("path %s internal server error: %v\n", r.URL.Path, e.Error) + } + http.Error(w, e.Message, e.Code) + } +} diff --git a/fileserver/fsmgr/fsmgr.go b/fileserver/fsmgr/fsmgr.go new file mode 100644 index 0000000..bb336bc --- /dev/null +++ b/fileserver/fsmgr/fsmgr.go @@ -0,0 +1,521 @@ +// Package fsmgr manages fs objects +package fsmgr + +import ( + "bytes" + "compress/zlib" + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "path/filepath" + "strings" + "syscall" + + "github.com/haiwen/seafile-server/fileserver/objstore" +) + +// Seafile is a file object +type Seafile struct { + Version int `json:"version"` + FileType int `json:"type,omitempty"` + FileID string `json:"file_id,omitempty"` + FileSize uint64 `json:"size"` + BlkIDs []string `json:"block_ids"` +} + +// SeafDirent is a dir entry object +type SeafDirent struct { + Mode uint32 `json:"mode"` + ID string `json:"id"` + Name string `json:"name"` + Mtime int64 `json:"mtime"` + Modifier string `json:"modifier"` + Size int64 `json:"size"` +} + +//SeafDir is a dir object +type SeafDir struct { + Version int `json:"version"` + DirType int `json:"type,omitempty"` + DirID string `json:"dir_id,omitempty"` + Entries []*SeafDirent `json:"dirents"` +} + +// FileCountInfo contains information of files +type FileCountInfo struct { + FileCount int64 + Size int64 + DirCount int64 +} + +// Meta data type of dir or file +const ( + SeafMetadataTypeInvalid = iota + SeafMetadataTypeFile + SeafMetadataTypeLink + SeafMetadataTypeDir +) + +var store *objstore.ObjectStore + +// Empty value of sha1 +const ( + EmptySha1 = "0000000000000000000000000000000000000000" +) + +// Init initializes fs manager and creates underlying object store. 
+func Init(seafileConfPath string, seafileDataDir string) { + store = objstore.New(seafileConfPath, seafileDataDir, "fs") +} + +// NewDirent initializes a SeafDirent object +func NewDirent(id string, name string, mode uint32, mtime int64, modifier string, size int64) *SeafDirent { + dent := new(SeafDirent) + dent.ID = id + if id == "" { + dent.ID = EmptySha1 + } + dent.Name = name + dent.Mode = mode + dent.Mtime = mtime + if IsRegular(mode) { + dent.Modifier = modifier + dent.Size = size + } + + return dent +} + +// NewSeafdir initializes a SeafDir object +func NewSeafdir(version int, entries []*SeafDirent) (*SeafDir, error) { + dir := new(SeafDir) + dir.Version = version + dir.Entries = entries + jsonstr, err := json.Marshal(dir) + if err != nil { + err := fmt.Errorf("failed to convert seafdir to json") + return nil, err + } + checksum := sha1.Sum(jsonstr) + dir.DirID = hex.EncodeToString(checksum[:]) + + return dir, nil +} + +// NewSeafile initializes a Seafile object +func NewSeafile(version int, fileSize int64, blkIDs []string) (*Seafile, error) { + seafile := new(Seafile) + seafile.Version = version + seafile.FileSize = uint64(fileSize) + seafile.BlkIDs = blkIDs + + jsonstr, err := json.Marshal(seafile) + if err != nil { + err := fmt.Errorf("failed to convert seafile to json") + return nil, err + } + checkSum := sha1.Sum(jsonstr) + seafile.FileID = hex.EncodeToString(checkSum[:]) + + return seafile, nil +} + +func uncompress(p []byte) ([]byte, error) { + b := bytes.NewReader(p) + var out bytes.Buffer + r, err := zlib.NewReader(b) + if err != nil { + return nil, err + } + + _, err = io.Copy(&out, r) + if err != nil { + r.Close() + return nil, err + } + + r.Close() + + return out.Bytes(), nil +} + +func compress(p []byte) ([]byte, error) { + var out bytes.Buffer + w := zlib.NewWriter(&out) + + _, err := w.Write(p) + if err != nil { + w.Close() + return nil, err + } + + w.Close() + + return out.Bytes(), nil +} + +// FromData reads from p and converts JSON-encoded data to Seafile. +func (seafile *Seafile) FromData(p []byte) error { + b, err := uncompress(p) + if err != nil { + return err + } + err = json.Unmarshal(b, seafile) + if err != nil { + return err + } + + return nil +} + +// ToData converts seafile to JSON-encoded data and writes to w. +func (seafile *Seafile) ToData(w io.Writer) error { + jsonstr, err := json.Marshal(seafile) + if err != nil { + return err + } + + buf, err := compress(jsonstr) + if err != nil { + return err + } + + _, err = w.Write(buf) + if err != nil { + return err + } + + return nil +} + +// ToData converts seafdir to JSON-encoded data and writes to w. +func (seafdir *SeafDir) ToData(w io.Writer) error { + jsonstr, err := json.Marshal(seafdir) + if err != nil { + return err + } + + buf, err := compress(jsonstr) + if err != nil { + return err + } + + _, err = w.Write(buf) + if err != nil { + return err + } + + return nil +} + +// FromData reads from p and converts JSON-encoded data to SeafDir. +func (seafdir *SeafDir) FromData(p []byte) error { + b, err := uncompress(p) + if err != nil { + return err + } + err = json.Unmarshal(b, seafdir) + if err != nil { + return err + } + + return nil +} + +// ReadRaw reads data in binary format from storage backend. +func ReadRaw(repoID string, objID string, w io.Writer) error { + err := store.Read(repoID, objID, w) + if err != nil { + return err + } + + return nil +} + +// WriteRaw writes data in binary format to storage backend. 
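+// WriteRaw itself performs no compression or existence check; callers such as
+// SaveSeafile and SaveSeafdir compress via ToData before writing.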
+func WriteRaw(repoID string, objID string, r io.Reader) error {
+    err := store.Write(repoID, objID, r, false)
+    if err != nil {
+        return err
+    }
+    return nil
+}
+
+// GetSeafile gets seafile from storage backend.
+func GetSeafile(repoID string, fileID string) (*Seafile, error) {
+    var buf bytes.Buffer
+    seafile := new(Seafile)
+    if fileID == EmptySha1 {
+        seafile.FileID = EmptySha1
+        return seafile, nil
+    }
+
+    err := ReadRaw(repoID, fileID, &buf)
+    if err != nil {
+        errors := fmt.Errorf("failed to read seafile object from storage: %v", err)
+        return nil, errors
+    }
+
+    err = seafile.FromData(buf.Bytes())
+    if err != nil {
+        errors := fmt.Errorf("failed to parse seafile object %s/%s: %v", repoID, fileID, err)
+        return nil, errors
+    }
+
+    if seafile.Version < 1 {
+        errors := fmt.Errorf("seafile object %s/%s version should be > 0", repoID, fileID)
+        return nil, errors
+    }
+
+    seafile.FileID = fileID
+
+    return seafile, nil
+}
+
+// SaveSeafile saves seafile to storage backend.
+func SaveSeafile(repoID string, seafile *Seafile) error {
+    fileID := seafile.FileID
+
+    exist, _ := store.Exists(repoID, fileID)
+    if exist {
+        return nil
+    }
+
+    seafile.FileType = SeafMetadataTypeFile
+    var buf bytes.Buffer
+    err := seafile.ToData(&buf)
+    if err != nil {
+        errors := fmt.Errorf("failed to convert seafile object %s/%s to json", repoID, fileID)
+        return errors
+    }
+
+    err = WriteRaw(repoID, fileID, &buf)
+    if err != nil {
+        errors := fmt.Errorf("failed to write seafile object to storage: %v", err)
+        return errors
+    }
+
+    return nil
+}
+
+// GetSeafdir gets seafdir from storage backend.
+func GetSeafdir(repoID string, dirID string) (*SeafDir, error) {
+    var buf bytes.Buffer
+    seafdir := new(SeafDir)
+    if dirID == EmptySha1 {
+        seafdir.DirID = EmptySha1
+        return seafdir, nil
+    }
+
+    err := ReadRaw(repoID, dirID, &buf)
+    if err != nil {
+        errors := fmt.Errorf("failed to read seafdir object from storage: %v", err)
+        return nil, errors
+    }
+
+    err = seafdir.FromData(buf.Bytes())
+    if err != nil {
+        errors := fmt.Errorf("failed to parse seafdir object %s/%s: %v", repoID, dirID, err)
+        return nil, errors
+    }
+
+    if seafdir.Version < 1 {
+        errors := fmt.Errorf("seafdir object %s/%s version should be > 0", repoID, dirID)
+        return nil, errors
+    }
+
+    seafdir.DirID = dirID
+
+    return seafdir, nil
+}
+
+// SaveSeafdir saves seafdir to storage backend.
+func SaveSeafdir(repoID string, seafdir *SeafDir) error {
+    dirID := seafdir.DirID
+    exist, _ := store.Exists(repoID, dirID)
+    if exist {
+        return nil
+    }
+
+    seafdir.DirType = SeafMetadataTypeDir
+    var buf bytes.Buffer
+    err := seafdir.ToData(&buf)
+    if err != nil {
+        errors := fmt.Errorf("failed to convert seafdir object %s/%s to json", repoID, dirID)
+        return errors
+    }
+
+    err = WriteRaw(repoID, dirID, &buf)
+    if err != nil {
+        errors := fmt.Errorf("failed to write seafdir object to storage: %v", err)
+        return errors
+    }
+
+    return nil
+}
+
+// Exists checks whether the fs object exists.
+func Exists(repoID string, objID string) (bool, error) {
+    if objID == EmptySha1 {
+        return true, nil
+    }
+    return store.Exists(repoID, objID)
+}
+
+// comp reports whether c is a path separator; used with strings.FieldsFunc.
+func comp(c rune) bool {
+    return c == '/'
+}
+
+// IsDir checks whether the mode describes a directory.
+func IsDir(m uint32) bool {
+    return (m & syscall.S_IFMT) == syscall.S_IFDIR
+}
+
+// IsRegular checks whether the mode describes a regular file.
+func IsRegular(m uint32) bool {
+    return (m & syscall.S_IFMT) == syscall.S_IFREG
+}
+
+// ErrPathNoExist is the error returned when a path does not exist.
+var ErrPathNoExist = fmt.Errorf("path does not exist")
+
+// GetSeafdirByPath gets the object of seafdir by path.
+func GetSeafdirByPath(repoID string, rootID string, path string) (*SeafDir, error) {
+    dir, err := GetSeafdir(repoID, rootID)
+    if err != nil {
+        errors := fmt.Errorf("directory is missing")
+        return nil, errors
+    }
+
+    path = filepath.Join("/", path)
+    parts := strings.FieldsFunc(path, comp)
+    for _, name := range parts {
+        // dirID must be reset for every path component; otherwise a stale
+        // match from the previous level would mask a missing entry.
+        var dirID string
+        entries := dir.Entries
+        for _, v := range entries {
+            if v.Name == name && IsDir(v.Mode) {
+                dirID = v.ID
+                break
+            }
+        }
+
+        if dirID == "" {
+            return nil, ErrPathNoExist
+        }
+
+        dir, err = GetSeafdir(repoID, dirID)
+        if err != nil {
+            errors := fmt.Errorf("directory is missing")
+            return nil, errors
+        }
+    }
+
+    return dir, nil
+}
+
+// GetSeafdirIDByPath gets the dirID of SeafDir by path.
+func GetSeafdirIDByPath(repoID, rootID, path string) (string, error) {
+    dirID, mode, err := GetObjIDByPath(repoID, rootID, path)
+    if err != nil {
+        err := fmt.Errorf("failed to get dir id by path: %s: %v", path, err)
+        return "", err
+    }
+    if dirID == "" || !IsDir(mode) {
+        return "", nil
+    }
+
+    return dirID, nil
+}
+
+// GetObjIDByPath gets the object id and mode by path.
+func GetObjIDByPath(repoID, rootID, path string) (string, uint32, error) {
+    var name string
+    var baseDir *SeafDir
+    formatPath := filepath.Join(path)
+    if len(formatPath) == 0 || formatPath == "/" {
+        return rootID, syscall.S_IFDIR, nil
+    }
+    index := strings.Index(formatPath, "/")
+    if index < 0 {
+        dir, err := GetSeafdir(repoID, rootID)
+        if err != nil {
+            err := fmt.Errorf("failed to find root dir %s: %v", rootID, err)
+            return "", 0, err
+        }
+        name = formatPath
+        baseDir = dir
+    } else {
+        name = filepath.Base(formatPath)
+        dirName := filepath.Dir(formatPath)
+        dir, err := GetSeafdirByPath(repoID, rootID, dirName)
+        if err != nil {
+            if err == ErrPathNoExist {
+                return "", syscall.S_IFDIR, ErrPathNoExist
+            }
+            err := fmt.Errorf("failed to find dir %s in repo %s: %v", dirName, repoID, err)
+            return "", syscall.S_IFDIR, err
+        }
+        baseDir = dir
+    }
+
+    entries := baseDir.Entries
+    for _, de := range entries {
+        if de.Name == name {
+            return de.ID, de.Mode, nil
+        }
+    }
+
+    return "", 0, nil
+}
+
+// GetFileCountInfoByPath gets the count info of file by path.
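+// It walks the tree below path and aggregates the number of regular files,
+// the number of sub-directories and their accumulated size.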
+func GetFileCountInfoByPath(repoID, rootID, path string) (*FileCountInfo, error) {
+    dirID, err := GetSeafdirIDByPath(repoID, rootID, path)
+    if err != nil {
+        err := fmt.Errorf("failed to get file count info for repo %s path %s: %v", repoID, path, err)
+        return nil, err
+    }
+
+    info, err := getFileCountInfo(repoID, dirID)
+    if err != nil {
+        err := fmt.Errorf("failed to get file count in repo %s: %v", repoID, err)
+        return nil, err
+    }
+
+    return info, nil
+}
+
+func getFileCountInfo(repoID, dirID string) (*FileCountInfo, error) {
+    dir, err := GetSeafdir(repoID, dirID)
+    if err != nil {
+        err := fmt.Errorf("failed to get dir: %v", err)
+        return nil, err
+    }
+
+    info := new(FileCountInfo)
+
+    entries := dir.Entries
+    for _, de := range entries {
+        if IsDir(de.Mode) {
+            tmpInfo, err := getFileCountInfo(repoID, de.ID)
+            if err != nil {
+                err := fmt.Errorf("failed to get file count: %v", err)
+                return nil, err
+            }
+            // Accumulate across siblings: the sub-directory itself plus
+            // everything below it.
+            info.DirCount += tmpInfo.DirCount + 1
+            info.FileCount += tmpInfo.FileCount
+            info.Size += tmpInfo.Size
+        } else {
+            info.FileCount++
+            info.Size += de.Size
+        }
+    }
+
+    return info, nil
+}
diff --git a/fileserver/fsmgr/fsmgr_test.go b/fileserver/fsmgr/fsmgr_test.go
new file mode 100644
index 0000000..5a63928
--- /dev/null
+++ b/fileserver/fsmgr/fsmgr_test.go
@@ -0,0 +1,129 @@
+package fsmgr
+
+import (
+    "fmt"
+    "os"
+    "testing"
+)
+
+const (
+    seafileConfPath = "/tmp/conf"
+    seafileDataDir  = "/tmp/conf/seafile-data"
+    repoID          = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
+    blkID           = "0401fc662e3bc87a41f299a907c056aaf8322a26"
+    subDirID        = "0401fc662e3bc87a41f299a907c056aaf8322a27"
+)
+
+var dirID string
+var fileID string
+
+func createFile() error {
+    var blkIDs []string
+    for i := 0; i < 2; i++ {
+        blkIDs = append(blkIDs, blkID)
+    }
+
+    seafile, err := NewSeafile(1, 100, blkIDs)
+    if err != nil {
+        return err
+    }
+    err = SaveSeafile(repoID, seafile)
+    if err != nil {
+        return err
+    }
+    fileID = seafile.FileID
+
+    var entries []*SeafDirent
+    for i := 0; i < 2; i++ {
+        dirent := SeafDirent{ID: subDirID, Name: "/", Mode: 0x4000}
+        entries = append(entries, &dirent)
+    }
+    seafdir, err := NewSeafdir(1, entries)
+    if err != nil {
+        err := fmt.Errorf("failed to new seafdir: %v", err)
+        return err
+    }
+    err = SaveSeafdir(repoID, seafdir)
+    if err != nil {
+        return err
+    }
+
+    dirID = seafdir.DirID
+
+    return nil
+}
+
+func delFile() error {
+    err := os.RemoveAll(seafileConfPath)
+    if err != nil {
+        return err
+    }
+
+    return nil
+}
+
+func TestMain(m *testing.M) {
+    Init(seafileConfPath, seafileDataDir)
+    err := createFile()
+    if err != nil {
+        fmt.Printf("Failed to create test file: %v.\n", err)
+        os.Exit(1)
+    }
+    code := m.Run()
+    err = delFile()
+    if err != nil {
+        fmt.Printf("Failed to remove test file: %v\n", err)
+    }
+    os.Exit(code)
+}
+
+func TestGetSeafile(t *testing.T) {
+    exists, err := Exists(repoID, fileID)
+    if !exists {
+        t.Errorf("seafile does not exist: %v.\n", err)
+    }
+    seafile, err := GetSeafile(repoID, fileID)
+    if err != nil || seafile == nil {
+        t.Errorf("Failed to get seafile: %v.\n", err)
+        t.FailNow()
+    }
+
+    for _, v := range seafile.BlkIDs {
+        if v != blkID {
+            t.Errorf("Wrong file content.\n")
+        }
+    }
+}
+
+func TestGetSeafdir(t *testing.T) {
+    exists, err := Exists(repoID, dirID)
+    if !exists {
+        t.Errorf("seafdir does not exist: %v.\n", err)
+    }
+    seafdir, err := GetSeafdir(repoID, dirID)
+    if err != nil || seafdir == nil {
+        t.Errorf("Failed to get seafdir: %v.\n", err)
+        t.FailNow()
+    }
+
+    for _, v := range seafdir.Entries {
+        if v.ID != subDirID {
+            t.Errorf("Wrong
file content.\n") + } + } + +} + +func TestGetSeafdirByPath(t *testing.T) { + seafdir, err := GetSeafdirByPath(repoID, dirID, "/") + if err != nil || seafdir == nil { + t.Errorf("Failed to get seafdir : %v.\n", err) + t.FailNow() + } + + for _, v := range seafdir.Entries { + if v.ID != subDirID { + t.Errorf("Wrong file content.\n") + } + } + +} diff --git a/fileserver/go.mod b/fileserver/go.mod new file mode 100644 index 0000000..b90c647 --- /dev/null +++ b/fileserver/go.mod @@ -0,0 +1,12 @@ +module github.com/haiwen/seafile-server/fileserver + +go 1.14 + +require ( + github.com/go-sql-driver/mysql v1.5.0 + github.com/google/uuid v1.1.1 + github.com/gorilla/mux v1.7.4 + github.com/mattn/go-sqlite3 v1.14.0 + github.com/smartystreets/goconvey v1.6.4 // indirect + gopkg.in/ini.v1 v1.55.0 +) diff --git a/fileserver/go.sum b/fileserver/go.sum new file mode 100644 index 0000000..64a4a29 --- /dev/null +++ b/fileserver/go.sum @@ -0,0 +1,29 @@ +github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= +github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= +github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/mattn/go-sqlite3 v1.14.0 h1:mLyGNKR8+Vv9CAU7PphKa2hkEqxxhn8i32J6FPj1/QA= +github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.3 h1:QdmJJYlDQhMDFrFP8IvVnx66D8mCbaQM4TsxKf7BXzo= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools 
v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +gopkg.in/ini.v1 v1.55.0 h1:E8yzL5unfpW3M6fz/eB7Cb5MQAYSZ7GKo4Qth+N2sgQ= +gopkg.in/ini.v1 v1.55.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= diff --git a/fileserver/http_code.go b/fileserver/http_code.go new file mode 100644 index 0000000..b62ed22 --- /dev/null +++ b/fileserver/http_code.go @@ -0,0 +1,12 @@ +package main + +const ( + seafHTTPResBadFileName = 440 + seafHTTPResExists = 441 + seafHTTPResNotExists = 441 + seafHTTPResTooLarge = 442 + seafHTTPResNoQuota = 443 + seafHTTPResRepoDeleted = 444 + seafHTTPResRepoCorrupted = 445 + seafHTTPResBlockMissing = 446 +) diff --git a/fileserver/merge.go b/fileserver/merge.go new file mode 100644 index 0000000..cf1f37e --- /dev/null +++ b/fileserver/merge.go @@ -0,0 +1,402 @@ +package main + +import ( + "fmt" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/haiwen/seafile-server/fileserver/commitmgr" + "github.com/haiwen/seafile-server/fileserver/fsmgr" +) + +type mergeOptions struct { + remoteRepoID string + remoteHead string + mergedRoot string + conflict bool +} + +func mergeTrees(storeID string, roots []string, opt *mergeOptions) error { + if len(roots) != 3 { + err := fmt.Errorf("invalid argument") + return err + } + + var trees []*fsmgr.SeafDir + for i := 0; i < 3; i++ { + dir, err := fsmgr.GetSeafdir(storeID, roots[i]) + if err != nil { + err := fmt.Errorf("failed to get dir: %v", err) + return err + } + trees = append(trees, dir) + } + + err := mergeTreesRecursive(storeID, trees, "", opt) + if err != nil { + err := fmt.Errorf("failed to merge trees: %v", err) + return err + } + + return nil +} + +func mergeTreesRecursive(storeID string, trees []*fsmgr.SeafDir, baseDir string, opt *mergeOptions) error { + var ptrs [3][]*fsmgr.SeafDirent + var mergedDents []*fsmgr.SeafDirent + + n := 3 + for i := 0; i < n; i++ { + if trees[i] != nil { + ptrs[i] = trees[i].Entries + } + } + + var done bool + var offset = make([]int, n) + for { + dents := make([]*fsmgr.SeafDirent, n) + var firstName string + done = true + for i := 0; i < n; i++ { + if len(ptrs[i]) > offset[i] { + done = false + dent := ptrs[i][offset[i]] + if firstName == "" { + firstName = dent.Name + } else if dent.Name > firstName { + firstName = dent.Name + } + } + } + + if done { + break + } + + var nFiles, nDirs int + for i := 0; i < n; i++ { + if len(ptrs[i]) > offset[i] { + dent := ptrs[i][offset[i]] + if firstName == dent.Name { + if fsmgr.IsDir(dent.Mode) { + nDirs++ + } else { + nFiles++ + } + dents[i] = dent + offset[i]++ + } + } + } + + if nFiles > 0 { + retDents, err := mergeEntries(storeID, dents, baseDir, opt) + if err != nil { + return err + } + mergedDents = append(mergedDents, retDents...) + } + + if nDirs > 0 { + retDents, err := mergeDirectories(storeID, dents, baseDir, opt) + if err != nil { + return err + } + mergedDents = append(mergedDents, retDents...) 
+ } + } + + sort.Sort(Dirents(mergedDents)) + mergedTree, err := fsmgr.NewSeafdir(1, mergedDents) + if err != nil { + err := fmt.Errorf("failed to new seafdir: %v", err) + return err + } + + opt.mergedRoot = mergedTree.DirID + + if trees[1] != nil && trees[1].DirID == mergedTree.DirID || + trees[2] != nil && trees[2].DirID == mergedTree.DirID { + return nil + } + + err = fsmgr.SaveSeafdir(storeID, mergedTree) + if err != nil { + err := fmt.Errorf("failed to save merged tree %s/%s", storeID, baseDir) + return err + } + + return nil +} + +func mergeEntries(storeID string, dents []*fsmgr.SeafDirent, baseDir string, opt *mergeOptions) ([]*fsmgr.SeafDirent, error) { + var mergedDents []*fsmgr.SeafDirent + n := 3 + files := make([]*fsmgr.SeafDirent, n) + + for i := 0; i < n; i++ { + if dents[i] != nil && !fsmgr.IsDir(dents[i].Mode) { + files[i] = dents[i] + } + } + + base := files[0] + head := files[1] + remote := files[2] + + if head != nil && remote != nil { + if head.ID == remote.ID { + mergedDents = append(mergedDents, head) + } else if base != nil && base.ID == head.ID { + mergedDents = append(mergedDents, remote) + } else if base != nil && base.ID == remote.ID { + mergedDents = append(mergedDents, head) + } else { + conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, head.Name) + if conflictName == "" { + err := fmt.Errorf("failed to generate conflict file name") + return nil, err + } + dents[2].Name = conflictName + mergedDents = append(mergedDents, remote) + opt.conflict = true + } + } else if base != nil && head == nil && remote != nil { + if base.ID != remote.ID { + if dents[1] != nil { + conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, remote.Name) + if conflictName == "" { + err := fmt.Errorf("failed to generate conflict file name") + return nil, err + } + dents[2].Name = conflictName + mergedDents = append(mergedDents, remote) + opt.conflict = true + } else { + mergedDents = append(mergedDents, remote) + } + } + } else if base != nil && head != nil && remote == nil { + if base.ID != head.ID { + if dents[2] != nil { + conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, dents[2].Name) + if conflictName == "" { + err := fmt.Errorf("failed to generate conflict file name") + return nil, err + } + dents[2].Name = conflictName + mergedDents = append(mergedDents, head) + opt.conflict = true + } else { + mergedDents = append(mergedDents, head) + } + } + } else if base == nil && head == nil && remote != nil { + if dents[1] == nil { + mergedDents = append(mergedDents, remote) + } else if dents[0] != nil && dents[0].ID == dents[1].ID { + mergedDents = append(mergedDents, remote) + } else { + conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, remote.Name) + if conflictName == "" { + err := fmt.Errorf("failed to generate conflict file name") + return nil, err + } + dents[2].Name = conflictName + mergedDents = append(mergedDents, remote) + opt.conflict = true + } + } else if base == nil && head != nil && remote == nil { + if dents[2] == nil { + mergedDents = append(mergedDents, head) + } else if dents[0] != nil && dents[0].ID == dents[2].ID { + mergedDents = append(mergedDents, head) + } else { + conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, dents[2].Name) + if conflictName == "" { + err := fmt.Errorf("failed to generate conflict file name") + return nil, err + } + dents[2].Name = conflictName + mergedDents = append(mergedDents, head) + opt.conflict = true + } + } else if base != nil && head == nil && remote == nil { + } + + 
return mergedDents, nil
+}
+
+func mergeDirectories(storeID string, dents []*fsmgr.SeafDirent, baseDir string, opt *mergeOptions) ([]*fsmgr.SeafDirent, error) {
+    var dirMask int
+    var mergedDents []*fsmgr.SeafDirent
+    var dirName string
+    n := 3
+    subDirs := make([]*fsmgr.SeafDir, n)
+    for i := 0; i < n; i++ {
+        if dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {
+            dirMask |= 1 << i
+        }
+    }
+
+    switch dirMask {
+    case 0:
+        err := fmt.Errorf("no dirent for merge")
+        return nil, err
+    case 1:
+        return mergedDents, nil
+    case 2:
+        mergedDents = append(mergedDents, dents[1])
+        return mergedDents, nil
+    case 3:
+        if dents[0].ID == dents[1].ID {
+            return mergedDents, nil
+        }
+    case 4:
+        mergedDents = append(mergedDents, dents[2])
+        return mergedDents, nil
+    case 5:
+        if dents[0].ID == dents[2].ID {
+            return mergedDents, nil
+        }
+    case 6:
+        // The dir exists in head and remote but not in base; handle it
+        // together with case 7 (Go requires an explicit fallthrough here).
+        fallthrough
+    case 7:
+        if dents[1].ID == dents[2].ID {
+            mergedDents = append(mergedDents, dents[1])
+            return mergedDents, nil
+        } else if dents[0] != nil && dents[0].ID == dents[1].ID {
+            mergedDents = append(mergedDents, dents[2])
+            return mergedDents, nil
+        } else if dents[0] != nil && dents[0].ID == dents[2].ID {
+            mergedDents = append(mergedDents, dents[1])
+            return mergedDents, nil
+        }
+    default:
+        err := fmt.Errorf("wrong dir mask for merge")
+        return nil, err
+    }
+
+    for i := 0; i < n; i++ {
+        if dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {
+            dir, err := fsmgr.GetSeafdir(storeID, dents[i].ID)
+            if err != nil {
+                err := fmt.Errorf("failed to get seafdir %s/%s", storeID, dents[i].ID)
+                return nil, err
+            }
+            subDirs[i] = dir
+            dirName = dents[i].Name
+        }
+    }
+
+    newBaseDir := filepath.Join(baseDir, dirName)
+    newBaseDir = newBaseDir + "/"
+    err := mergeTreesRecursive(storeID, subDirs, newBaseDir, opt)
+    if err != nil {
+        err := fmt.Errorf("failed to merge trees: %v", err)
+        return nil, err
+    }
+
+    if dirMask == 3 || dirMask == 6 || dirMask == 7 {
+        dent := dents[1]
+        dent.ID = opt.mergedRoot
+        mergedDents = append(mergedDents, dent)
+    } else if dirMask == 5 {
+        dent := dents[2]
+        dent.ID = opt.mergedRoot
+        mergedDents = append(mergedDents, dent)
+    }
+
+    return mergedDents, nil
+}
+
+func mergeConflictFileName(storeID string, opt *mergeOptions, baseDir, fileName string) (string, error) {
+    filePath := filepath.Join(baseDir, fileName)
+    modifier, mtime, err := getFileModifierMtime(opt.remoteRepoID, storeID, opt.remoteHead, filePath)
+    if err != nil {
+        commit, err := commitmgr.Load(opt.remoteRepoID, opt.remoteHead)
+        if err != nil {
+            err := fmt.Errorf("failed to get head commit")
+            return "", err
+        }
+        modifier = commit.CreatorName
+        mtime = time.Now().Unix()
+    }
+
+    conflictName := genConflictPath(fileName, modifier, mtime)
+
+    return conflictName, nil
+}
+
+func genConflictPath(originPath, modifier string, mtime int64) string {
+    var conflictPath string
+    // Embed the file's modification time in the generated name.
+    timeBuf := time.Unix(mtime, 0).Format("2006-Jan-2-15-04-05")
+    dot := strings.Index(originPath, ".")
+    if dot < 0 {
+        if modifier != "" {
+            conflictPath = fmt.Sprintf("%s (SFConflict %s %s)",
+                originPath, modifier, timeBuf)
+        } else {
+            conflictPath = fmt.Sprintf("%s (SFConflict %s)",
+                originPath, timeBuf)
+        }
+    } else {
+        if modifier != "" {
+            conflictPath = fmt.Sprintf("%s (SFConflict %s %s).%s",
+                originPath[:dot], modifier, timeBuf, originPath[dot+1:])
+        } else {
+            conflictPath = fmt.Sprintf("%s (SFConflict %s).%s",
+                originPath[:dot], timeBuf, originPath[dot+1:])
+        }
+    }
+
+    return conflictPath
+}
+
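The conflict-name layout is easiest to see with concrete inputs. Below is a minimal test-style sketch; it assumes it sits in the same main package as merge.go, and the file name, modifier and timestamp are invented for illustration:

```go
package main

import (
	"strings"
	"testing"
	"time"
)

// Sketch only: shows the shape of names produced by genConflictPath.
func TestGenConflictPathLayout(t *testing.T) {
	// Invented modification time: 2020-Jul-7 10:30:00 local time.
	mtime := time.Date(2020, time.July, 7, 10, 30, 0, 0, time.Local).Unix()

	// With an extension, the marker is inserted before the suffix, e.g.
	// "report (SFConflict alice@example.com 2020-Jul-7-10-30-00).txt".
	got := genConflictPath("report.txt", "alice@example.com", mtime)
	if !strings.Contains(got, "(SFConflict alice@example.com") || !strings.HasSuffix(got, ".txt") {
		t.Errorf("unexpected conflict name: %s", got)
	}

	// Without a modifier, only the timestamp is embedded, e.g.
	// "README (SFConflict 2020-Jul-7-10-30-00)".
	got = genConflictPath("README", "", mtime)
	if !strings.Contains(got, "(SFConflict 2020") {
		t.Errorf("unexpected conflict name: %s", got)
	}
}
```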
+func getFileModifierMtime(repoID, storeID, head, filePath string) (string, int64, error) { + commit, err := commitmgr.Load(repoID, head) + if err != nil { + err := fmt.Errorf("failed to get head commit") + return "", -1, err + } + + parent := filepath.Dir(filePath) + if parent == "." { + parent = "" + } + + fileName := filepath.Base(filePath) + dir, err := fsmgr.GetSeafdirByPath(storeID, commit.RootID, parent) + if err != nil { + err := fmt.Errorf("dir %s doesn't exist in repo %s", parent, repoID) + return "", -1, err + } + + var dent *fsmgr.SeafDirent + entries := dir.Entries + for _, d := range entries { + if d.Name == fileName { + dent = d + break + } + } + + if dent == nil { + err := fmt.Errorf("file %s doesn't exist in repo %s", fileName, repoID) + return "", -1, err + } + + return dent.Modifier, dent.Mtime, nil +} diff --git a/fileserver/merge_test.go b/fileserver/merge_test.go new file mode 100644 index 0000000..14ee59b --- /dev/null +++ b/fileserver/merge_test.go @@ -0,0 +1,485 @@ +package main + +import ( + "fmt" + "os" + "syscall" + "testing" + + "github.com/haiwen/seafile-server/fileserver/commitmgr" + "github.com/haiwen/seafile-server/fileserver/fsmgr" +) + +const ( + mergeTestCommitID = "0401fc662e3bc87a41f299a907c056aaf8322a27" + mergeTestRepoID = "b1f2ad61-9164-418a-a47f-ab805dbd5694" + mergeTestSeafileConfPath = "/tmp/conf" + mergeTestSeafileDataDir = "/tmp/conf/seafile-data" +) + +var mergeTestTree1 string +var mergeTestTree2 string +var mergeTestTree3 string +var mergeTestTree4 string +var mergeTestTree5 string +var mergeTestTree1CommitID string +var mergeTestTree2CommitID string +var mergeTestTree3CommitID string +var mergeTestTree4CommitID string +var mergeTestTree5CommitID string + +/* + test directory structure: + tree1 + |--bbb + |-- testfile(size:1) + + tree2 + |--bbb + |-- testfile(size:10) + + tree3 + |--bbb + + tree4 + |--bbb + |-- testfile(size:100) + + tree5 + |-- +*/ +func mergeTestCreateTestDir() error { + modeDir := uint32(syscall.S_IFDIR | 0644) + modeFile := uint32(syscall.S_IFREG | 0644) + + emptyDir, err := mergeTestCreateSeafdir(nil) + if err != nil { + err := fmt.Errorf("failed to get seafdir: %v", err) + return err + } + mergeTestTree5 = emptyDir + + file1, err := fsmgr.NewSeafile(1, 1, nil) + if err != nil { + err := fmt.Errorf("failed to new seafile: %v", err) + return err + } + err = fsmgr.SaveSeafile(mergeTestRepoID, file1) + if err != nil { + err := fmt.Errorf("failed to save seafile: %v", err) + return err + } + + dent1 := fsmgr.SeafDirent{ID: file1.FileID, Name: "testfile", Mode: modeFile, Size: 1} + dir1, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent1}) + if err != nil { + err := fmt.Errorf("failed to get seafdir: %v", err) + return err + } + dent2 := fsmgr.SeafDirent{ID: dir1, Name: "bbb", Mode: modeDir} + dir2, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent2}) + if err != nil { + err := fmt.Errorf("failed to get seafdir: %v", err) + return err + } + + mergeTestTree1 = dir2 + + commit1 := commitmgr.NewCommit(mergeTestRepoID, "", mergeTestTree1, "seafile", "this is the first commit.\n") + err = commitmgr.Save(commit1) + if err != nil { + err := fmt.Errorf("failed to save commit: %v", err) + return err + } + mergeTestTree1CommitID = commit1.CommitID + + file2, err := fsmgr.NewSeafile(1, 10, nil) + if err != nil { + err := fmt.Errorf("failed to new seafile: %v", err) + return err + } + err = fsmgr.SaveSeafile(mergeTestRepoID, file2) + if err != nil { + err := fmt.Errorf("failed to save seafile: %v", err) + return err + } + + 
dent3 := fsmgr.SeafDirent{ID: file2.FileID, Name: "testfile", Mode: modeFile, Size: 10}
+    dir3, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent3})
+    if err != nil {
+        err := fmt.Errorf("failed to get seafdir: %v", err)
+        return err
+    }
+
+    dent4 := fsmgr.SeafDirent{ID: dir3, Name: "bbb", Mode: modeDir}
+    dir4, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent4})
+    if err != nil {
+        err := fmt.Errorf("failed to get seafdir: %v", err)
+        return err
+    }
+
+    mergeTestTree2 = dir4
+
+    commit2 := commitmgr.NewCommit(mergeTestRepoID, "", mergeTestTree2, "seafile", "this is the second commit.\n")
+    err = commitmgr.Save(commit2)
+    if err != nil {
+        err := fmt.Errorf("failed to save commit: %v", err)
+        return err
+    }
+    mergeTestTree2CommitID = commit2.CommitID
+
+    dir5, err := mergeTestCreateSeafdir(nil)
+    if err != nil {
+        err := fmt.Errorf("failed to get seafdir: %v", err)
+        return err
+    }
+
+    dent6 := fsmgr.SeafDirent{ID: dir5, Name: "bbb", Mode: modeDir}
+    dir6, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent6})
+    if err != nil {
+        err := fmt.Errorf("failed to get seafdir: %v", err)
+        return err
+    }
+
+    mergeTestTree3 = dir6
+
+    commit3 := commitmgr.NewCommit(mergeTestRepoID, "", mergeTestTree3, "seafile", "this is the third commit.\n")
+    err = commitmgr.Save(commit3)
+    if err != nil {
+        err := fmt.Errorf("failed to save commit: %v", err)
+        return err
+    }
+    mergeTestTree3CommitID = commit3.CommitID
+
+    file3, err := fsmgr.NewSeafile(1, 100, nil)
+    if err != nil {
+        err := fmt.Errorf("failed to new seafile: %v", err)
+        return err
+    }
+    err = fsmgr.SaveSeafile(mergeTestRepoID, file3)
+    if err != nil {
+        err := fmt.Errorf("failed to save seafile: %v", err)
+        return err
+    }
+    dent7 := fsmgr.SeafDirent{ID: file3.FileID, Name: "testfile", Mode: modeFile, Size: 100}
+    dir7, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent7})
+    if err != nil {
+        err := fmt.Errorf("failed to get seafdir: %v", err)
+        return err
+    }
+
+    dent8 := fsmgr.SeafDirent{ID: dir7, Name: "bbb", Mode: modeDir}
+    dir8, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent8})
+    if err != nil {
+        err := fmt.Errorf("failed to get seafdir: %v", err)
+        return err
+    }
+
+    mergeTestTree4 = dir8
+
+    commit4 := commitmgr.NewCommit(mergeTestRepoID, "", mergeTestTree4, "seafile", "this is the fourth commit.\n")
+    err = commitmgr.Save(commit4)
+    if err != nil {
+        err := fmt.Errorf("failed to save commit: %v", err)
+        return err
+    }
+    mergeTestTree4CommitID = commit4.CommitID
+
+    return nil
+}
+
+func mergeTestCreateSeafdir(dents []*fsmgr.SeafDirent) (string, error) {
+    seafdir, err := fsmgr.NewSeafdir(1, dents)
+    if err != nil {
+        err := fmt.Errorf("failed to new seafdir: %v", err)
+        return "", err
+    }
+    err = fsmgr.SaveSeafdir(mergeTestRepoID, seafdir)
+    if err != nil {
+        return "", err
+    }
+
+    return seafdir.DirID, nil
+}
+
+func mergeTestDelFile() error {
+    err := os.RemoveAll(mergeTestSeafileConfPath)
+    if err != nil {
+        return err
+    }
+
+    return nil
+}
+
+func TestMergeTrees(t *testing.T) {
+    commitmgr.Init(mergeTestSeafileConfPath, mergeTestSeafileDataDir)
+    fsmgr.Init(mergeTestSeafileConfPath, mergeTestSeafileDataDir)
+    err := mergeTestCreateTestDir()
+    if err != nil {
+        fmt.Printf("failed to create test dir: %v", err)
+        os.Exit(1)
+    }
+
+    t.Run("test1", testMergeTrees1)
+    t.Run("test2", testMergeTrees2)
+    t.Run("test3", testMergeTrees3)
+    t.Run("test4", testMergeTrees4)
+    t.Run("test5", testMergeTrees5)
+    t.Run("test6", testMergeTrees6)
+    t.Run("test7", testMergeTrees7)
+    t.Run("test8", testMergeTrees8)
+
t.Run("test9", testMergeTrees9) + t.Run("test10", testMergeTrees10) + t.Run("test11", testMergeTrees11) + t.Run("test12", testMergeTrees12) + + err = mergeTestDelFile() + if err != nil { + fmt.Printf("failed to remove test file : %v", err) + os.Exit(1) + } +} + +// head add file +func testMergeTrees1(t *testing.T) { + commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID) + if err != nil { + t.Errorf("failed to load commit.\n") + } + roots := []string{mergeTestTree3, mergeTestTree2, mergeTestTree3} + opt := new(mergeOptions) + opt.remoteRepoID = mergeTestRepoID + opt.remoteHead = commit.CommitID + + err = mergeTrees(mergeTestRepoID, roots, opt) + if err != nil { + t.Errorf("failed to merge.\n") + } + + if opt.mergedRoot != mergeTestTree2 { + t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2) + } +} + +// remote add file +func testMergeTrees2(t *testing.T) { + commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID) + if err != nil { + t.Errorf("failed to load commit.\n") + } + roots := []string{mergeTestTree3, mergeTestTree3, mergeTestTree2} + opt := new(mergeOptions) + opt.remoteRepoID = mergeTestRepoID + opt.remoteHead = commit.CommitID + + err = mergeTrees(mergeTestRepoID, roots, opt) + if err != nil { + t.Errorf("failed to merge.\n") + } + + if opt.mergedRoot != mergeTestTree2 { + t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2) + } +} + +// head modify file +func testMergeTrees3(t *testing.T) { + commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID) + if err != nil { + t.Errorf("failed to load commit.\n") + } + roots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree1} + opt := new(mergeOptions) + opt.remoteRepoID = mergeTestRepoID + opt.remoteHead = commit.CommitID + + err = mergeTrees(mergeTestRepoID, roots, opt) + if err != nil { + t.Errorf("failed to merge.\n") + } + + if opt.mergedRoot != mergeTestTree2 { + t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2) + } +} + +// remote modify file +func testMergeTrees4(t *testing.T) { + commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID) + if err != nil { + t.Errorf("failed to load commit.\n") + } + roots := []string{mergeTestTree1, mergeTestTree1, mergeTestTree2} + opt := new(mergeOptions) + opt.remoteRepoID = mergeTestRepoID + opt.remoteHead = commit.CommitID + + err = mergeTrees(mergeTestRepoID, roots, opt) + if err != nil { + t.Errorf("failed to merge.\n") + } + + if opt.mergedRoot != mergeTestTree2 { + t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2) + } +} + +// head and remote add file +func testMergeTrees5(t *testing.T) { + commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID) + if err != nil { + t.Errorf("failed to load commit.\n") + } + roots := []string{mergeTestTree3, mergeTestTree1, mergeTestTree2} + opt := new(mergeOptions) + opt.remoteRepoID = mergeTestRepoID + opt.remoteHead = commit.CommitID + + err = mergeTrees(mergeTestRepoID, roots, opt) + if err != nil { + t.Errorf("failed to merge.\n") + } + if !opt.conflict { + t.Errorf("merge error %s.\n", opt.mergedRoot) + } +} + +// head and remote modify file +func testMergeTrees6(t *testing.T) { + commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID) + if err != nil { + t.Errorf("failed to load commit.\n") + } + roots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree4} + opt := new(mergeOptions) + opt.remoteRepoID = mergeTestRepoID + opt.remoteHead = commit.CommitID + + err = mergeTrees(mergeTestRepoID, 
roots, opt)
+    if err != nil {
+        t.Errorf("failed to merge.\n")
+    }
+    if !opt.conflict {
+        t.Errorf("merge error %s.\n", opt.mergedRoot)
+    }
+}
+
+// head modify file and remote delete file
+func testMergeTrees7(t *testing.T) {
+    commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
+    if err != nil {
+        t.Errorf("failed to load commit.\n")
+    }
+    roots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree3}
+    opt := new(mergeOptions)
+    opt.remoteRepoID = mergeTestRepoID
+    opt.remoteHead = commit.CommitID
+
+    err = mergeTrees(mergeTestRepoID, roots, opt)
+    if err != nil {
+        t.Errorf("failed to merge.\n")
+    }
+    if opt.mergedRoot != mergeTestTree2 {
+        t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
+    }
+}
+
+// head delete file and remote modify file
+func testMergeTrees8(t *testing.T) {
+    commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
+    if err != nil {
+        t.Errorf("failed to load commit.\n")
+    }
+    roots := []string{mergeTestTree1, mergeTestTree3, mergeTestTree2}
+    opt := new(mergeOptions)
+    opt.remoteRepoID = mergeTestRepoID
+    opt.remoteHead = commit.CommitID
+
+    err = mergeTrees(mergeTestRepoID, roots, opt)
+    if err != nil {
+        t.Errorf("failed to merge.\n")
+    }
+    if opt.mergedRoot != mergeTestTree2 {
+        t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
+    }
+}
+
+// head modify file and remote delete dir of this file
+func testMergeTrees9(t *testing.T) {
+    commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
+    if err != nil {
+        t.Errorf("failed to load commit.\n")
+    }
+    roots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree5}
+    opt := new(mergeOptions)
+    opt.remoteRepoID = mergeTestRepoID
+    opt.remoteHead = commit.CommitID
+
+    err = mergeTrees(mergeTestRepoID, roots, opt)
+    if err != nil {
+        t.Errorf("failed to merge.\n")
+    }
+    if opt.mergedRoot != mergeTestTree2 {
+        t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
+    }
+}
+
+// remote modify file and head delete dir of this file
+func testMergeTrees10(t *testing.T) {
+    commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
+    if err != nil {
+        t.Errorf("failed to load commit.\n")
+    }
+    roots := []string{mergeTestTree1, mergeTestTree5, mergeTestTree2}
+    opt := new(mergeOptions)
+    opt.remoteRepoID = mergeTestRepoID
+    opt.remoteHead = commit.CommitID
+
+    err = mergeTrees(mergeTestRepoID, roots, opt)
+    if err != nil {
+        t.Errorf("failed to merge.\n")
+    }
+    if opt.mergedRoot != mergeTestTree2 {
+        t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
+    }
+}
+
+// head add file and remote delete dir of this file
+func testMergeTrees11(t *testing.T) {
+    commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
+    if err != nil {
+        t.Errorf("failed to load commit.\n")
+    }
+    roots := []string{mergeTestTree3, mergeTestTree1, mergeTestTree5}
+    opt := new(mergeOptions)
+    opt.remoteRepoID = mergeTestRepoID
+    opt.remoteHead = commit.CommitID
+
+    err = mergeTrees(mergeTestRepoID, roots, opt)
+    if err != nil {
+        t.Errorf("failed to merge.\n")
+    }
+    if opt.mergedRoot != mergeTestTree1 {
+        t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree1)
+    }
+}
+
+// remote add file and head delete dir of this file
+func testMergeTrees12(t *testing.T) {
+    commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
+    if err != nil {
+        t.Errorf("failed to load commit.\n")
+    }
+    roots := []string{mergeTestTree3, mergeTestTree5, mergeTestTree1}
+    opt := new(mergeOptions)
+    opt.remoteRepoID = mergeTestRepoID
+    opt.remoteHead = commit.CommitID
+
+    err = mergeTrees(mergeTestRepoID, roots, opt)
+    if err != nil {
+        t.Errorf("failed to merge.\n")
+    }
+    if opt.mergedRoot != mergeTestTree1 {
+        t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree1)
+    }
+}
diff --git a/fileserver/objstore/backend_fs.go b/fileserver/objstore/backend_fs.go
new file mode 100644
index 0000000..ec0e60b
--- /dev/null
+++ b/fileserver/objstore/backend_fs.go
@@ -0,0 +1,99 @@
+// Implementation of file system storage backend.
+package objstore
+
+import (
+    "io"
+    "io/ioutil"
+    "os"
+    "path"
+)
+
+type fsBackend struct {
+    // Path of the object directory
+    objDir  string
+    objType string
+    tmpDir  string
+}
+
+func newFSBackend(seafileDataDir string, objType string) (*fsBackend, error) {
+    objDir := path.Join(seafileDataDir, "storage", objType)
+    err := os.MkdirAll(objDir, os.ModePerm)
+    if err != nil {
+        return nil, err
+    }
+    tmpDir := path.Join(seafileDataDir, "tmpfiles")
+    err = os.MkdirAll(tmpDir, os.ModePerm)
+    if err != nil {
+        return nil, err
+    }
+    backend := new(fsBackend)
+    backend.objDir = objDir
+    backend.objType = objType
+    backend.tmpDir = tmpDir
+    return backend, nil
+}
+
+func (b *fsBackend) read(repoID string, objID string, w io.Writer) error {
+    p := path.Join(b.objDir, repoID, objID[:2], objID[2:])
+    fd, err := os.Open(p)
+    if err != nil {
+        return err
+    }
+    defer fd.Close()
+
+    _, err = io.Copy(w, fd)
+    if err != nil {
+        return err
+    }
+
+    return nil
+}
+
+// write stores the object via a temp file and an atomic rename.
+// Note: the sync flag is currently not honored by this backend.
+func (b *fsBackend) write(repoID string, objID string, r io.Reader, sync bool) error {
+    parentDir := path.Join(b.objDir, repoID, objID[:2])
+    p := path.Join(parentDir, objID[2:])
+    err := os.MkdirAll(parentDir, os.ModePerm)
+    if err != nil {
+        return err
+    }
+
+    tFile, err := ioutil.TempFile(b.tmpDir, objID)
+    if err != nil {
+        return err
+    }
+    defer os.Remove(tFile.Name())
+    defer tFile.Close()
+
+    _, err = io.Copy(tFile, r)
+    if err != nil {
+        return err
+    }
+
+    err = os.Rename(tFile.Name(), p)
+    if err != nil {
+        return err
+    }
+
+    return nil
+}
+
+func (b *fsBackend) exists(repoID string, objID string) (bool, error) {
+    p := path.Join(b.objDir, repoID, objID[:2], objID[2:])
+    _, err := os.Stat(p)
+    if err != nil {
+        if os.IsNotExist(err) {
+            return false, nil
+        }
+        // Report other stat errors instead of guessing that the object exists.
+        return false, err
+    }
+    return true, nil
+}
+
+func (b *fsBackend) stat(repoID string, objID string) (int64, error) {
+    p := path.Join(b.objDir, repoID, objID[:2], objID[2:])
+    fileInfo, err := os.Stat(p)
+    if err != nil {
+        return -1, err
+    }
+    return fileInfo.Size(), nil
+}
diff --git a/fileserver/objstore/objstore.go b/fileserver/objstore/objstore.go
new file mode 100644
index 0000000..bbaabba
--- /dev/null
+++ b/fileserver/objstore/objstore.go
@@ -0,0 +1,56 @@
+// Package objstore provides operations for commit, fs and block objects.
+// It is a low-level package used by the commitmgr, fsmgr and blockmgr
+// packages to access storage.
+package objstore
+
+import (
+    "io"
+)
+
+// ObjectStore is a container to access storage backend
+type ObjectStore struct {
+    // can be "commit", "fs", or "block"
+    ObjType string
+    backend storageBackend
+}
+
+// storageBackend is the interface implemented by storage backends.
+// An object store may have one or multiple storage backends.
+type storageBackend interface {
+    // Read an object from backend and write the contents into w.
+    read(repoID string, objID string, w io.Writer) (err error)
+    // Write the contents from r to the object.
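+    // If sync is true, implementations are expected to flush the data to
+    // stable storage before returning.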
+	write(repoID string, objID string, r io.Reader, sync bool) (err error)
+	// exists checks whether an object exists.
+	exists(repoID string, objID string) (res bool, err error)
+	// stat calculates an object's size.
+	stat(repoID string, objID string) (res int64, err error)
+}
+
+// New returns a new object store for a given type of objects.
+// objType can be "commit", "fs", or "block".
+func New(seafileConfPath string, seafileDataDir string, objType string) *ObjectStore {
+	obj := new(ObjectStore)
+	obj.ObjType = objType
+	obj.backend, _ = newFSBackend(seafileDataDir, objType)
+	return obj
+}
+
+// Read reads data from the storage backend.
+func (s *ObjectStore) Read(repoID string, objID string, w io.Writer) (err error) {
+	return s.backend.read(repoID, objID, w)
+}
+
+// Write writes data to the storage backend.
+func (s *ObjectStore) Write(repoID string, objID string, r io.Reader, sync bool) (err error) {
+	return s.backend.write(repoID, objID, r, sync)
+}
+
+// Exists checks whether an object exists.
+func (s *ObjectStore) Exists(repoID string, objID string) (res bool, err error) {
+	return s.backend.exists(repoID, objID)
+}
+
+// Stat calculates an object's size.
+func (s *ObjectStore) Stat(repoID string, objID string) (res int64, err error) {
+	return s.backend.stat(repoID, objID)
+}
diff --git a/fileserver/objstore/objstore_test.go b/fileserver/objstore/objstore_test.go
new file mode 100644
index 0000000..bf5c3cd
--- /dev/null
+++ b/fileserver/objstore/objstore_test.go
@@ -0,0 +1,105 @@
+package objstore
+
+import (
+	"fmt"
+	"os"
+	"path"
+	"testing"
+)
+
+const (
+	testFile        = "output.data"
+	seafileConfPath = "/tmp/conf"
+	seafileDataDir  = "/tmp/conf/seafile-data"
+	repoID          = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
+	objID           = "0401fc662e3bc87a41f299a907c056aaf8322a27"
+)
+
+func createFile() error {
+	outputFile, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		return err
+	}
+	defer outputFile.Close()
+
+	outputString := "hello world!\n"
+	for i := 0; i < 10; i++ {
+		outputFile.WriteString(outputString)
+	}
+
+	return nil
+}
+
+func delFile() error {
+	err := os.Remove(testFile)
+	if err != nil {
+		return err
+	}
+
+	err = os.RemoveAll(seafileConfPath)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func TestMain(m *testing.M) {
+	err := createFile()
+	if err != nil {
+		fmt.Printf("Failed to create test file : %v\n", err)
+		os.Exit(1)
+	}
+	code := m.Run()
+	err = delFile()
+	if err != nil {
+		fmt.Printf("Failed to remove test file : %v\n", err)
+		os.Exit(1)
+	}
+	os.Exit(code)
+}
+
+func testWrite(t *testing.T) {
+	inputFile, err := os.Open(testFile)
+	if err != nil {
+		t.Errorf("Failed to open test file : %v\n", err)
+	}
+	defer inputFile.Close()
+
+	bend := New(seafileConfPath, seafileDataDir, "commit")
+	if err := bend.Write(repoID, objID, inputFile, true); err != nil {
+		t.Errorf("Failed to write backend : %v\n", err)
+	}
+}
+
+func testRead(t *testing.T) {
+	outputFile, err := os.OpenFile(testFile, os.O_WRONLY, 0666)
+	if err != nil {
+		t.Errorf("Failed to open test file : %v\n", err)
+	}
+	defer outputFile.Close()
+
+	bend := New(seafileConfPath, seafileDataDir, "commit")
+	err = bend.Read(repoID, objID, outputFile)
+	if err != nil {
+		t.Errorf("Failed to read backend : %v\n", err)
+	}
+}
+
+func testExists(t *testing.T) {
+	bend := New(seafileConfPath, seafileDataDir, "commit")
+	ret, _ := bend.Exists(repoID, objID)
+	if !ret {
+		t.Errorf("File does not exist\n")
+	}
+
+	filePath := path.Join(seafileDataDir, "storage", "commit", repoID, objID[:2], objID[2:])
+	fileInfo, err := os.Stat(filePath)
+	if err != nil {
+		t.Errorf("Failed to stat file : %v\n", err)
+		return
+	}
+	if fileInfo.Size() != 130 {
+		t.Errorf("File exists, but its size is incorrect.\n")
+	}
+}
+
+func TestObjStore(t *testing.T) {
+	testWrite(t)
+	testRead(t)
+	testExists(t)
+}
diff --git a/fileserver/quota.go b/fileserver/quota.go
new file mode 100644
index 0000000..b92313e
--- /dev/null
+++ b/fileserver/quota.go
@@ -0,0 +1,173 @@
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/haiwen/seafile-server/fileserver/repomgr"
+	"gopkg.in/ini.v1"
+)
+
+// InfiniteQuota indicates that the quota is unlimited.
+const (
+	InfiniteQuota = -2
+)
+
+func checkQuota(repoID string, delta int64) (int, error) {
+	if repoID == "" {
+		err := fmt.Errorf("bad arguments")
+		return -1, err
+	}
+
+	vInfo, err := repomgr.GetVirtualRepoInfo(repoID)
+	if err != nil {
+		err := fmt.Errorf("failed to get virtual repo: %v", err)
+		return -1, err
+	}
+	rRepoID := repoID
+	if vInfo != nil {
+		rRepoID = vInfo.OriginRepoID
+	}
+
+	user, err := repomgr.GetRepoOwner(rRepoID)
+	if err != nil {
+		err := fmt.Errorf("failed to get repo owner: %v", err)
+		return -1, err
+	}
+	if user == "" {
+		err := fmt.Errorf("repo %s has no owner", repoID)
+		return -1, err
+	}
+	quota, err := getUserQuota(user)
+	if err != nil {
+		err := fmt.Errorf("failed to get user quota: %v", err)
+		return -1, err
+	}
+
+	if quota == InfiniteQuota {
+		return 0, nil
+	}
+	usage, err := getUserUsage(user)
+	if err != nil || usage < 0 {
+		err := fmt.Errorf("failed to get user usage: %v", err)
+		return -1, err
+	}
+	usage += delta
+	if usage >= quota {
+		return 1, nil
+	}
+
+	return 0, nil
+}
+
+func getUserQuota(user string) (int64, error) {
+	var quota int64
+	sqlStr := "SELECT quota FROM UserQuota WHERE user=?"
+	row := seafileDB.QueryRow(sqlStr, user)
+	if err := row.Scan(&quota); err != nil {
+		if err != sql.ErrNoRows {
+			return -1, err
+		}
+	}
+
+	if quota <= 0 {
+		quota = getDefaultQuota()
+	}
+
+	return quota, nil
+}
+
+// Storage units.
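+// These multipliers are decimal (SI) units, so a quota of "2gb" means
+// 2*10^9 bytes rather than 2 GiB. Note also that parseQuota below matches
+// lowercase suffixes only; an uppercase value such as "2GB" falls through
+// to the bare-number branch and yields InfiniteQuota when parsing fails.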
+const ( + KB = 1000 + MB = 1000000 + GB = 1000000000 + TB = 1000000000000 +) + +func getDefaultQuota() int64 { + seafileConfPath := filepath.Join(absDataDir, "seafile.conf") + config, err := ini.Load(seafileConfPath) + if err != nil { + return InfiniteQuota + } + var quota int64 + section, err := config.GetSection("quota") + if err != nil { + return InfiniteQuota + } + key, err := section.GetKey("default") + if err != nil { + return InfiniteQuota + } + quotaStr := key.String() + quota = parseQuota(quotaStr) + + return quota +} + +func parseQuota(quotaStr string) int64 { + var quota int64 + var multiplier int64 = GB + if end := strings.Index(quotaStr, "kb"); end > 0 { + multiplier = KB + quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0) + if err != nil { + return InfiniteQuota + } + quota = quotaInt * multiplier + } else if end := strings.Index(quotaStr, "mb"); end > 0 { + multiplier = MB + quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0) + if err != nil { + return InfiniteQuota + } + quota = quotaInt * multiplier + } else if end := strings.Index(quotaStr, "gb"); end > 0 { + multiplier = GB + quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0) + if err != nil { + return InfiniteQuota + } + quota = quotaInt * multiplier + } else if end := strings.Index(quotaStr, "tb"); end > 0 { + multiplier = TB + quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0) + if err != nil { + return InfiniteQuota + } + quota = quotaInt * multiplier + } else { + quotaInt, err := strconv.ParseInt(quotaStr, 10, 0) + if err != nil { + return InfiniteQuota + } + quota = quotaInt * multiplier + } + + return quota +} + +func getUserUsage(user string) (int64, error) { + var usage sql.NullInt64 + sqlStr := "SELECT SUM(size) FROM " + + "RepoOwner o LEFT JOIN VirtualRepo v ON o.repo_id=v.repo_id, " + + "RepoSize WHERE " + + "owner_id=? AND o.repo_id=RepoSize.repo_id " + + "AND v.repo_id IS NULL" + + row := seafileDB.QueryRow(sqlStr, user) + if err := row.Scan(&usage); err != nil { + if err != sql.ErrNoRows { + return -1, err + } + } + + if usage.Valid { + return usage.Int64, nil + } + + return 0, nil +} diff --git a/fileserver/repomgr/repomgr.go b/fileserver/repomgr/repomgr.go new file mode 100644 index 0000000..05b67d1 --- /dev/null +++ b/fileserver/repomgr/repomgr.go @@ -0,0 +1,643 @@ +// Package repomgr manages repo objects and file operations in repos. +package repomgr + +import ( + "database/sql" + "fmt" + "log" + "time" + + // Change to non-blank imports when use + _ "github.com/haiwen/seafile-server/fileserver/blockmgr" + "github.com/haiwen/seafile-server/fileserver/commitmgr" +) + +// Repo status +const ( + RepoStatusNormal = iota + RepoStatusReadOnly + NRepoStatus +) + +// Repo contains information about a repo. +type Repo struct { + ID string + Name string + Desc string + LastModifier string + LastModificationTime int64 + HeadCommitID string + RootID string + IsCorrupted bool + + // Set when repo is virtual + VirtualInfo *VRepoInfo + + // ID for fs and block store + StoreID string + + // Encrypted repo info + IsEncrypted bool + EncVersion int + Magic string + RandomKey string + Salt string + Version int +} + +// VRepoInfo contains virtual repo information. +type VRepoInfo struct { + RepoID string + OriginRepoID string + Path string + BaseCommitID string +} + +var seafileDB *sql.DB + +// Init initialize status of repomgr package +func Init(seafDB *sql.DB) { + seafileDB = seafDB +} + +// Get returns Repo object by repo ID. 
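+// It returns nil when the repo does not exist, when its master branch has
+// no head commit, or when the head commit fails to load, so callers must
+// nil-check the result.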
+func Get(id string) *Repo { + query := `SELECT r.repo_id, b.commit_id, v.origin_repo, v.path, v.base_commit FROM ` + + `Repo r LEFT JOIN Branch b ON r.repo_id = b.repo_id ` + + `LEFT JOIN VirtualRepo v ON r.repo_id = v.repo_id ` + + `WHERE r.repo_id = ? AND b.name = 'master'` + + stmt, err := seafileDB.Prepare(query) + if err != nil { + log.Printf("failed to prepare sql : %s :%v", query, err) + return nil + } + defer stmt.Close() + + rows, err := stmt.Query(id) + if err != nil { + log.Printf("failed to query sql : %v", err) + return nil + } + defer rows.Close() + + repo := new(Repo) + + var originRepoID sql.NullString + var path sql.NullString + var baseCommitID sql.NullString + if rows.Next() { + err := rows.Scan(&repo.ID, &repo.HeadCommitID, &originRepoID, &path, &baseCommitID) + if err != nil { + log.Printf("failed to scan sql rows : %v", err) + return nil + } + } else { + return nil + } + + if repo.HeadCommitID == "" { + log.Printf("repo %s is corrupted", id) + return nil + } + + if originRepoID.Valid { + repo.VirtualInfo = new(VRepoInfo) + repo.VirtualInfo.OriginRepoID = originRepoID.String + repo.StoreID = originRepoID.String + + if path.Valid { + repo.VirtualInfo.Path = path.String + } + + if baseCommitID.Valid { + repo.VirtualInfo.BaseCommitID = baseCommitID.String + } + } else { + repo.StoreID = repo.ID + } + + commit, err := commitmgr.Load(repo.ID, repo.HeadCommitID) + if err != nil { + log.Printf("failed to load commit %s/%s : %v", repo.ID, repo.HeadCommitID, err) + return nil + } + + repo.Name = commit.RepoName + repo.Desc = commit.RepoDesc + repo.LastModifier = commit.CreatorName + repo.LastModificationTime = commit.Ctime + repo.RootID = commit.RootID + repo.Version = commit.Version + if commit.Encrypted == "true" { + repo.IsEncrypted = true + repo.EncVersion = commit.EncVersion + if repo.EncVersion == 1 { + repo.Magic = commit.Magic + } else if repo.EncVersion == 2 { + repo.Magic = commit.Magic + repo.RandomKey = commit.RandomKey + } else if repo.EncVersion == 3 { + repo.Magic = commit.Magic + repo.RandomKey = commit.RandomKey + repo.Salt = commit.Salt + } + } + + return repo +} + +// RepoToCommit converts Repo to Commit. +func RepoToCommit(repo *Repo, commit *commitmgr.Commit) { + commit.RepoID = repo.ID + commit.RepoName = repo.Name + if repo.IsEncrypted { + commit.Encrypted = "true" + commit.EncVersion = repo.EncVersion + if repo.EncVersion == 1 { + commit.Magic = repo.Magic + } else if repo.EncVersion == 2 { + commit.Magic = repo.Magic + commit.RandomKey = repo.RandomKey + } else if repo.EncVersion == 3 { + commit.Magic = repo.Magic + commit.RandomKey = repo.RandomKey + commit.Salt = repo.Salt + } + } else { + commit.Encrypted = "false" + } + commit.Version = repo.Version + + return +} + +// GetEx return repo object even if it's corrupted. +func GetEx(id string) *Repo { + query := `SELECT r.repo_id, b.commit_id, v.origin_repo, v.path, v.base_commit FROM ` + + `Repo r LEFT JOIN Branch b ON r.repo_id = b.repo_id ` + + `LEFT JOIN VirtualRepo v ON r.repo_id = v.repo_id ` + + `WHERE r.repo_id = ? 
AND b.name = 'master'`
+
+	stmt, err := seafileDB.Prepare(query)
+	if err != nil {
+		log.Printf("failed to prepare sql : %s :%v", query, err)
+		return nil
+	}
+	defer stmt.Close()
+
+	rows, err := stmt.Query(id)
+	if err != nil {
+		log.Printf("failed to query sql : %v", err)
+		return nil
+	}
+	defer rows.Close()
+
+	repo := new(Repo)
+
+	var originRepoID sql.NullString
+	var path sql.NullString
+	var baseCommitID sql.NullString
+	if rows.Next() {
+		err := rows.Scan(&repo.ID, &repo.HeadCommitID, &originRepoID, &path, &baseCommitID)
+		if err != nil {
+			log.Printf("failed to scan sql rows : %v", err)
+			return nil
+		}
+	} else {
+		return nil
+	}
+	if originRepoID.Valid {
+		repo.VirtualInfo = new(VRepoInfo)
+		repo.VirtualInfo.OriginRepoID = originRepoID.String
+		repo.StoreID = originRepoID.String
+
+		if path.Valid {
+			repo.VirtualInfo.Path = path.String
+		}
+
+		if baseCommitID.Valid {
+			repo.VirtualInfo.BaseCommitID = baseCommitID.String
+		}
+	} else {
+		repo.StoreID = repo.ID
+	}
+
+	if repo.HeadCommitID == "" {
+		repo.IsCorrupted = true
+		return repo
+	}
+
+	commit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
+	if err != nil {
+		log.Printf("failed to load commit %s/%s : %v", repo.ID, repo.HeadCommitID, err)
+		repo.IsCorrupted = true
+		return repo
+	}
+
+	repo.Name = commit.RepoName
+	repo.LastModifier = commit.CreatorName
+	repo.LastModificationTime = commit.Ctime
+	repo.RootID = commit.RootID
+	repo.Version = commit.Version
+	if commit.Encrypted == "true" {
+		repo.IsEncrypted = true
+		repo.EncVersion = commit.EncVersion
+		if repo.EncVersion == 1 {
+			repo.Magic = commit.Magic
+		} else if repo.EncVersion == 2 {
+			repo.Magic = commit.Magic
+			repo.RandomKey = commit.RandomKey
+		} else if repo.EncVersion == 3 {
+			repo.Magic = commit.Magic
+			repo.RandomKey = commit.RandomKey
+			repo.Salt = commit.Salt
+		}
+	}
+
+	return repo
+}
+
+// GetVirtualRepoInfo returns virtual repo info by repo id.
+func GetVirtualRepoInfo(repoID string) (*VRepoInfo, error) {
+	sqlStr := "SELECT repo_id, origin_repo, path, base_commit FROM VirtualRepo WHERE repo_id = ?"
+	vRepoInfo := new(VRepoInfo)
+
+	row := seafileDB.QueryRow(sqlStr, repoID)
+	if err := row.Scan(&vRepoInfo.RepoID, &vRepoInfo.OriginRepoID, &vRepoInfo.Path, &vRepoInfo.BaseCommitID); err != nil {
+		if err != sql.ErrNoRows {
+			return nil, err
+		}
+		return nil, nil
+	}
+	return vRepoInfo, nil
+}
+
+// GetVirtualRepoInfoByOrigin returns virtual repo info by origin repo id.
+func GetVirtualRepoInfoByOrigin(originRepo string) ([]*VRepoInfo, error) {
+	sqlStr := "SELECT repo_id, origin_repo, path, base_commit " +
+		"FROM VirtualRepo WHERE origin_repo=?"
+	var vRepos []*VRepoInfo
+	row, err := seafileDB.Query(sqlStr, originRepo)
+	if err != nil {
+		return nil, err
+	}
+	defer row.Close()
+	for row.Next() {
+		vRepoInfo := new(VRepoInfo)
+		if err := row.Scan(&vRepoInfo.RepoID, &vRepoInfo.OriginRepoID, &vRepoInfo.Path, &vRepoInfo.BaseCommitID); err != nil {
+			if err != sql.ErrNoRows {
+				return nil, err
+			}
+		}
+		vRepos = append(vRepos, vRepoInfo)
+	}
+
+	return vRepos, nil
+}
+
+// GetEmailByToken returns the user's email by token.
+func GetEmailByToken(repoID string, token string) (string, error) {
+	var email string
+	sqlStr := "SELECT email FROM RepoUserToken WHERE repo_id = ? AND token = ?"
+
+	row := seafileDB.QueryRow(sqlStr, repoID, token)
+	if err := row.Scan(&email); err != nil {
+		if err != sql.ErrNoRows {
+			return email, err
+		}
+	}
+	return email, nil
+}
+
+// GetRepoStatus returns the repo status by repo id.
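+// A repo with no RepoInfo row hits sql.ErrNoRows and falls through to the
+// zero value, so it is reported as RepoStatusNormal (0).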
+func GetRepoStatus(repoID string) (int, error) {
+	var status int
+	sqlStr := "SELECT status FROM RepoInfo WHERE repo_id=?"
+
+	row := seafileDB.QueryRow(sqlStr, repoID)
+	if err := row.Scan(&status); err != nil {
+		if err != sql.ErrNoRows {
+			return status, err
+		}
+	}
+	return status, nil
+}
+
+// TokenPeerInfoExists checks whether the token exists.
+func TokenPeerInfoExists(token string) (bool, error) {
+	var exists string
+	sqlStr := "SELECT token FROM RepoTokenPeerInfo WHERE token=?"
+
+	row := seafileDB.QueryRow(sqlStr, token)
+	if err := row.Scan(&exists); err != nil {
+		if err != sql.ErrNoRows {
+			return false, err
+		}
+		return false, nil
+	}
+	return true, nil
+}
+
+// AddTokenPeerInfo adds token peer info to the RepoTokenPeerInfo table.
+func AddTokenPeerInfo(token, peerID, peerIP, peerName, clientVer string, syncTime int64) error {
+	sqlStr := "INSERT INTO RepoTokenPeerInfo (token, peer_id, peer_ip, peer_name, sync_time, client_ver) " +
+		"VALUES (?, ?, ?, ?, ?, ?)"
+
+	if _, err := seafileDB.Exec(sqlStr, token, peerID, peerIP, peerName, syncTime, clientVer); err != nil {
+		return err
+	}
+	return nil
+}
+
+// UpdateTokenPeerInfo updates token peer info in the RepoTokenPeerInfo table.
+func UpdateTokenPeerInfo(token, peerID, clientVer string, syncTime int64) error {
+	sqlStr := "UPDATE RepoTokenPeerInfo SET " +
+		"peer_ip=?, sync_time=?, client_ver=? WHERE token=?"
+	if _, err := seafileDB.Exec(sqlStr, peerID, syncTime, clientVer, token); err != nil {
+		return err
+	}
+	return nil
+}
+
+// GetUploadTmpFile gets the temp file path of an uploaded file.
+func GetUploadTmpFile(repoID, filePath string) (string, error) {
+	var filePathNoSlash string
+	if filePath[0] == '/' {
+		filePathNoSlash = filePath[1:]
+	} else {
+		filePathNoSlash = filePath
+		filePath = "/" + filePath
+	}
+
+	var tmpFile string
+	sqlStr := "SELECT tmp_file_path FROM WebUploadTempFiles WHERE repo_id = ? AND file_path = ?"
+
+	row := seafileDB.QueryRow(sqlStr, repoID, filePath)
+	if err := row.Scan(&tmpFile); err != nil {
+		if err != sql.ErrNoRows {
+			return "", err
+		}
+	}
+	if tmpFile == "" {
+		row := seafileDB.QueryRow(sqlStr, repoID, filePathNoSlash)
+		if err := row.Scan(&tmpFile); err != nil {
+			if err != sql.ErrNoRows {
+				return "", err
+			}
+		}
+	}
+
+	return tmpFile, nil
+}
+
+// AddUploadTmpFile adds the temp file path of an uploaded file.
+func AddUploadTmpFile(repoID, filePath, tmpFile string) error {
+	if filePath[0] != '/' {
+		filePath = "/" + filePath
+	}
+
+	sqlStr := "INSERT INTO WebUploadTempFiles (repo_id, file_path, tmp_file_path) VALUES (?, ?, ?)"
+
+	_, err := seafileDB.Exec(sqlStr, repoID, filePath, tmpFile)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// DelUploadTmpFile deletes the temp file path of an uploaded file.
+func DelUploadTmpFile(repoID, filePath string) error {
+	var filePathNoSlash string
+	if filePath[0] == '/' {
+		filePathNoSlash = filePath[1:]
+	} else {
+		filePathNoSlash = filePath
+		filePath = "/" + filePath
+	}
+
+	sqlStr := "DELETE FROM WebUploadTempFiles WHERE repo_id = ? AND file_path IN (?, ?)"
+
+	_, err := seafileDB.Exec(sqlStr, repoID, filePath, filePathNoSlash)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// SetRepoCommitToDb updates the RepoInfo table.
+func SetRepoCommitToDb(repoID, repoName string, updateTime int64, version int, isEncrypted string, lastModifier string) error {
+	var exists int
+	var encrypted int
+
+	sqlStr := "SELECT 1 FROM RepoInfo WHERE repo_id=?"
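+	// Probe for an existing row first: on sql.ErrNoRows the Scan below
+	// leaves exists at 0 and the INSERT branch runs; otherwise exists is 1
+	// and the row is updated in place (a manual upsert).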
+ row := seafileDB.QueryRow(sqlStr, repoID) + if err := row.Scan(&exists); err != nil { + if err != sql.ErrNoRows { + return err + } + } + if updateTime == 0 { + updateTime = time.Now().Unix() + } + + if isEncrypted == "true" { + encrypted = 1 + } + + if exists == 1 { + sqlStr := "UPDATE RepoInfo SET name=?, update_time=?, version=?, is_encrypted=?, " + + "last_modifier=? WHERE repo_id=?" + if _, err := seafileDB.Exec(sqlStr, repoName, updateTime, version, encrypted, lastModifier, repoID); err != nil { + return err + } + } else { + sqlStr := "INSERT INTO RepoInfo (repo_id, name, update_time, version, is_encrypted, last_modifier) " + + "VALUES (?, ?, ?, ?, ?, ?)" + if _, err := seafileDB.Exec(sqlStr, repoID, repoName, updateTime, version, encrypted, lastModifier); err != nil { + return err + } + } + + return nil +} + +// SetVirtualRepoBaseCommitPath updates the table of VirtualRepo. +func SetVirtualRepoBaseCommitPath(repoID, baseCommitID, newPath string) error { + sqlStr := "UPDATE VirtualRepo SET base_commit=?, path=? WHERE repo_id=?" + if _, err := seafileDB.Exec(sqlStr, baseCommitID, newPath, repoID); err != nil { + return err + } + return nil +} + +// GetVirtualRepoIDsByOrigin return the virtual repo ids by origin repo id. +func GetVirtualRepoIDsByOrigin(repoID string) ([]string, error) { + sqlStr := "SELECT repo_id FROM VirtualRepo WHERE origin_repo=?" + + var id string + var ids []string + row, err := seafileDB.Query(sqlStr, repoID) + if err != nil { + return nil, err + } + defer row.Close() + for row.Next() { + if err := row.Scan(&id); err != nil { + if err != sql.ErrNoRows { + return nil, err + } + } + ids = append(ids, id) + } + + return ids, nil +} + +// DelVirtualRepo deletes virtual repo from database. +func DelVirtualRepo(repoID string, cloudMode bool) error { + err := removeVirtualRepoOndisk(repoID, cloudMode) + if err != nil { + err := fmt.Errorf("failed to remove virtual repo on disk: %v", err) + return err + } + sqlStr := "DELETE FROM VirtualRepo WHERE repo_id = ?" + _, err = seafileDB.Exec(sqlStr, repoID) + if err != nil { + return err + } + + return nil +} + +func removeVirtualRepoOndisk(repoID string, cloudMode bool) error { + sqlStr := "DELETE FROM Repo WHERE repo_id = ?" + _, err := seafileDB.Exec(sqlStr, repoID) + if err != nil { + return err + } + sqlStr = "SELECT name, repo_id, commit_id FROM Branch WHERE repo_id=?" + rows, err := seafileDB.Query(sqlStr, repoID) + if err != nil { + return err + } + defer rows.Close() + for rows.Next() { + var name, id, commitID string + if err := rows.Scan(&name, &id, &commitID); err != nil { + if err != sql.ErrNoRows { + return err + } + } + sqlStr := "DELETE FROM RepoHead WHERE branch_name = ? AND repo_id = ?" + _, err := seafileDB.Exec(sqlStr, name, id) + if err != nil { + return err + } + sqlStr = "DELETE FROM Branch WHERE name=? AND repo_id=?" + _, err = seafileDB.Exec(sqlStr, name, id) + if err != nil { + return err + } + } + + sqlStr = "DELETE FROM RepoOwner WHERE repo_id = ?" + _, err = seafileDB.Exec(sqlStr, repoID) + if err != nil { + return err + } + + sqlStr = "DELETE FROM SharedRepo WHERE repo_id = ?" + _, err = seafileDB.Exec(sqlStr, repoID) + if err != nil { + return err + } + + sqlStr = "DELETE FROM RepoGroup WHERE repo_id = ?" + _, err = seafileDB.Exec(sqlStr, repoID) + if err != nil { + return err + } + if !cloudMode { + sqlStr = "DELETE FROM InnerPubRepo WHERE repo_id = ?" + _, err := seafileDB.Exec(sqlStr, repoID) + if err != nil { + return err + } + } + + sqlStr = "DELETE FROM RepoUserToken WHERE repo_id = ?" 
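+	// Note that these per-table deletions do not run inside a transaction,
+	// so a failure partway through leaves the repo only partially removed.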
+	_, err = seafileDB.Exec(sqlStr, repoID)
+	if err != nil {
+		return err
+	}
+
+	sqlStr = "DELETE FROM RepoValidSince WHERE repo_id = ?"
+	_, err = seafileDB.Exec(sqlStr, repoID)
+	if err != nil {
+		return err
+	}
+
+	sqlStr = "DELETE FROM RepoSize WHERE repo_id = ?"
+	_, err = seafileDB.Exec(sqlStr, repoID)
+	if err != nil {
+		return err
+	}
+
+	var exists int
+	sqlStr = "SELECT 1 FROM GarbageRepos WHERE repo_id=?"
+	row := seafileDB.QueryRow(sqlStr, repoID)
+	if err := row.Scan(&exists); err != nil {
+		if err != sql.ErrNoRows {
+			return err
+		}
+	}
+	if exists == 0 {
+		sqlStr = "INSERT INTO GarbageRepos (repo_id) VALUES (?)"
+		_, err := seafileDB.Exec(sqlStr, repoID)
+		if err != nil {
+			return err
+		}
+	} else {
+		sqlStr = "REPLACE INTO GarbageRepos (repo_id) VALUES (?)"
+		_, err := seafileDB.Exec(sqlStr, repoID)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// IsVirtualRepo checks whether the repo is a virtual repo.
+func IsVirtualRepo(repoID string) (bool, error) {
+	var exists int
+	sqlStr := "SELECT 1 FROM VirtualRepo WHERE repo_id = ?"
+
+	row := seafileDB.QueryRow(sqlStr, repoID)
+	if err := row.Scan(&exists); err != nil {
+		if err != sql.ErrNoRows {
+			return false, err
+		}
+		return false, nil
+	}
+	return true, nil
+}
+
+// GetRepoOwner gets the owner of a repo.
+func GetRepoOwner(repoID string) (string, error) {
+	var owner string
+	sqlStr := "SELECT owner_id FROM RepoOwner WHERE repo_id=?"
+
+	row := seafileDB.QueryRow(sqlStr, repoID)
+	if err := row.Scan(&owner); err != nil {
+		if err != sql.ErrNoRows {
+			return "", err
+		}
+	}
+
+	return owner, nil
+}
diff --git a/fileserver/repomgr/repomgr_test.go b/fileserver/repomgr/repomgr_test.go
new file mode 100644
index 0000000..8d35115
--- /dev/null
+++ b/fileserver/repomgr/repomgr_test.go
@@ -0,0 +1,84 @@
+package repomgr
+
+import (
+	"database/sql"
+	"fmt"
+	"os"
+	"testing"
+
+	_ "github.com/go-sql-driver/mysql"
+	"github.com/haiwen/seafile-server/fileserver/commitmgr"
+	"github.com/haiwen/seafile-server/fileserver/searpc"
+)
+
+const (
+	// repoID = "9646f13e-bbab-4eaf-9a84-fb6e1cd776b3"
+	user            = "seafile"
+	password        = "seafile"
+	host            = "127.0.0.1"
+	port            = 3306
+	dbName          = "seafile-db"
+	useTLS          = false
+	seafileConfPath = "/root/conf"
+	seafileDataDir  = "/root/conf/seafile-data"
+	repoName        = "repo"
+	userName        = "seafile@seafile.com"
+	encVersion      = 2
+	pipePath        = "/root/runtime/seafile.sock"
+	service         = "seafserv-threaded-rpcserver"
+)
+
+var repoID string
+var client *searpc.Client
+
+func createRepo() string {
+	id, err := client.Call("seafile_create_repo", repoName, "", userName, nil, encVersion)
+	if err != nil {
+		fmt.Printf("failed to create repo.\n")
+	}
+	if id == nil {
+		fmt.Printf("repo id is nil.\n")
+		os.Exit(1)
+	}
+
+	repoid, ok := id.(string)
+	if !ok {
+		fmt.Printf("returned value isn't a repo id.\n")
+	}
+	return repoid
+}
+
+func delRepo() {
+	_, err := client.Call("seafile_destroy_repo", repoID)
+	if err != nil {
+		fmt.Printf("failed to delete repo.\n")
+		os.Exit(1)
+	}
+}
+
+func TestMain(m *testing.M) {
+	client = searpc.Init(pipePath, service)
+	repoID = createRepo()
+	dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=%t", user, password, host, port, dbName, useTLS)
+	seafDB, err := sql.Open("mysql", dsn)
+	if err != nil {
+		fmt.Printf("Failed to open database: %v", err)
+	}
+	Init(seafDB)
+	commitmgr.Init(seafileConfPath, seafileDataDir)
+	code := m.Run()
+	delRepo()
+	os.Exit(code)
+}
+
+func TestGet(t *testing.T) {
+	repo := Get(repoID)
+	if repo == nil {
+		t.Errorf("failed to get repo : %s.\n", repoID)
		t.FailNow()
+	}
+
+	if repo.ID != repoID {
+		t.Errorf("failed to get repo : %s.\n", repoID)
+	}
+}
diff --git a/fileserver/searpc/searpc.go b/fileserver/searpc/searpc.go
new file mode 100644
index 0000000..64d6090
--- /dev/null
+++ b/fileserver/searpc/searpc.go
@@ -0,0 +1,122 @@
+// Package searpc implements the searpc client protocol over a Unix pipe transport.
+package searpc
+
+import (
+	"bufio"
+	"encoding/binary"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+)
+
+// Client represents a connection to the RPC server.
+type Client struct {
+	// path of the named pipe
+	pipePath string
+	// RPC service name
+	Service string
+}
+
+type request struct {
+	Service string `json:"service"`
+	Request string `json:"request"`
+}
+
+// Init initializes the RPC client.
+func Init(pipePath string, service string) *Client {
+	client := new(Client)
+	client.pipePath = pipePath
+	client.Service = service
+
+	return client
+}
+
+// Call calls the RPC function funcname with variadic parameters.
+// The return value of the RPC function is returned as an interface{} value;
+// the actual type can be int32, int64, string, a struct (object), a list of
+// structs (objects), or raw JSON.
+func (c *Client) Call(funcname string, params ...interface{}) (interface{}, error) {
+	// TODO: use reflection to compose requests and parse results.
+	var unixAddr *net.UnixAddr
+	unixAddr, err := net.ResolveUnixAddr("unix", c.pipePath)
+	if err != nil {
+		err := fmt.Errorf("failed to resolve unix addr when calling rpc : %v", err)
+		return nil, err
+	}
+
+	conn, err := net.DialUnix("unix", nil, unixAddr)
+	if err != nil {
+		err := fmt.Errorf("failed to dial unix when calling rpc : %v", err)
+		return nil, err
+	}
+	defer conn.Close()
+
+	var req []interface{}
+	req = append(req, funcname)
+	req = append(req, params...)
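+	// The searpc pipe protocol frames each message, in both directions, as a
+	// 4-byte little-endian length header followed by a JSON body; the request
+	// body wraps the service name and the JSON-encoded [funcname, args...]
+	// call array built above.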
+ jsonstr, err := json.Marshal(req) + if err != nil { + err := fmt.Errorf("failed to encode rpc call to json : %v", err) + return nil, err + } + + reqHeader := new(request) + reqHeader.Service = c.Service + reqHeader.Request = string(jsonstr) + + jsonstr, err = json.Marshal(reqHeader) + if err != nil { + err := fmt.Errorf("failed to convert object to json : %v", err) + return nil, err + } + + header := make([]byte, 4) + binary.LittleEndian.PutUint32(header, uint32(len(jsonstr))) + _, err = conn.Write([]byte(header)) + if err != nil { + err := fmt.Errorf("Failed to write rpc request header : %v", err) + return nil, err + } + + _, err = conn.Write([]byte(jsonstr)) + if err != nil { + err := fmt.Errorf("Failed to write rpc request body : %v", err) + return nil, err + } + + reader := bufio.NewReader(conn) + buflen := make([]byte, 4) + _, err = io.ReadFull(reader, buflen) + if err != nil { + err := fmt.Errorf("failed to read response header from rpc server : %v", err) + return nil, err + } + retlen := binary.LittleEndian.Uint32(buflen) + + msg := make([]byte, retlen) + _, err = io.ReadFull(reader, msg) + if err != nil { + err := fmt.Errorf("failed to read response body from rpc server : %v", err) + return nil, err + } + + retlist := make(map[string]interface{}) + err = json.Unmarshal(msg, &retlist) + if err != nil { + err := fmt.Errorf("failed to decode rpc response : %v", err) + return nil, err + } + + if _, ok := retlist["err_code"]; ok { + err := fmt.Errorf("searpc server returned error : %v", retlist["err_msg"]) + return nil, err + } + + if _, ok := retlist["ret"]; ok { + ret := retlist["ret"] + return ret, nil + } + + err = fmt.Errorf("No value returned") + return nil, err +} diff --git a/fileserver/searpc/searpc_test.go b/fileserver/searpc/searpc_test.go new file mode 100644 index 0000000..10b1819 --- /dev/null +++ b/fileserver/searpc/searpc_test.go @@ -0,0 +1,81 @@ +package searpc + +import ( + "os" + "testing" +) + +const ( + repoName = "repo" + userName = "seafile@seafile.com" + encVersion = 2 + pipePath = "/root/runtime/seafile.sock" + service = "seafserv-threaded-rpcserver" +) + +var client *Client + +func TestMain(m *testing.M) { + client = Init(pipePath, service) + code := m.Run() + os.Exit(code) +} + +func TestCallRpc(t *testing.T) { + repoID, err := client.Call("seafile_create_repo", repoName, "", userName, nil, encVersion) + if err != nil { + t.Errorf("failed to create repo.\n") + } + if repoID == nil { + t.Errorf("repo id is nil.\n") + t.FailNow() + } + + repo, err := client.Call("seafile_get_repo", repoID) + if err != nil { + t.Errorf("failed to get repo.\n") + } + if repo == nil { + t.Errorf("repo is nil.\n") + t.FailNow() + } + repoMap, ok := repo.(map[string]interface{}) + if !ok { + t.Errorf("failed to assert the type.\n") + t.FailNow() + } + if repoMap["id"] != repoID { + t.Errorf("wrong repo id.\n") + } + + repoList, err := client.Call("seafile_get_repo_list", -1, -1, "") + if err != nil { + t.Errorf("failed to get repo list.\n") + } + if repoList == nil { + t.Errorf("repo list is nil.\n") + t.FailNow() + } + var exists bool + repos, ok := repoList.([]interface{}) + if !ok { + t.Errorf("failed to assert the type.\n") + t.FailNow() + } + for _, v := range repos { + repo, ok := v.(map[string]interface{}) + if !ok { + t.Errorf("failed to assert the type.\n") + t.FailNow() + } + if repo["id"] == repoID { + exists = true + break + } + } + if exists != true { + t.Errorf("can't find repo %s in repo list.\n", repoID) + } + + client.Call("seafile_destroy_repo", repoID) +} diff 
--git a/fileserver/share/group/group.go b/fileserver/share/group/group.go
new file mode 100644
index 0000000..ed90844
--- /dev/null
+++ b/fileserver/share/group/group.go
@@ -0,0 +1,2 @@
+// Package group manages group membership and group shares.
+package group
diff --git a/fileserver/share/public/public.go b/fileserver/share/public/public.go
new file mode 100644
index 0000000..33af3f1
--- /dev/null
+++ b/fileserver/share/public/public.go
@@ -0,0 +1,2 @@
+// Package public manages inner public shares.
+package public
diff --git a/fileserver/share/share.go b/fileserver/share/share.go
new file mode 100644
index 0000000..c5f364a
--- /dev/null
+++ b/fileserver/share/share.go
@@ -0,0 +1,646 @@
+// Package share manages share relations.
+// share: manages personal shares and provides high-level permission check functions.
package share
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/haiwen/seafile-server/fileserver/repomgr"
+)
+
+type group struct {
+	id            int
+	groupName     string
+	creatorName   string
+	timestamp     int64
+	parentGroupID int
+}
+
+var ccnetDB *sql.DB
+var seafileDB *sql.DB
+var groupTableName string
+var cloudMode bool
+
+// Init initializes ccnetDB, seafileDB, groupTableName and cloudMode.
+func Init(cnDB *sql.DB, seafDB *sql.DB, grpTableName string, clMode bool) {
+	ccnetDB = cnDB
+	seafileDB = seafDB
+	groupTableName = grpTableName
+	cloudMode = clMode
+}
+
+// CheckPerm gets a user's permission for the repo.
+func CheckPerm(repoID string, user string) string {
+	var perm string
+	vInfo, err := repomgr.GetVirtualRepoInfo(repoID)
+	if err != nil {
+		log.Printf("Failed to get virtual repo info by repo id %s: %v", repoID, err)
+	}
+	if vInfo != nil {
+		perm = checkVirtualRepoPerm(repoID, vInfo.OriginRepoID, user, vInfo.Path)
+		return perm
+	}
+
+	perm = checkRepoSharePerm(repoID, user)
+
+	return perm
+}
+
+// GetGroupReposByUser gets group repos for the user.
+func GetGroupReposByUser(user string, orgID int) ([]*SharedRepo, error) {
+	groups, err := getGroupsByUser(user, true)
+	if err != nil {
+		return nil, err
+	}
+	if len(groups) == 0 {
+		return nil, nil
+	}
+
+	var sqlBuilder strings.Builder
+	if orgID < 0 {
+		sqlBuilder.WriteString("SELECT g.repo_id, " +
+			"user_name, permission, commit_id, " +
+			"i.name, i.update_time, i.version " +
+			"FROM RepoGroup g " +
+			"LEFT JOIN RepoInfo i ON g.repo_id = i.repo_id, " +
+			"Branch b WHERE g.repo_id = b.repo_id AND " +
+			"b.name = 'master' AND group_id IN (")
+	} else {
+		sqlBuilder.WriteString("SELECT g.repo_id, " +
+			"owner, permission, commit_id, " +
+			"i.name, i.update_time, i.version " +
+			"FROM OrgGroupRepo g " +
+			"LEFT JOIN RepoInfo i ON g.repo_id = i.repo_id, " +
+			"Branch b WHERE g.repo_id = b.repo_id AND " +
+			"b.name = 'master' AND group_id IN (")
+	}
+
+	for i := 0; i < len(groups); i++ {
+		sqlBuilder.WriteString(strconv.Itoa(groups[i].id))
+		if i+1 < len(groups) {
+			sqlBuilder.WriteString(",")
+		}
+	}
+	sqlBuilder.WriteString(" ) ORDER BY group_id")
+
+	rows, err := seafileDB.Query(sqlBuilder.String())
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var repos []*SharedRepo
+	for rows.Next() {
+		gRepo := new(SharedRepo)
+		if err := rows.Scan(&gRepo.ID, &gRepo.Owner,
+			&gRepo.Permission, &gRepo.HeadCommitID,
+			&gRepo.Name, &gRepo.MTime, &gRepo.Version); err == nil {
+
+			repos = append(repos, gRepo)
+		}
+	}
+
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return repos, nil
+}
+
+func checkVirtualRepoPerm(repoID, originRepoID, user, vPath string) string {
+	owner, err :=
repomgr.GetRepoOwner(originRepoID) + if err != nil { + log.Printf("Failed to get repo owner: %v", err) + } + var perm string + if owner != "" && owner == user { + perm = "rw" + return perm + } + perm = checkPermOnParentRepo(originRepoID, user, vPath) + if perm != "" { + return perm + } + perm = checkRepoSharePerm(originRepoID, user) + return perm +} + +func getUserGroups(sqlStr string, args ...interface{}) ([]group, error) { + rows, err := ccnetDB.Query(sqlStr, args...) + if err != nil { + return nil, err + } + + defer rows.Close() + + var groups []group + var g group + for rows.Next() { + if err := rows.Scan(&g.id, &g.groupName, + &g.creatorName, &g.timestamp, + &g.parentGroupID); err == nil { + + groups = append(groups, g) + } + } + + if err := rows.Err(); err != nil { + return nil, err + } + return groups, nil +} + +func getGroupsByUser(userName string, returnAncestors bool) ([]group, error) { + sqlStr := fmt.Sprintf("SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "+ + "`%s` g, GroupUser u WHERE g.group_id = u.group_id AND user_name=? ORDER BY g.group_id DESC", + groupTableName) + groups, err := getUserGroups(sqlStr, userName) + if err != nil { + err := fmt.Errorf("Failed to get groups by user %s: %v", userName, err) + return nil, err + } + if !returnAncestors { + return groups, nil + } + + sqlStr = "" + var ret []group + for _, group := range groups { + parentGroupID := group.parentGroupID + groupID := group.id + if parentGroupID != 0 { + if sqlStr == "" { + sqlStr = fmt.Sprintf("SELECT path FROM GroupStructure WHERE group_id IN (%d", + groupID) + } else { + sqlStr += fmt.Sprintf(", %d", groupID) + } + } else { + ret = append(ret, group) + } + } + if sqlStr != "" { + sqlStr += ")" + paths, err := getGroupPaths(sqlStr) + if err != nil { + log.Printf("Failed to get group paths: %v", err) + } + if paths == "" { + err := fmt.Errorf("Failed to get groups path for user %s", userName) + return nil, err + } + + sqlStr = fmt.Sprintf("SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "+ + "`%s` g WHERE g.group_id IN (%s) ORDER BY g.group_id DESC", + groupTableName, paths) + groups, err := getUserGroups(sqlStr) + if err != nil { + return nil, err + } + ret = append(ret, groups...) + } + return ret, nil +} + +func getGroupPaths(sqlStr string) (string, error) { + var paths string + rows, err := ccnetDB.Query(sqlStr) + if err != nil { + return paths, err + } + + defer rows.Close() + + var path string + for rows.Next() { + rows.Scan(&path) + if paths == "" { + paths = path + } else { + paths += fmt.Sprintf(", %s", path) + } + } + + if err := rows.Err(); err != nil { + return "", err + } + return paths, nil +} + +func checkGroupPermByUser(repoID string, userName string) (string, error) { + groups, err := getGroupsByUser(userName, false) + if err != nil { + return "", err + } + if len(groups) == 0 { + return "", nil + } + + var sqlBuilder strings.Builder + sqlBuilder.WriteString("SELECT permission FROM RepoGroup WHERE repo_id = ? 
AND group_id IN (")
+	for i := 0; i < len(groups); i++ {
+		sqlBuilder.WriteString(strconv.Itoa(groups[i].id))
+		if i+1 < len(groups) {
+			sqlBuilder.WriteString(",")
+		}
+	}
+	sqlBuilder.WriteString(")")
+
+	rows, err := seafileDB.Query(sqlBuilder.String(), repoID)
+	if err != nil {
+		err := fmt.Errorf("Failed to get group permission by user %s: %v", userName, err)
+		return "", err
+	}
+
+	defer rows.Close()
+
+	var perm string
+	var origPerm string
+	for rows.Next() {
+		if err := rows.Scan(&perm); err == nil {
+			if perm == "rw" {
+				origPerm = perm
+			} else if perm == "r" && origPerm == "" {
+				origPerm = perm
+			}
+		}
+	}
+
+	if err := rows.Err(); err != nil {
+		err := fmt.Errorf("Failed to get group permission for user %s: %v", userName, err)
+		return "", err
+	}
+
+	return origPerm, nil
+}
+
+func checkSharedRepoPerm(repoID string, email string) (string, error) {
+	sqlStr := "SELECT permission FROM SharedRepo WHERE repo_id=? AND to_email=?"
+	row := seafileDB.QueryRow(sqlStr, repoID, email)
+
+	var perm string
+	if err := row.Scan(&perm); err != nil {
+		if err != sql.ErrNoRows {
+			err := fmt.Errorf("Failed to check shared repo permission: %v", err)
+			return "", err
+		}
+	}
+	return perm, nil
+}
+
+func checkInnerPubRepoPerm(repoID string) (string, error) {
+	sqlStr := "SELECT permission FROM InnerPubRepo WHERE repo_id=?"
+	row := seafileDB.QueryRow(sqlStr, repoID)
+
+	var perm string
+	if err := row.Scan(&perm); err != nil {
+		if err != sql.ErrNoRows {
+			err := fmt.Errorf("Failed to check inner public repo permission: %v", err)
+			return "", err
+		}
+	}
+
+	return perm, nil
+}
+
+func checkRepoSharePerm(repoID string, userName string) string {
+	owner, err := repomgr.GetRepoOwner(repoID)
+	if err != nil {
+		log.Printf("Failed to get repo owner: %v", err)
+	}
+	if owner != "" && owner == userName {
+		perm := "rw"
+		return perm
+	}
+	perm, err := checkSharedRepoPerm(repoID, userName)
+	if err != nil {
+		log.Printf("Failed to get shared repo permission: %v", err)
+	}
+	if perm != "" {
+		return perm
+	}
+	perm, err = checkGroupPermByUser(repoID, userName)
+	if err != nil {
+		log.Printf("Failed to get group permission by user %s: %v", userName, err)
+	}
+	if perm != "" {
+		return perm
+	}
+	if !cloudMode {
+		perm, err = checkInnerPubRepoPerm(repoID)
+		if err != nil {
+			log.Printf("Failed to get inner public repo permission by repo id %s: %v", repoID, err)
+			return ""
+		}
+		return perm
+	}
+	return ""
+}
+
+func getSharedDirsToUser(originRepoID string, toEmail string) (map[string]string, error) {
+	dirs := make(map[string]string)
+	sqlStr := "SELECT v.path, s.permission FROM SharedRepo s, VirtualRepo v WHERE " +
+		"s.repo_id = v.repo_id AND s.to_email = ? AND v.origin_repo = ?"
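+	// The query maps each shared sub-folder of the origin repo to its
+	// permission; getDirPerm later walks a path upward via filepath.Dir to
+	// find the closest shared ancestor. Note that filepath.Dir never yields
+	// an empty string, so that walk relies on finding a match to terminate.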
+ + rows, err := seafileDB.Query(sqlStr, toEmail, originRepoID) + if err != nil { + err := fmt.Errorf("Failed to get shared directories by user %s: %v", toEmail, err) + return nil, err + } + + defer rows.Close() + + var path string + var perm string + for rows.Next() { + if err := rows.Scan(&path, &perm); err == nil { + dirs[path] = perm + } + } + if err := rows.Err(); err != nil { + err := fmt.Errorf("Failed to get shared directories by user %s: %v", toEmail, err) + return nil, err + } + + return dirs, nil +} + +func getDirPerm(perms map[string]string, path string) string { + tmp := path + var perm string + for tmp != "" { + if perm, exists := perms[tmp]; exists { + return perm + } + tmp = filepath.Dir(tmp) + } + return perm +} + +func convertGroupListToStr(groups []group) string { + var groupIDs strings.Builder + + for i, group := range groups { + groupIDs.WriteString(strconv.Itoa(group.id)) + if i+1 < len(groups) { + groupIDs.WriteString(",") + } + } + return groupIDs.String() +} + +func getSharedDirsToGroup(originRepoID string, groups []group) (map[string]string, error) { + dirs := make(map[string]string) + groupIDs := convertGroupListToStr(groups) + + sqlStr := fmt.Sprintf("SELECT v.path, s.permission "+ + "FROM RepoGroup s, VirtualRepo v WHERE "+ + "s.repo_id = v.repo_id AND v.origin_repo = ? "+ + "AND s.group_id in (%s)", groupIDs) + + rows, err := seafileDB.Query(sqlStr, originRepoID) + if err != nil { + err := fmt.Errorf("Failed to get shared directories: %v", err) + return nil, err + } + + defer rows.Close() + + var path string + var perm string + for rows.Next() { + if err := rows.Scan(&path, &perm); err == nil { + dirs[path] = perm + } + } + + if err := rows.Err(); err != nil { + err := fmt.Errorf("Failed to get shared directories: %v", err) + return nil, err + } + + return dirs, nil +} + +func checkPermOnParentRepo(originRepoID, user, vPath string) string { + var perm string + userPerms, err := getSharedDirsToUser(originRepoID, user) + if err != nil { + log.Printf("Failed to get all shared folder perms in parent repo %.8s for user %s", originRepoID, user) + return "" + } + if len(userPerms) > 0 { + perm = getDirPerm(userPerms, vPath) + if perm != "" { + return perm + } + } + + groups, err := getGroupsByUser(user, false) + if err != nil { + log.Printf("Failed to get groups by user %s: %v", user, err) + } + if len(groups) == 0 { + return perm + } + + groupPerms, err := getSharedDirsToGroup(originRepoID, groups) + if err != nil { + log.Printf("Failed to get all shared folder perm from parent repo %.8s to all user groups", originRepoID) + return "" + } + if len(groupPerms) == 0 { + return "" + } + + perm = getDirPerm(groupPerms, vPath) + + return perm +} + +// SharedRepo is a shared repo object +type SharedRepo struct { + Version int `json:"version"` + ID string `json:"id"` + HeadCommitID string `json:"head_commit_id"` + Name string `json:"name"` + MTime int64 `json:"mtime"` + Permission string `json:"permission"` + Type string `json:"type"` + Owner string `json:"owner"` +} + +// GetReposByOwner get repos by owner +func GetReposByOwner(email string) ([]*SharedRepo, error) { + var repos []*SharedRepo + + query := "SELECT o.repo_id, b.commit_id, i.name, " + + "i.version, i.update_time, i.last_modifier FROM " + + "RepoOwner o LEFT JOIN Branch b ON o.repo_id = b.repo_id " + + "LEFT JOIN RepoInfo i ON o.repo_id = i.repo_id " + + "WHERE owner_id=? 
AND " + + "o.repo_id NOT IN (SELECT v.repo_id FROM VirtualRepo v) " + + "ORDER BY i.update_time DESC, o.repo_id" + + stmt, err := seafileDB.Prepare(query) + if err != nil { + return nil, err + } + defer stmt.Close() + + rows, err := stmt.Query(email) + + if err != nil { + return nil, err + } + + defer rows.Close() + + for rows.Next() { + repo := new(SharedRepo) + var repoName, lastModifier sql.NullString + if err := rows.Scan(&repo.ID, &repo.HeadCommitID, + &repoName, &repo.Version, &repo.MTime, + &lastModifier); err == nil { + + if repo.HeadCommitID == "" { + continue + } + if !repoName.Valid || !lastModifier.Valid { + continue + } + if repoName.String == "" || lastModifier.String == "" { + continue + } + repo.Name = repoName.String + repos = append(repos, repo) + } + } + + if err := rows.Err(); err != nil { + return nil, err + } + + return repos, nil +} + +// ListInnerPubRepos get inner public repos +func ListInnerPubRepos() ([]*SharedRepo, error) { + query := "SELECT InnerPubRepo.repo_id, " + + "owner_id, permission, commit_id, i.name, " + + "i.update_time, i.version " + + "FROM InnerPubRepo " + + "LEFT JOIN RepoInfo i ON InnerPubRepo.repo_id = i.repo_id, RepoOwner, Branch " + + "WHERE InnerPubRepo.repo_id=RepoOwner.repo_id AND " + + "InnerPubRepo.repo_id = Branch.repo_id AND Branch.name = 'master'" + + stmt, err := seafileDB.Prepare(query) + if err != nil { + return nil, err + } + defer stmt.Close() + + rows, err := stmt.Query() + if err != nil { + return nil, err + } + + defer rows.Close() + + var repos []*SharedRepo + for rows.Next() { + repo := new(SharedRepo) + var repoName sql.NullString + if err := rows.Scan(&repo.ID, &repo.Owner, + &repo.Permission, &repo.HeadCommitID, &repoName, + &repo.MTime, &repo.Version); err == nil { + + if !repoName.Valid { + continue + } + if repoName.String == "" { + continue + } + repo.Name = repoName.String + repos = append(repos, repo) + } + } + + if err := rows.Err(); err != nil { + return nil, err + } + + return repos, nil +} + +// ListShareRepos list share repos by email +func ListShareRepos(email, columnType string) ([]*SharedRepo, error) { + var repos []*SharedRepo + var query string + if columnType == "from_email" { + query = "SELECT sh.repo_id, to_email, " + + "permission, commit_id, " + + "i.name, i.update_time, i.version FROM " + + "SharedRepo sh LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id, Branch b " + + "WHERE from_email=? AND " + + "sh.repo_id = b.repo_id AND " + + "b.name = 'master' " + + "ORDER BY i.update_time DESC, sh.repo_id" + } else if columnType == "to_email" { + query = "SELECT sh.repo_id, from_email, " + + "permission, commit_id, " + + "i.name, i.update_time, i.version FROM " + + "SharedRepo sh LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id, Branch b " + + "WHERE to_email=? 
AND " + + "sh.repo_id = b.repo_id AND " + + "b.name = 'master' " + + "ORDER BY i.update_time DESC, sh.repo_id" + } else { + err := fmt.Errorf("Wrong column type: %s", columnType) + return nil, err + } + + stmt, err := seafileDB.Prepare(query) + if err != nil { + return nil, err + } + + defer stmt.Close() + + rows, err := stmt.Query(email) + if err != nil { + return nil, err + } + + defer rows.Close() + + for rows.Next() { + repo := new(SharedRepo) + var repoName sql.NullString + if err := rows.Scan(&repo.ID, &repo.Owner, + &repo.Permission, &repo.HeadCommitID, + &repoName, &repo.MTime, &repo.Version); err == nil { + + if !repoName.Valid { + continue + } + if repoName.String == "" { + continue + } + repo.Name = repoName.String + + repos = append(repos, repo) + } + } + + if err := rows.Err(); err != nil { + return nil, err + } + + return repos, nil +} diff --git a/fileserver/size_sched.go b/fileserver/size_sched.go new file mode 100644 index 0000000..64985a3 --- /dev/null +++ b/fileserver/size_sched.go @@ -0,0 +1,239 @@ +package main + +import ( + "fmt" + "log" + "path/filepath" + + "gopkg.in/ini.v1" + + "database/sql" + + "github.com/haiwen/seafile-server/fileserver/commitmgr" + "github.com/haiwen/seafile-server/fileserver/diff" + "github.com/haiwen/seafile-server/fileserver/fsmgr" + "github.com/haiwen/seafile-server/fileserver/repomgr" +) + +// Job is the job object of workpool. +type Job struct { + callback jobCB + repoID string +} + +type jobCB func(repoID string) error + +var jobs = make(chan Job, 100) + +func sizeSchedulerInit() { + var n int = 1 + var seafileConfPath string + if centralDir != "" { + seafileConfPath = filepath.Join(centralDir, "seafile.conf") + } else { + seafileConfPath = filepath.Join(absDataDir, "seafile.conf") + } + config, err := ini.Load(seafileConfPath) + if err != nil { + log.Fatalf("Failed to load seafile.conf: %v", err) + } + if section, err := config.GetSection("scheduler"); err == nil { + if key, err := section.GetKey("size_sched_thread_num"); err == nil { + num, err := key.Int() + if err == nil { + n = num + } + } + } + go createWorkerPool(n) +} + +// need to start a go routine +func createWorkerPool(n int) { + for i := 0; i < n; i++ { + go worker() + } +} + +func worker() { + for { + select { + case job := <-jobs: + if job.callback != nil { + err := job.callback(job.repoID) + if err != nil { + log.Printf("failed to call jobs: %v.\n", err) + } + } + } + } +} + +func updateRepoSize(repoID string) { + job := Job{computeRepoSize, repoID} + jobs <- job +} + +func computeRepoSize(repoID string) error { + var size int64 + var fileCount int64 + + repo := repomgr.Get(repoID) + if repo == nil { + err := fmt.Errorf("failed to get repo %s", repoID) + return err + } + + info, err := getOldRepoInfo(repoID) + if err != nil { + err := fmt.Errorf("failed to get old repo info: %v", err) + return err + } + + if info != nil && info.HeadID == repo.HeadCommitID { + return nil + } + + head, err := commitmgr.Load(repo.ID, repo.HeadCommitID) + if err != nil { + err := fmt.Errorf("failed to get head commit %s", repo.HeadCommitID) + return err + } + + var oldHead *commitmgr.Commit + if info != nil { + commit, _ := commitmgr.Load(repo.ID, info.HeadID) + oldHead = commit + } + + if info != nil && oldHead != nil { + var results []*diff.DiffEntry + var changeSize int64 + var changeFileCount int64 + err := diff.DiffCommits(oldHead, head, &results, false) + if err != nil { + err := fmt.Errorf("failed to do diff commits: %v", err) + return err + } + + for _, de := range results { + if 
de.Status == diff.DiffStatusDeleted { + changeSize -= de.Size + changeFileCount-- + } else if de.Status == diff.DiffStatusAdded { + changeSize += de.Size + changeFileCount++ + } else if de.Status == diff.DiffStatusModified { + changeSize = changeSize + de.Size - de.OriginSize + } + } + size = info.Size + changeSize + fileCount = info.FileCount + changeFileCount + } else { + info, err := fsmgr.GetFileCountInfoByPath(repo.StoreID, repo.RootID, "/") + if err != nil { + err := fmt.Errorf("failed to get file count") + return err + } + + fileCount = info.FileCount + size = info.Size + } + + err = setRepoSizeAndFileCount(repoID, repo.HeadCommitID, size, fileCount) + if err != nil { + err := fmt.Errorf("failed to set repo size and file count %s: %v", repoID, err) + return err + } + + return nil +} + +func setRepoSizeAndFileCount(repoID, newHeadID string, size, fileCount int64) error { + trans, err := seafileDB.Begin() + if err != nil { + err := fmt.Errorf("failed to start transaction: %v", err) + return err + } + + var headID string + sqlStr := "SELECT head_id FROM RepoSize WHERE repo_id=?" + + row := trans.QueryRow(sqlStr, repoID) + if err := row.Scan(&headID); err != nil { + if err != sql.ErrNoRows { + trans.Rollback() + return err + } + } + + if headID == "" { + sqlStr := "INSERT INTO RepoSize (repo_id, size, head_id) VALUES (?, ?, ?)" + _, err = trans.Exec(sqlStr, repoID, size, newHeadID) + if err != nil { + trans.Rollback() + return err + } + } else { + sqlStr = "UPDATE RepoSize SET size = ?, head_id = ? WHERE repo_id = ?" + _, err = trans.Exec(sqlStr, size, newHeadID, repoID) + if err != nil { + trans.Rollback() + return err + } + } + + var exist int + sqlStr = "SELECT 1 FROM RepoFileCount WHERE repo_id=?" + row = trans.QueryRow(sqlStr, repoID) + if err := row.Scan(&exist); err != nil { + if err != sql.ErrNoRows { + trans.Rollback() + return err + } + } + + if exist != 0 { + sqlStr := "UPDATE RepoFileCount SET file_count=? WHERE repo_id=?" + _, err = trans.Exec(sqlStr, fileCount, repoID) + if err != nil { + trans.Rollback() + return err + } + } else { + sqlStr := "INSERT INTO RepoFileCount (repo_id,file_count) VALUES (?,?)" + _, err = trans.Exec(sqlStr, repoID, fileCount) + if err != nil { + trans.Rollback() + return err + } + } + + trans.Commit() + + return nil +} + +// RepoInfo contains repo information. +type RepoInfo struct { + HeadID string + Size int64 + FileCount int64 +} + +func getOldRepoInfo(repoID string) (*RepoInfo, error) { + sqlStr := "select s.head_id,s.size,f.file_count FROM RepoSize s LEFT JOIN RepoFileCount f ON " + + "s.repo_id=f.repo_id WHERE s.repo_id=?" 
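+	// Note: with the LEFT JOIN, f.file_count is NULL for a repo that has a
+	// RepoSize row but no RepoFileCount row yet; scanning NULL into an int64
+	// fails, so sql.NullInt64 would be the safer target for that column.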
+ + repoInfo := new(RepoInfo) + row := seafileDB.QueryRow(sqlStr, repoID) + if err := row.Scan(&repoInfo.HeadID, &repoInfo.Size, &repoInfo.FileCount); err != nil { + if err != sql.ErrNoRows { + return nil, err + } + + return nil, nil + } + + return repoInfo, nil +} diff --git a/fileserver/sync_api.go b/fileserver/sync_api.go new file mode 100644 index 0000000..4287443 --- /dev/null +++ b/fileserver/sync_api.go @@ -0,0 +1,1261 @@ +package main + +import ( + "bytes" + "database/sql" + "encoding/binary" + "encoding/json" + "fmt" + "html" + "io/ioutil" + "log" + "net" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/google/uuid" + "github.com/gorilla/mux" + "github.com/haiwen/seafile-server/fileserver/blockmgr" + "github.com/haiwen/seafile-server/fileserver/commitmgr" + "github.com/haiwen/seafile-server/fileserver/diff" + "github.com/haiwen/seafile-server/fileserver/fsmgr" + "github.com/haiwen/seafile-server/fileserver/repomgr" + "github.com/haiwen/seafile-server/fileserver/share" +) + +type checkExistType int32 + +const ( + checkFSExist checkExistType = 0 + checkBlockExist checkExistType = 1 +) + +const ( + seafileServerChannelEvent = "seaf_server.event" + seafileServerChannelStats = "seaf_server.stats" + emptySHA1 = "0000000000000000000000000000000000000000" + tokenExpireTime = 7200 + permExpireTime = 7200 + virtualRepoExpireTime = 7200 + cleaningIntervalSec = 300 +) + +var ( + tokenCache sync.Map + permCache sync.Map + virtualRepoInfoCache sync.Map +) + +type tokenInfo struct { + repoID string + email string + expireTime int64 +} + +type permInfo struct { + perm string + expireTime int64 +} + +type virtualRepoInfo struct { + storeID string + expireTime int64 +} + +type repoEventData struct { + eType string + user string + ip string + repoID string + path string + clientName string +} + +type statusEventData struct { + eType string + user string + repoID string + bytes uint64 +} + +func syncAPIInit() { + ticker := time.NewTicker(time.Second * cleaningIntervalSec) + go func() { + for range ticker.C { + removeExpireCache() + } + }() +} + +func permissionCheckCB(rsp http.ResponseWriter, r *http.Request) *appError { + queries := r.URL.Query() + + op := queries.Get("op") + if op != "download" && op != "upload" { + msg := "op is invalid" + return &appError{nil, msg, http.StatusBadRequest} + } + + clientID := queries.Get("client_id") + if clientID != "" && len(clientID) != 40 { + msg := "client_id is invalid" + return &appError{nil, msg, http.StatusBadRequest} + } + + clientVer := queries.Get("client_ver") + if clientVer != "" { + status := validateClientVer(clientVer) + if status != http.StatusOK { + msg := "client_ver is invalid" + return &appError{nil, msg, status} + } + } + + clientName := queries.Get("client_name") + if clientName != "" { + clientName = html.UnescapeString(clientName) + } + + vars := mux.Vars(r) + repoID := vars["repoid"] + repo := repomgr.GetEx(repoID) + if repo == nil { + msg := "repo was deleted" + return &appError{nil, msg, seafHTTPResRepoDeleted} + } + + if repo.IsCorrupted { + msg := "repo was corrupted" + return &appError{nil, msg, seafHTTPResRepoCorrupted} + } + + user, err := validateToken(r, repoID, true) + if err != nil { + return err + } + err = checkPermission(repoID, user, op, true) + if err != nil { + return err + } + ip := getClientIPAddr(r) + if ip == "" { + token := r.Header.Get("Seafile-Repo-Token") + err := fmt.Errorf("%s failed to get client ip", token) + return &appError{err, "", http.StatusInternalServerError} + } + + if op == 
"download" { + onRepoOper("repo-download-sync", repoID, user, ip, clientName) + } + if clientID != "" && clientName != "" { + token := r.Header.Get("Seafile-Repo-Token") + exists, err := repomgr.TokenPeerInfoExists(token) + if err != nil { + err := fmt.Errorf("Failed to check whether token %s peer info exist: %v", token, err) + return &appError{err, "", http.StatusInternalServerError} + } + if !exists { + if err := repomgr.AddTokenPeerInfo(token, clientID, ip, clientName, clientVer, int64(time.Now().Second())); err != nil { + err := fmt.Errorf("Failed to add token peer info: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + } else { + if err := repomgr.UpdateTokenPeerInfo(token, clientID, clientVer, int64(time.Now().Second())); err != nil { + err := fmt.Errorf("Failed to update token peer info: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + } + } + return nil +} +func getBlockMapCB(rsp http.ResponseWriter, r *http.Request) *appError { + vars := mux.Vars(r) + repoID := vars["repoid"] + fileID := vars["id"] + + _, appErr := validateToken(r, repoID, false) + if appErr != nil { + return appErr + } + + storeID, err := getRepoStoreID(repoID) + if err != nil { + err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err) + return &appError{err, "", http.StatusInternalServerError} + } + + seafile, err := fsmgr.GetSeafile(storeID, fileID) + if err != nil { + msg := fmt.Sprintf("Failed to get seafile object by file id %s: %v", fileID, err) + return &appError{nil, msg, http.StatusNotFound} + } + + var blockSizes []int64 + for _, blockID := range seafile.BlkIDs { + blockSize, err := blockmgr.Stat(storeID, blockID) + if err != nil { + err := fmt.Errorf("Failed to find block %s/%s", storeID, blockID) + return &appError{err, "", http.StatusInternalServerError} + } + blockSizes = append(blockSizes, blockSize) + } + + var data []byte + if blockSizes != nil { + data, err = json.Marshal(blockSizes) + if err != nil { + err := fmt.Errorf("Failed to marshal json: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + } else { + data = []byte{'[', ']'} + } + + rsp.Header().Set("Content-Length", strconv.Itoa(len(data))) + rsp.WriteHeader(http.StatusOK) + rsp.Write(data) + + return nil +} + +func getAccessibleRepoListCB(rsp http.ResponseWriter, r *http.Request) *appError { + queries := r.URL.Query() + repoID := queries.Get("repo_id") + + if repoID == "" || !isValidUUID(repoID) { + msg := "Invalid repo id." 
+ return &appError{nil, msg, http.StatusBadRequest} + } + + user, appErr := validateToken(r, repoID, false) + if appErr != nil { + return appErr + } + + obtainedRepos := make(map[string]string) + + repos, err := share.GetReposByOwner(user) + if err != nil { + err := fmt.Errorf("Failed to get repos by owner %s: %v", user, err) + return &appError{err, "", http.StatusInternalServerError} + } + + var repoObjects []*share.SharedRepo + for _, repo := range repos { + if _, ok := obtainedRepos[repo.ID]; !ok { + obtainedRepos[repo.ID] = repo.ID + } + repo.Permission = "rw" + repo.Type = "repo" + repo.Owner = user + repoObjects = append(repoObjects, repo) + } + + repos, err = share.ListShareRepos(user, "to_email") + if err != nil { + err := fmt.Errorf("Failed to get share repos by user %s: %v", user, err) + return &appError{err, "", http.StatusInternalServerError} + } + for _, sRepo := range repos { + if _, ok := obtainedRepos[sRepo.ID]; ok { + continue + } + sRepo.Type = "srepo" + sRepo.Owner = strings.ToLower(sRepo.Owner) + repoObjects = append(repoObjects, sRepo) + } + + repos, err = share.GetGroupReposByUser(user, -1) + if err != nil { + err := fmt.Errorf("Failed to get group repos by user %s: %v", user, err) + return &appError{err, "", http.StatusInternalServerError} + } + reposTable := filterGroupRepos(repos) + + for _, gRepo := range reposTable { + if _, ok := obtainedRepos[gRepo.ID]; ok { + continue + } + + gRepo.Type = "grepo" + gRepo.Owner = strings.ToLower(gRepo.Owner) + repoObjects = append(repoObjects, gRepo) + } + + repos, err = share.ListInnerPubRepos() + if err != nil { + err := fmt.Errorf("Failed to get inner public repos: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + + for _, sRepo := range repos { + if _, ok := obtainedRepos[sRepo.ID]; ok { + continue + } + + sRepo.Type = "grepo" + sRepo.Owner = "Organization" + repoObjects = append(repoObjects, sRepo) + } + + var data []byte + if repoObjects != nil { + data, err = json.Marshal(repoObjects) + if err != nil { + err := fmt.Errorf("Failed to marshal json: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + } else { + data = []byte{'[', ']'} + } + rsp.Header().Set("Content-Length", strconv.Itoa(len(data))) + rsp.WriteHeader(http.StatusOK) + rsp.Write(data) + return nil +} + +func filterGroupRepos(repos []*share.SharedRepo) map[string]*share.SharedRepo { + table := make(map[string]*share.SharedRepo) + + for _, repo := range repos { + if repoPrev, ok := table[repo.ID]; ok { + if repo.Permission == "rw" && repoPrev.Permission == "r" { + table[repo.ID] = repo + } + } else { + table[repo.ID] = repo + } + } + + return table +} + +func recvFSCB(rsp http.ResponseWriter, r *http.Request) *appError { + vars := mux.Vars(r) + repoID := vars["repoid"] + + user, appErr := validateToken(r, repoID, false) + if appErr != nil { + return appErr + } + + appErr = checkPermission(repoID, user, "upload", false) + if appErr != nil { + return appErr + } + + storeID, err := getRepoStoreID(repoID) + if err != nil { + err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err) + return &appError{err, "", http.StatusInternalServerError} + } + fsBuf, err := ioutil.ReadAll(r.Body) + if err != nil { + return &appError{nil, err.Error(), http.StatusBadRequest} + } + + for len(fsBuf) > 44 { + objID := string(fsBuf[:40]) + if !isObjectIDValid(objID) { + msg := fmt.Sprintf("Fs obj id %s is invalid", objID) + return &appError{nil, msg, http.StatusBadRequest} + } + + var objSize uint32 + sizeBuffer 
:= bytes.NewBuffer(fsBuf[40:44]) + if err := binary.Read(sizeBuffer, binary.BigEndian, &objSize); err != nil { + msg := fmt.Sprintf("Failed to read fs obj size: %v", err) + return &appError{nil, msg, http.StatusBadRequest} + } + + if len(fsBuf) < int(44+objSize) { + msg := "Request body size invalid" + return &appError{nil, msg, http.StatusBadRequest} + } + + objBuffer := bytes.NewBuffer(fsBuf[44 : 44+objSize]) + if err := fsmgr.WriteRaw(storeID, objID, objBuffer); err != nil { + err := fmt.Errorf("Failed to write fs obj %s:%s : %v", storeID, objID, err) + return &appError{err, "", http.StatusInternalServerError} + } + fsBuf = fsBuf[44+objSize:] + } + if len(fsBuf) == 0 { + rsp.WriteHeader(http.StatusOK) + return nil + } + + msg := "Request body size invalid" + return &appError{nil, msg, http.StatusBadRequest} +} +func checkFSCB(rsp http.ResponseWriter, r *http.Request) *appError { + return postCheckExistCB(rsp, r, checkFSExist) +} + +func checkBlockCB(rsp http.ResponseWriter, r *http.Request) *appError { + return postCheckExistCB(rsp, r, checkBlockExist) +} + +func postCheckExistCB(rsp http.ResponseWriter, r *http.Request, existType checkExistType) *appError { + vars := mux.Vars(r) + repoID := vars["repoid"] + + _, appErr := validateToken(r, repoID, false) + if appErr != nil { + return appErr + } + + storeID, err := getRepoStoreID(repoID) + if err != nil { + err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err) + return &appError{err, "", http.StatusInternalServerError} + } + + var objIDList []string + if err := json.NewDecoder(r.Body).Decode(&objIDList); err != nil { + return &appError{nil, err.Error(), http.StatusBadRequest} + } + + var neededObjs []string + var ret bool + for i := 0; i < len(objIDList); i++ { + if !isObjectIDValid(objIDList[i]) { + continue + } + if existType == checkFSExist { + ret, _ = fsmgr.Exists(storeID, objIDList[i]) + } else if existType == checkBlockExist { + ret = blockmgr.Exists(storeID, objIDList[i]) + } + if !ret { + neededObjs = append(neededObjs, objIDList[i]) + } + } + + var data []byte + if neededObjs != nil { + data, err = json.Marshal(neededObjs) + if err != nil { + err := fmt.Errorf("Failed to marshal json: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + } else { + data = []byte{'[', ']'} + } + rsp.Header().Set("Content-Length", strconv.Itoa(len(data))) + rsp.WriteHeader(http.StatusOK) + rsp.Write(data) + + return nil +} + +func packFSCB(rsp http.ResponseWriter, r *http.Request) *appError { + vars := mux.Vars(r) + repoID := vars["repoid"] + + _, appErr := validateToken(r, repoID, false) + if appErr != nil { + return appErr + } + + storeID, err := getRepoStoreID(repoID) + if err != nil { + err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err) + return &appError{err, "", http.StatusInternalServerError} + } + + var fsIDList []string + if err := json.NewDecoder(r.Body).Decode(&fsIDList); err != nil { + return &appError{nil, err.Error(), http.StatusBadRequest} + } + + var data bytes.Buffer + for i := 0; i < len(fsIDList); i++ { + if !isObjectIDValid(fsIDList[i]) { + msg := fmt.Sprintf("Invalid fs id %s", fsIDList[i]) + return &appError{nil, msg, http.StatusBadRequest} + } + data.WriteString(fsIDList[i]) + var tmp bytes.Buffer + if err := fsmgr.ReadRaw(storeID, fsIDList[i], &tmp); err != nil { + err := fmt.Errorf("Failed to read fs %s:%s: %v", storeID, fsIDList[i], err) + return &appError{err, "", http.StatusInternalServerError} + } + tmpLen := make([]byte, 4) + 
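
The statements around this point write the framing that packFS and recvfs share: each record is a 40-byte hex object ID, a 4-byte big-endian payload length, then the raw fs object, repeated until the buffer is exhausted. A standalone decoder for that format, as a sketch (type and function names are illustrative):

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

type fsRecord struct {
	ObjID   string
	Payload []byte
}

// decodeFSRecords walks a packfs-style buffer record by record.
// recvFSCB above applies the same 44-byte header rule before
// writing each payload with fsmgr.WriteRaw.
func decodeFSRecords(buf []byte) ([]fsRecord, error) {
	var records []fsRecord
	for len(buf) > 0 {
		if len(buf) < 44 {
			return nil, errors.New("truncated record header")
		}
		size := binary.BigEndian.Uint32(buf[40:44])
		if len(buf) < int(44+size) {
			return nil, errors.New("truncated record payload")
		}
		records = append(records, fsRecord{
			ObjID:   string(buf[:40]),
			Payload: buf[44 : 44+size],
		})
		buf = buf[44+size:]
	}
	return records, nil
}

func main() {
	id := "0123456789012345678901234567890123456789"
	frame := append([]byte(id), 0, 0, 0, 3)
	frame = append(frame, 'a', 'b', 'c')
	recs, err := decodeFSRecords(frame)
	if err != nil {
		panic(err)
	}
	fmt.Println(recs[0].ObjID, string(recs[0].Payload)) // prints the ID and "abc"
}
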
binary.BigEndian.PutUint32(tmpLen, uint32(tmp.Len())) + data.Write(tmpLen) + data.Write(tmp.Bytes()) + } + + rsp.Header().Set("Content-Length", strconv.Itoa(data.Len())) + rsp.WriteHeader(http.StatusOK) + rsp.Write(data.Bytes()) + return nil +} + +func headCommitsMultiCB(rsp http.ResponseWriter, r *http.Request) *appError { + var repoIDList []string + if err := json.NewDecoder(r.Body).Decode(&repoIDList); err != nil { + return &appError{err, "", http.StatusBadRequest} + } + if len(repoIDList) == 0 { + return &appError{nil, "", http.StatusBadRequest} + } + + var repoIDs strings.Builder + for i := 0; i < len(repoIDList); i++ { + if !isValidUUID(repoIDList[i]) { + return &appError{nil, "", http.StatusBadRequest} + } + if i == 0 { + repoIDs.WriteString(fmt.Sprintf("'%s'", repoIDList[i])) + } else { + repoIDs.WriteString(fmt.Sprintf(",'%s'", repoIDList[i])) + } + } + + sqlStr := fmt.Sprintf( + "SELECT repo_id, commit_id FROM Branch WHERE name='master' AND "+ + "repo_id IN (%s) LOCK IN SHARE MODE", + repoIDs.String()) + + rows, err := seafileDB.Query(sqlStr) + if err != nil { + err := fmt.Errorf("Failed to get commit id: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + + defer rows.Close() + + commitIDMap := make(map[string]string) + var repoID string + var commitID string + for rows.Next() { + if err := rows.Scan(&repoID, &commitID); err == nil { + commitIDMap[repoID] = commitID + } + } + + if err := rows.Err(); err != nil { + err := fmt.Errorf("Failed to get commit id: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + + data, err := json.Marshal(commitIDMap) + if err != nil { + err := fmt.Errorf("Failed to marshal json: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + + rsp.Header().Set("Content-Length", strconv.Itoa(len(data))) + rsp.WriteHeader(http.StatusOK) + rsp.Write(data) + + return nil +} + +func getCheckQuotaCB(rsp http.ResponseWriter, r *http.Request) *appError { + vars := mux.Vars(r) + repoID := vars["repoid"] + + if _, err := validateToken(r, repoID, false); err != nil { + return err + } + + queries := r.URL.Query() + delta := queries.Get("delta") + if delta == "" { + msg := "Invalid delta parameter" + return &appError{nil, msg, http.StatusBadRequest} + } + + deltaNum, err := strconv.ParseInt(delta, 10, 64) + if err != nil { + msg := "Invalid delta parameter" + return &appError{nil, msg, http.StatusBadRequest} + } + + ret, err := checkQuota(repoID, deltaNum) + if err != nil { + msg := "Internal error.\n" + err := fmt.Errorf("failed to check quota: %v", err) + return &appError{err, msg, http.StatusInternalServerError} + } + if ret == 1 { + msg := "Out of quota.\n" + return &appError{nil, msg, seafHTTPResNoQuota} + } + + return nil +} + +func isValidUUID(u string) bool { + _, err := uuid.Parse(u) + return err == nil +} + +func getFsObjIDCB(rsp http.ResponseWriter, r *http.Request) *appError { + queries := r.URL.Query() + + serverHead := queries.Get("server-head") + if !isObjectIDValid(serverHead) { + msg := "Invalid server-head parameter." + return &appError{nil, msg, http.StatusBadRequest} + } + + clientHead := queries.Get("client-head") + if clientHead != "" && !isObjectIDValid(clientHead) { + msg := "Invalid client-head parameter." 
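
headCommitsMultiCB interpolates repo IDs into the IN (...) list only after every entry has passed isValidUUID, which rules out injection here. For reviewers who would prefer placeholders anyway, an equivalent query builder might look like this (a sketch of an alternative, not what the patch does):

package main

import (
	"fmt"
	"strings"
)

// buildHeadCommitsQuery returns the same master-branch lookup with
// ? placeholders; the caller passes query and args to sql.DB.Query.
// repoIDs must be non-empty, as the handler already guarantees.
func buildHeadCommitsQuery(repoIDs []string) (string, []interface{}) {
	placeholders := strings.TrimSuffix(strings.Repeat("?,", len(repoIDs)), ",")
	query := fmt.Sprintf("SELECT repo_id, commit_id FROM Branch WHERE name='master' AND repo_id IN (%s) LOCK IN SHARE MODE", placeholders)
	args := make([]interface{}, len(repoIDs))
	for i, id := range repoIDs {
		args[i] = id
	}
	return query, args
}

func main() {
	query, args := buildHeadCommitsQuery([]string{"a-uuid", "another-uuid"})
	fmt.Println(query, args)
}
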
+ return &appError{nil, msg, http.StatusBadRequest} + } + + dirOnlyArg := queries.Get("dir-only") + var dirOnly bool + if dirOnlyArg != "" { + dirOnly = true + } + + vars := mux.Vars(r) + repoID := vars["repoid"] + if _, err := validateToken(r, repoID, false); err != nil { + return err + } + repo := repomgr.Get(repoID) + if repo == nil { + err := fmt.Errorf("Failed to find repo %.8s", repoID) + return &appError{err, "", http.StatusInternalServerError} + } + ret, err := calculateSendObjectList(repo, serverHead, clientHead, dirOnly) + if err != nil { + err := fmt.Errorf("Failed to get fs id list: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + + var objList []byte + if ret != nil { + objList, err = json.Marshal(ret) + if err != nil { + return &appError{err, "", http.StatusInternalServerError} + } + } else { + // when get obj list is nil, return [] + objList = []byte{'[', ']'} + } + + rsp.Header().Set("Content-Length", strconv.Itoa(len(objList))) + rsp.WriteHeader(http.StatusOK) + rsp.Write(objList) + + return nil +} + +func headCommitOperCB(rsp http.ResponseWriter, r *http.Request) *appError { + if r.Method == http.MethodGet { + return getHeadCommit(rsp, r) + } else if r.Method == http.MethodPut { + return putUpdateBranchCB(rsp, r) + } + return &appError{nil, "", http.StatusBadRequest} +} + +func commitOperCB(rsp http.ResponseWriter, r *http.Request) *appError { + if r.Method == http.MethodGet { + return getCommitInfo(rsp, r) + } else if r.Method == http.MethodPut { + return putCommitCB(rsp, r) + } + return &appError{nil, "", http.StatusBadRequest} +} + +func blockOperCB(rsp http.ResponseWriter, r *http.Request) *appError { + if r.Method == http.MethodGet { + return getBlockInfo(rsp, r) + } else if r.Method == http.MethodPut { + return putSendBlockCB(rsp, r) + } + return &appError{nil, "", http.StatusBadRequest} +} + +func putSendBlockCB(rsp http.ResponseWriter, r *http.Request) *appError { + vars := mux.Vars(r) + repoID := vars["repoid"] + blockID := vars["id"] + + user, appErr := validateToken(r, repoID, false) + if appErr != nil { + return appErr + } + + appErr = checkPermission(repoID, user, "upload", false) + if appErr != nil { + return appErr + } + + storeID, err := getRepoStoreID(repoID) + if err != nil { + err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err) + return &appError{err, "", http.StatusInternalServerError} + } + + if err := blockmgr.Write(storeID, blockID, r.Body); err != nil { + err := fmt.Errorf("Failed to write block %.8s:%s: %v", storeID, blockID, err) + return &appError{err, "", http.StatusInternalServerError} + } + + rsp.WriteHeader(http.StatusOK) + + sendStatisticMsg(storeID, user, "sync-file-upload", uint64(r.ContentLength)) + + return nil +} + +func getBlockInfo(rsp http.ResponseWriter, r *http.Request) *appError { + vars := mux.Vars(r) + repoID := vars["repoid"] + blockID := vars["id"] + + user, appErr := validateToken(r, repoID, false) + if appErr != nil { + return appErr + } + + storeID, err := getRepoStoreID(repoID) + if err != nil { + err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err) + return &appError{err, "", http.StatusInternalServerError} + } + + blockSize, err := blockmgr.Stat(storeID, blockID) + if err != nil { + return &appError{err, "", http.StatusInternalServerError} + } + if blockSize <= 0 { + err := fmt.Errorf("block %.8s:%s size invalid", storeID, blockID) + return &appError{err, "", http.StatusInternalServerError} + } + + blockLen := fmt.Sprintf("%d", blockSize) + 
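
blockOperCB maps GET to a raw block download and PUT to a raw block upload, authenticated by the Seafile-Repo-Token header checked in validateToken. A client-side upload sketch (the /repo/{repoid}/block/{id} route shape is an assumption inferred from the mux vars, not spelled out in this diff):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// uploadBlock PUTs one content-addressed block to the fileserver,
// mirroring what putSendBlockCB expects on the server side.
func uploadBlock(server, repoID, blockID, token string, data []byte) error {
	url := fmt.Sprintf("%s/repo/%s/block/%s", server, repoID, blockID)
	req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(data))
	if err != nil {
		return err
	}
	req.Header.Set("Seafile-Repo-Token", token)
	rsp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer rsp.Body.Close()
	if rsp.StatusCode != http.StatusOK {
		return fmt.Errorf("upload block: unexpected status %d", rsp.StatusCode)
	}
	return nil
}

func main() {
	_ = uploadBlock // server, token and IDs would come from a real sync session
}
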
rsp.Header().Set("Content-Length", blockLen) + rsp.WriteHeader(http.StatusOK) + if err := blockmgr.Read(storeID, blockID, rsp); err != nil { + return &appError{err, "", http.StatusInternalServerError} + } + + sendStatisticMsg(storeID, user, "sync-file-download", uint64(blockSize)) + return nil +} + +func getRepoStoreID(repoID string) (string, error) { + var storeID string + + if value, ok := virtualRepoInfoCache.Load(repoID); ok { + if info, ok := value.(*virtualRepoInfo); ok { + if info.storeID != "" { + storeID = info.storeID + } else { + storeID = repoID + } + info.expireTime = time.Now().Unix() + virtualRepoExpireTime + } + } + if storeID != "" { + return storeID, nil + } + + var vInfo virtualRepoInfo + var rID, originRepoID sql.NullString + sqlStr := "SELECT repo_id, origin_repo FROM VirtualRepo where repo_id = ?" + row := seafileDB.QueryRow(sqlStr, repoID) + if err := row.Scan(&rID, &originRepoID); err != nil { + if err == sql.ErrNoRows { + vInfo.storeID = repoID + vInfo.expireTime = time.Now().Unix() + virtualRepoExpireTime + virtualRepoInfoCache.Store(repoID, &vInfo) + return repoID, nil + } + return "", err + } + + if !rID.Valid || !originRepoID.Valid { + return "", nil + } + + vInfo.storeID = originRepoID.String + vInfo.expireTime = time.Now().Unix() + virtualRepoExpireTime + virtualRepoInfoCache.Store(repoID, &vInfo) + return originRepoID.String, nil +} + +func sendStatisticMsg(repoID, user, operation string, bytes uint64) { + rData := &statusEventData{operation, user, repoID, bytes} + + publishStatusEvent(rData) +} + +func publishStatusEvent(rData *statusEventData) { + buf := fmt.Sprintf("%s\t%s\t%s\t%d", + rData.eType, rData.user, + rData.repoID, rData.bytes) + if _, err := rpcclient.Call("publish_event", seafileServerChannelStats, buf); err != nil { + log.Printf("Failed to publish event: %v", err) + } +} + +func putCommitCB(rsp http.ResponseWriter, r *http.Request) *appError { + vars := mux.Vars(r) + repoID := vars["repoid"] + commitID := vars["id"] + user, appErr := validateToken(r, repoID, false) + if appErr != nil { + return appErr + } + appErr = checkPermission(repoID, user, "upload", true) + if appErr != nil { + return appErr + } + + data, err := ioutil.ReadAll(r.Body) + if err != nil { + return &appError{nil, err.Error(), http.StatusBadRequest} + } + + commit := new(commitmgr.Commit) + if err := commit.FromData(data); err != nil { + return &appError{nil, err.Error(), http.StatusBadRequest} + } + + if commit.RepoID != repoID { + msg := "The repo id in commit does not match current repo id" + return &appError{nil, msg, http.StatusBadRequest} + } + + if err := commitmgr.Save(commit); err != nil { + err := fmt.Errorf("Failed to add commit %s: %v", commitID, err) + return &appError{err, "", http.StatusInternalServerError} + } + + rsp.WriteHeader(http.StatusOK) + + return nil +} + +func getCommitInfo(rsp http.ResponseWriter, r *http.Request) *appError { + vars := mux.Vars(r) + repoID := vars["repoid"] + commitID := vars["id"] + if _, err := validateToken(r, repoID, false); err != nil { + return err + } + if exists, _ := commitmgr.Exists(repoID, commitID); !exists { + log.Printf("%s:%s is missing", repoID, commitID) + return &appError{nil, "", http.StatusNotFound} + } + + var data bytes.Buffer + err := commitmgr.ReadRaw(repoID, commitID, &data) + if err != nil { + err := fmt.Errorf("Failed to read commit %s:%s: %v", repoID, commitID, err) + return &appError{err, "", http.StatusInternalServerError} + } + + dataLen := strconv.Itoa(data.Len()) + rsp.Header().Set("Content-Length", 
dataLen) + rsp.WriteHeader(http.StatusOK) + rsp.Write(data.Bytes()) + + return nil +} + +func putUpdateBranchCB(rsp http.ResponseWriter, r *http.Request) *appError { + queries := r.URL.Query() + newCommitID := queries.Get("head") + if newCommitID == "" || !isObjectIDValid(newCommitID) { + msg := fmt.Sprintf("commit id %s is invalid", newCommitID) + return &appError{nil, msg, http.StatusBadRequest} + } + + vars := mux.Vars(r) + repoID := vars["repoid"] + user, appErr := validateToken(r, repoID, false) + if appErr != nil { + return appErr + } + + appErr = checkPermission(repoID, user, "upload", false) + if appErr != nil && appErr.Code == http.StatusForbidden { + return appErr + } + + repo := repomgr.Get(repoID) + if repo == nil { + err := fmt.Errorf("Repo %s is missing or corrupted", repoID) + return &appError{err, "", http.StatusInternalServerError} + } + + newCommit, err := commitmgr.Load(repoID, newCommitID) + if err != nil { + err := fmt.Errorf("Failed to get commit %s for repo %s", newCommitID, repoID) + return &appError{err, "", http.StatusInternalServerError} + } + + base, err := commitmgr.Load(repoID, newCommit.ParentID) + if err != nil { + err := fmt.Errorf("Failed to get commit %s for repo %s", newCommit.ParentID, repoID) + return &appError{err, "", http.StatusInternalServerError} + } + + ret, err := checkQuota(repoID, 0) + if err != nil { + err := fmt.Errorf("Failed to check quota: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + if ret == 1 { + msg := "Out of quota.\n" + return &appError{nil, msg, seafHTTPResNoQuota} + } + + if err := fastForwardOrMerge(user, repo, base, newCommit); err != nil { + err := fmt.Errorf("Fast-forward merge failed for repo %s: %v", repoID, err) + return &appError{err, "", http.StatusInternalServerError} + } + + mergeVirtualRepo(repoID, "") + + if err := computeRepoSize(repoID); err != nil { + return &appError{err, "", http.StatusInternalServerError} + } + + rsp.WriteHeader(http.StatusOK) + return nil +} + +func getHeadCommit(rsp http.ResponseWriter, r *http.Request) *appError { + vars := mux.Vars(r) + repoID := vars["repoid"] + sqlStr := "SELECT EXISTS(SELECT 1 FROM Repo WHERE repo_id=?)" + var exists bool + row := seafileDB.QueryRow(sqlStr, repoID) + if err := row.Scan(&exists); err != nil { + if err != sql.ErrNoRows { + log.Printf("DB error when checking repo %s existence: %v", repoID, err) + msg := `{"is_corrupted": 1}` + rsp.WriteHeader(http.StatusOK) + rsp.Write([]byte(msg)) + return nil + } + } + if !exists { + return &appError{nil, "", seafHTTPResRepoDeleted} + } + + if _, err := validateToken(r, repoID, false); err != nil { + return err + } + + var commitID string + sqlStr = "SELECT commit_id FROM Branch WHERE name='master' AND repo_id=?"
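
putUpdateBranchCB is the final step of an upload session: commits, fs objects, and blocks are transferred first, then the client asks the server to advance master, which triggers fast-forward-or-merge, virtual repo merging, and a repo size recompute. A sketch of that last client call (the /repo/{repoid}/commit/HEAD route shape is assumed from headCommitOperCB, not confirmed by this diff):

package main

import (
	"fmt"
	"net/http"
)

// updateHead asks the server to move the master branch to newCommitID,
// which putUpdateBranchCB reads from the "head" query parameter.
func updateHead(server, repoID, token, newCommitID string) error {
	url := fmt.Sprintf("%s/repo/%s/commit/HEAD?head=%s", server, repoID, newCommitID)
	req, err := http.NewRequest(http.MethodPut, url, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Seafile-Repo-Token", token)
	rsp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer rsp.Body.Close()
	if rsp.StatusCode != http.StatusOK {
		return fmt.Errorf("update head: unexpected status %d", rsp.StatusCode)
	}
	return nil
}

func main() {
	_ = updateHead // wiring comes from a real sync session
}
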
+ row = seafileDB.QueryRow(sqlStr, repoID) + + if err := row.Scan(&commitID); err != nil { + if err != sql.ErrNoRows { + log.Printf("DB error when get branch master: %v", err) + msg := `{"is_corrupted": 1}` + rsp.WriteHeader(http.StatusOK) + rsp.Write([]byte(msg)) + return nil + } + } + if commitID == "" { + return &appError{nil, "", http.StatusBadRequest} + } + + msg := fmt.Sprintf("{\"is_corrupted\": 0, \"head_commit_id\": \"%s\"}", commitID) + rsp.WriteHeader(http.StatusOK) + rsp.Write([]byte(msg)) + return nil +} + +func checkPermission(repoID, user, op string, skipCache bool) *appError { + var info *permInfo + if !skipCache { + if value, ok := permCache.Load(fmt.Sprintf("%s:%s", repoID, user)); ok { + info = value.(*permInfo) + } + } + if info != nil { + if info.perm == "r" && op == "upload" { + return &appError{nil, "", http.StatusForbidden} + } + return nil + } + + if op == "upload" { + status, err := repomgr.GetRepoStatus(repoID) + if err != nil { + msg := fmt.Sprintf("Failed to get repo status by repo id %s: %v", repoID, err) + return &appError{nil, msg, http.StatusForbidden} + } + if status != repomgr.RepoStatusNormal && status != -1 { + return &appError{nil, "", http.StatusForbidden} + } + } + + perm := share.CheckPerm(repoID, user) + if perm != "" { + info = new(permInfo) + info.perm = perm + info.expireTime = time.Now().Unix() + permExpireTime + permCache.Store(fmt.Sprintf("%s:%s", repoID, user), info) + if perm == "r" && op == "upload" { + return &appError{nil, "", http.StatusForbidden} + } + return nil + } + + permCache.Delete(fmt.Sprintf("%s:%s", repoID, user)) + + return &appError{nil, "", http.StatusForbidden} +} + +func validateToken(r *http.Request, repoID string, skipCache bool) (string, *appError) { + token := r.Header.Get("Seafile-Repo-Token") + if token == "" { + msg := "token is null" + return "", &appError{nil, msg, http.StatusBadRequest} + } + + if value, ok := tokenCache.Load(token); ok { + if info, ok := value.(*tokenInfo); ok { + return info.email, nil + } + } + + email, err := repomgr.GetEmailByToken(repoID, token) + if err != nil { + log.Printf("Failed to get email by token %s: %v", token, err) + tokenCache.Delete(token) + return email, &appError{err, "", http.StatusInternalServerError} + } + if email == "" { + msg := fmt.Sprintf("Failed to get email by token %s", token) + return email, &appError{nil, msg, http.StatusForbidden} + } + + info := new(tokenInfo) + info.email = email + info.expireTime = time.Now().Unix() + tokenExpireTime + info.repoID = repoID + tokenCache.Store(token, info) + + return email, nil +} + +func validateClientVer(clientVer string) int { + versions := strings.Split(clientVer, ".") + if len(versions) != 3 { + return http.StatusBadRequest + } + if _, err := strconv.Atoi(versions[0]); err != nil { + return http.StatusBadRequest + } + if _, err := strconv.Atoi(versions[1]); err != nil { + return http.StatusBadRequest + } + if _, err := strconv.Atoi(versions[2]); err != nil { + return http.StatusBadRequest + } + + return http.StatusOK +} + +func getClientIPAddr(r *http.Request) string { + xForwardedFor := r.Header.Get("X-Forwarded-For") + addr := strings.TrimSpace(strings.Split(xForwardedFor, ",")[0]) + ip := net.ParseIP(addr) + if ip != nil { + return ip.String() + } + + addr = strings.TrimSpace(r.Header.Get("X-Real-Ip")) + ip = net.ParseIP(addr) + if ip != nil { + return ip.String() + } + + if addr, _, err := net.SplitHostPort(strings.TrimSpace(r.RemoteAddr)); err == nil { + ip = net.ParseIP(addr) + if ip != nil { + return ip.String() + 
} + } + + return "" +} + +func onRepoOper(eType, repoID, user, ip, clientName string) { + rData := new(repoEventData) + vInfo, err := repomgr.GetVirtualRepoInfo(repoID) + + if err != nil { + log.Printf("Failed to get virtual repo info by repo id %s: %v", repoID, err) + return + } + if vInfo != nil { + rData.repoID = vInfo.OriginRepoID + rData.path = vInfo.Path + } else { + rData.repoID = repoID + } + rData.eType = eType + rData.user = user + rData.ip = ip + rData.clientName = clientName + + publishRepoEvent(rData) +} + +func publishRepoEvent(rData *repoEventData) { + if rData.path == "" { + rData.path = "/" + } + buf := fmt.Sprintf("%s\t%s\t%s\t%s\t%s\t%s", + rData.eType, rData.user, rData.ip, + rData.clientName, rData.repoID, rData.path) + if _, err := rpcclient.Call("publish_event", seafileServerChannelEvent, buf); err != nil { + log.Printf("Failed to publish event: %v", err) + } +} + +func removeExpireCache() { + deleteTokens := func(key interface{}, value interface{}) bool { + if info, ok := value.(*tokenInfo); ok { + if info.expireTime <= time.Now().Unix() { + tokenCache.Delete(key) + } + } + return true + } + + deletePerms := func(key interface{}, value interface{}) bool { + if info, ok := value.(*permInfo); ok { + if info.expireTime <= time.Now().Unix() { + permCache.Delete(key) + } + } + return true + } + + deleteVirtualRepoInfo := func(key interface{}, value interface{}) bool { + if info, ok := value.(*virtualRepoInfo); ok { + if info.expireTime <= time.Now().Unix() { + virtualRepoInfoCache.Delete(key) + } + } + return true + } + + tokenCache.Range(deleteTokens) + permCache.Range(deletePerms) + virtualRepoInfoCache.Range(deleteVirtualRepoInfo) +} + +func calculateSendObjectList(repo *repomgr.Repo, serverHead string, clientHead string, dirOnly bool) ([]interface{}, error) { + masterHead, err := commitmgr.Load(repo.ID, serverHead) + if err != nil { + err := fmt.Errorf("Failed to load server head commit %s:%s: %v", repo.ID, serverHead, err) + return nil, err + } + var remoteHead *commitmgr.Commit + remoteHeadRoot := emptySHA1 + if clientHead != "" { + remoteHead, err = commitmgr.Load(repo.ID, clientHead) + if err != nil { + err := fmt.Errorf("Failed to load remote head commit %s:%s: %v", repo.ID, clientHead, err) + return nil, err + } + remoteHeadRoot = remoteHead.RootID + } + + var results []interface{} + if remoteHeadRoot != masterHead.RootID && masterHead.RootID != emptySHA1 { + results = append(results, masterHead.RootID) + } + + var opt *diff.DiffOptions + if !dirOnly { + opt = &diff.DiffOptions{ + FileCB: collectFileIDs, + DirCB: collectDirIDs, + RepoID: repo.ID} + opt.Data = &results + } else { + opt = &diff.DiffOptions{ + FileCB: collectFileIDsNOp, + DirCB: collectDirIDs, + RepoID: repo.ID} + opt.Data = &results + } + trees := []string{masterHead.RootID, remoteHeadRoot} + + if err := diff.DiffTrees(trees, opt); err != nil { + return nil, err + } + return results, nil +} + +func collectFileIDs(baseDir string, files []*fsmgr.SeafDirent, data interface{}) error { + file1 := files[0] + file2 := files[1] + results, ok := data.(*[]interface{}) + if !ok { + err := fmt.Errorf("failed to assert results") + return err + } + + if file1 != nil && + (file2 == nil || file1.ID != file2.ID) && + file1.ID != emptySHA1 { + *results = append(*results, file1.ID) + } + + return nil +} + +func collectFileIDsNOp(baseDir string, files []*fsmgr.SeafDirent, data interface{}) error { + return nil +} + +func collectDirIDs(baseDir string, dirs []*fsmgr.SeafDirent, data interface{}, recurse *bool) error { 
+ dir1 := dirs[0] + dir2 := dirs[1] + results, ok := data.(*[]interface{}) + if !ok { + err := fmt.Errorf("failed to assert results") + return err + } + + if dir1 != nil && + (dir2 == nil || dir1.ID != dir2.ID) && + dir1.ID != emptySHA1 { + *results = append(*results, dir1.ID) + } + + return nil +} + +func isObjectIDValid(objID string) bool { + if len(objID) != 40 { + return false + } + for i := 0; i < len(objID); i++ { + c := objID[i] + if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') { + continue + } + return false + } + return true +} diff --git a/fileserver/virtual_repo.go b/fileserver/virtual_repo.go new file mode 100644 index 0000000..5b55516 --- /dev/null +++ b/fileserver/virtual_repo.go @@ -0,0 +1,322 @@ +package main + +import ( + "fmt" + "log" + "path/filepath" + "strings" + "time" + + "math/rand" + + "github.com/haiwen/seafile-server/fileserver/commitmgr" + "github.com/haiwen/seafile-server/fileserver/diff" + "github.com/haiwen/seafile-server/fileserver/fsmgr" + "github.com/haiwen/seafile-server/fileserver/repomgr" +) + +func mergeVirtualRepo(repoID, excludeRepo string) { + virtual, err := repomgr.IsVirtualRepo(repoID) + if err != nil { + return + } + + if virtual { + mergeRepo(repoID) + return + } + + vRepos, _ := repomgr.GetVirtualRepoIDsByOrigin(repoID) + for _, id := range vRepos { + if id == excludeRepo { + continue + } + + mergeRepo(id) + } + + return +} + +func mergeRepo(repoID string) error { + repo := repomgr.Get(repoID) + if repo == nil { + err := fmt.Errorf("failed to get virt repo %.10s", repoID) + return err + } + vInfo := repo.VirtualInfo + if vInfo == nil { + return nil + } + origRepo := repomgr.Get(vInfo.OriginRepoID) + if origRepo == nil { + err := fmt.Errorf("failed to get orig repo %.10s", vInfo.OriginRepoID) + return err + } + + head, err := commitmgr.Load(repo.ID, repo.HeadCommitID) + if err != nil { + err := fmt.Errorf("failed to get commit %s:%.8s", repo.ID, repo.HeadCommitID) + return err + } + origHead, err := commitmgr.Load(origRepo.ID, origRepo.HeadCommitID) + if err != nil { + err := fmt.Errorf("failed to get commit %s:%.8s", origRepo.ID, origRepo.HeadCommitID) + return err + } + + var origRoot string + origRoot, _ = fsmgr.GetSeafdirIDByPath(origRepo.StoreID, origHead.RootID, vInfo.Path) + if origRoot == "" { + newPath, _ := handleMissingVirtualRepo(origRepo, origHead, vInfo) + if newPath != "" { + origRoot, _ = fsmgr.GetSeafdirIDByPath(origRepo.StoreID, origHead.RootID, newPath) + } + if origRoot == "" { + return nil + } + } + + base, err := commitmgr.Load(origRepo.ID, vInfo.BaseCommitID) + if err != nil { + err := fmt.Errorf("failed to get commit %s:%.8s", origRepo.ID, vInfo.BaseCommitID) + return err + } + + root := head.RootID + baseRoot, _ := fsmgr.GetSeafdirIDByPath(origRepo.StoreID, base.RootID, vInfo.Path) + if baseRoot == "" { + err := fmt.Errorf("cannot find seafdir for repo %.10s path %s", vInfo.OriginRepoID, vInfo.Path) + return err + } + + if root == origRoot { + // nothing to merge: the virtual repo and its origin already agree + } else if baseRoot == root { + _, err := updateDir(repoID, "/", origRoot, origHead.CreatorName, head.CommitID) + if err != nil { + err := fmt.Errorf("failed to update root of virtual repo %.10s", repoID) + return err + } + repomgr.SetVirtualRepoBaseCommitPath(repo.ID, origRepo.HeadCommitID, vInfo.Path) + } else if baseRoot == origRoot { + newBaseCommit, err := updateDir(vInfo.OriginRepoID, vInfo.Path, root, head.CreatorName, origHead.CommitID) + if err != nil { + err := fmt.Errorf("failed to update origin repo %.10s path %s", vInfo.OriginRepoID, vInfo.Path) + return err + } + repomgr.SetVirtualRepoBaseCommitPath(repo.ID, newBaseCommit, vInfo.Path) + cleanupVirtualRepos(vInfo.OriginRepoID) + mergeVirtualRepo(vInfo.OriginRepoID, repoID) + } else { + roots := []string{baseRoot, origRoot, root} + opt := new(mergeOptions) + opt.remoteRepoID = repoID + opt.remoteHead = head.CommitID + + err := mergeTrees(origRepo.StoreID, roots, opt) + if err != nil { + err := fmt.Errorf("failed to merge repo %.10s: %v", repoID, err) + return err + } + + _, err = updateDir(repoID, "/", opt.mergedRoot, origHead.CreatorName, head.CommitID) + if err != nil { + err := fmt.Errorf("failed to update root of virtual repo %.10s", repoID) + return err + } + + newBaseCommit, err := updateDir(vInfo.OriginRepoID, vInfo.Path, opt.mergedRoot, head.CreatorName, origHead.CommitID) + if err != nil { + err := fmt.Errorf("failed to update origin repo %.10s path %s", vInfo.OriginRepoID, vInfo.Path) + return err + } + repomgr.SetVirtualRepoBaseCommitPath(repo.ID, newBaseCommit, vInfo.Path) + cleanupVirtualRepos(vInfo.OriginRepoID) + mergeVirtualRepo(vInfo.OriginRepoID, repoID) + } + + return nil +} + +func cleanupVirtualRepos(repoID string) error { + repo := repomgr.Get(repoID) + if repo == nil { + err := fmt.Errorf("failed to get repo %.10s", repoID) + return err + } + + head, err := commitmgr.Load(repo.ID, repo.HeadCommitID) + if err != nil { + err := fmt.Errorf("failed to load commit %s/%s : %v", repo.ID, repo.HeadCommitID, err) + return err + } + + vRepos, err := repomgr.GetVirtualRepoInfoByOrigin(repoID) + if err != nil { + err := fmt.Errorf("failed to get virtual repo ids by origin repo %.10s", repoID) + return err + } + for _, vInfo := range vRepos { + _, err := fsmgr.GetSeafdirByPath(repo.StoreID, head.RootID, vInfo.Path) + if err != nil { + if err == fsmgr.ErrPathNoExist { + handleMissingVirtualRepo(repo, head, vInfo) + } + } + } + + return nil +} + +func handleMissingVirtualRepo(repo *repomgr.Repo, head *commitmgr.Commit, vInfo *repomgr.VRepoInfo) (string, error) { + parent, err := commitmgr.Load(head.RepoID, head.ParentID) + if err != nil { + err := fmt.Errorf("failed to load commit %s/%s : %v", head.RepoID, head.ParentID, err) + return "", err + } + + var results []*diff.DiffEntry + err = diff.DiffCommits(parent, head, &results, true) + if err != nil { + err := fmt.Errorf("failed to diff commits: %v", err) + return "", err + } + + parPath := vInfo.Path + var isRenamed bool + var subPath string + var returnPath string + for { + var newPath string + oldDirID, err := fsmgr.GetSeafdirIDByPath(repo.StoreID, parent.RootID, parPath) + if err != nil || oldDirID == "" { + + if err == fsmgr.ErrPathNoExist { + repomgr.DelVirtualRepo(vInfo.RepoID, cloudMode) + } + err := fmt.Errorf("failed to find %s under commit %s in repo %s", parPath, parent.CommitID, repo.StoreID) + return "", err + } + + for _, de := range results { + if de.Status == diff.DiffStatusDirRenamed { + if de.Sha1 == oldDirID { + if subPath != "" { + newPath = filepath.Join("/", de.NewName, subPath) + } else { + newPath = filepath.Join("/", de.NewName) + } + repomgr.SetVirtualRepoBaseCommitPath(vInfo.RepoID, head.CommitID, newPath) + returnPath = newPath + if subPath == "" { + newName := filepath.Base(newPath) + err := editRepo(repo.ID, newName, "Changed library name", "") + if err != nil { + log.Printf("failed to rename repo %s", newName) + } + } + isRenamed = true + break + } + } + } + + if isRenamed { + break + } + + slash := strings.LastIndex(parPath, "/") + if slash <= 0 { + break + } + subPath = filepath.Base(parPath) + parPath = filepath.Dir(parPath) + } + + if !isRenamed { + repomgr.DelVirtualRepo(vInfo.RepoID, cloudMode) + } + + return returnPath, nil +} + +func editRepo(repoID, name, desc, user string) error { + if name == "" && desc == "" { + err := fmt.Errorf("at least one argument should be non-null") + return err + } + + var retryCnt int + for { + retry, err := editRepoNeedRetry(repoID, name, desc, user) + if err != nil { + err := fmt.Errorf("failed to edit repo: %v", err) + return err + } + if !retry { + break + } + if retryCnt < 3 { + random := rand.Intn(10) + 1 + time.Sleep(time.Duration(random*100) * time.Millisecond) + retryCnt++ + } else { + err := fmt.Errorf("stop edit repo %s after 3 retries", repoID) + return err + } + } + + return nil +} + +func editRepoNeedRetry(repoID, name, desc, user string) (bool, error) { + repo := repomgr.Get(repoID) + if repo == nil { + err := fmt.Errorf("no such library") + return false, err + } + if name == "" { + name = repo.Name + } + if desc == "" { + desc = repo.Desc + } + + parent, err := commitmgr.Load(repo.ID, repo.HeadCommitID) + if err != nil { + err := fmt.Errorf("failed to get commit %s:%s", repo.ID, repo.HeadCommitID) + return false, err + } + + if user == "" { + user = parent.CreatorName + } + + commit := commitmgr.NewCommit(repoID, parent.CommitID, parent.RootID, user, "Changed library name or description") + repomgr.RepoToCommit(repo, commit) + commit.RepoName = name + commit.RepoDesc = desc + + err = commitmgr.Save(commit) + if err != nil { + err := fmt.Errorf("failed to add commit: %v", err) + return false, err + } + + err = updateBranch(repoID, commit.CommitID, parent.CommitID) + if err != nil { + // the branch moved under us; ask the caller to retry on the new head + return true, nil + } + + updateRepoInfo(repoID, commit.CommitID) + + return false, nil +} + +func updateRepoInfo(repoID, commitID string) error { + head, err := commitmgr.Load(repoID, commitID) + if err != nil { + err := fmt.Errorf("failed to get commit %s:%s", repoID, commitID) + return err + } + + repomgr.SetRepoCommitToDb(repoID, head.RepoName, head.Ctime, head.Version, head.Encrypted, head.CreatorName) + + return nil +} diff --git a/server/seafile-session.c b/server/seafile-session.c index 5c84019..ba753ef 100644 --- a/server/seafile-session.c +++ b/server/seafile-session.c @@ -111,6 +111,10 @@ seafile_session_new(const char *central_config_dir, "general", "cloud_mode", NULL); + session->go_fileserver = g_key_file_get_boolean (config, + "fileserver", "use_go_fileserver", + NULL); + if (load_database_config (session) < 0) { seaf_warning ("Failed to load database config.\n"); goto onerror; @@ -278,9 +282,11 @@ seafile_session_start (SeafileSession *session) return -1; } - if (seaf_http_server_start (session->http_server) < 0) { - seaf_warning ("Failed to start http server thread.\n"); - return -1; + if (!session->go_fileserver) { + if (seaf_http_server_start (session->http_server) < 0) { + seaf_warning ("Failed to start http server thread.\n"); + return -1; + } } return 0; diff --git a/server/seafile-session.h b/server/seafile-session.h index ffdf913..bcf7b95 100644 --- a/server/seafile-session.h +++ b/server/seafile-session.h @@ -76,6 +76,8 @@ struct _SeafileSession { gboolean create_tables; gboolean ccnet_create_tables; + + gboolean go_fileserver; }; extern SeafileSession *seaf; diff --git a/server/web-accesstoken-mgr.c b/server/web-accesstoken-mgr.c index 93bcdb6..6f943f1 100644 --- a/server/web-accesstoken-mgr.c +++ b/server/web-accesstoken-mgr.c @@ -176,29 +176,31 @@ seaf_web_at_manager_get_access_token (SeafWebAccessTokenManager *mgr, pthread_mutex_unlock (&mgr->priv->lock); - if (strcmp(op, 
"download-dir") == 0 || - strcmp(op, "download-multi") == 0 || - strcmp(op, "download-dir-link") == 0 || - strcmp(op, "download-multi-link") == 0) { + if (!seaf->go_fileserver) { + if (strcmp(op, "download-dir") == 0 || + strcmp(op, "download-multi") == 0 || + strcmp(op, "download-dir-link") == 0 || + strcmp(op, "download-multi-link") == 0) { - webaccess = g_object_new (SEAFILE_TYPE_WEB_ACCESS, - "repo_id", info->repo_id, - "obj_id", info->obj_id, - "op", info->op, - "username", info->username, - NULL); + webaccess = g_object_new (SEAFILE_TYPE_WEB_ACCESS, + "repo_id", info->repo_id, + "obj_id", info->obj_id, + "op", info->op, + "username", info->username, + NULL); - if (zip_download_mgr_start_zip_task (seaf->zip_download_mgr, - t, webaccess, error) < 0) { - pthread_mutex_lock (&mgr->priv->lock); - g_hash_table_remove (mgr->priv->access_token_hash, t); - pthread_mutex_unlock (&mgr->priv->lock); + if (zip_download_mgr_start_zip_task (seaf->zip_download_mgr, + t, webaccess, error) < 0) { + pthread_mutex_lock (&mgr->priv->lock); + g_hash_table_remove (mgr->priv->access_token_hash, t); + pthread_mutex_unlock (&mgr->priv->lock); + g_object_unref (webaccess); + g_free (t); + return NULL; + } g_object_unref (webaccess); - g_free (t); - return NULL; } - g_object_unref (webaccess); } return t; diff --git a/tests/conf/ccnet.conf b/tests/conf/ccnet.conf index 491c37d..e3acf98 100644 --- a/tests/conf/ccnet.conf +++ b/tests/conf/ccnet.conf @@ -12,6 +12,12 @@ PORT = 9999 [Database] CREATE_TABLES = true +ENGINE = mysql +HOST = 127.0.0.1 +USER = seafile +PASSWD = seafile +DB = ccnet_db +CONNECTION_CHARSET=utf8 #[Database] #ENGINE = mysql diff --git a/tests/test_file_operation/test_merge_virtual_repo.py b/tests/test_file_operation/test_merge_virtual_repo.py new file mode 100644 index 0000000..7e40001 --- /dev/null +++ b/tests/test_file_operation/test_merge_virtual_repo.py @@ -0,0 +1,207 @@ +import pytest +import requests +import os +import time +from tests.config import USER, USER2 +from seaserv import seafile_api as api +from requests_toolbelt import MultipartEncoder + +file_name = 'file.txt' +file_name_not_replaced = 'file (1).txt' +file_path = os.getcwd() + '/' + file_name +file_content = 'File content.\r\n' +file_size = len(file_content) + +resumable_file_name = 'resumable.txt' +resumable_test_file_name = 'test/resumable.txt' +chunked_part1_name = 'part1.txt' +chunked_part2_name = 'part2.txt' +chunked_part1_path = os.getcwd() + '/' + chunked_part1_name +chunked_part2_path = os.getcwd() + '/' + chunked_part2_name +chunked_part1_content = 'First line.\r\n' +chunked_part2_content = 'Second line.\r\n' +total_size = len(chunked_part1_content) + len(chunked_part2_content) + +#File_id is not used when upload files, but +#the argument obj_id of get_fileserver_access_token shouldn't be NULL. 
+file_id = '0000000000000000000000000000000000000000' + +def create_test_file(): + fp = open(file_path, 'w') + fp.close() + fp = open(chunked_part1_path, 'w') + fp.close() + fp = open(chunked_part2_path, 'w') + fp.close() + +def create_test_dir(repo, dir_name): + parent_dir = '/' + api.post_dir(repo.id,parent_dir,dir_name,USER) + +def assert_upload_response(response, replace, file_exist): + assert response.status_code == 200 + response_json = response.json() + assert response_json[0]['size'] == 0 + assert response_json[0]['id'] == file_id + if file_exist and not replace: + assert response_json[0]['name'] == file_name_not_replaced + else: + assert response_json[0]['name'] == file_name + +def assert_resumable_upload_response(response, repo_id, file_name, upload_complete): + assert response.status_code == 200 + if not upload_complete: + assert response.text == '{"success": true}' + offset = api.get_upload_tmp_file_offset(repo_id, '/' + file_name) + assert offset == len(chunked_part1_content) + else: + response_json = response.json() + assert response_json[0]['size'] == total_size + new_file_id = response_json[0]['id'] + assert len(new_file_id) == 40 and new_file_id != file_id + assert response_json[0]['name'] == resumable_file_name + +def assert_update_response(response, is_json): + assert response.status_code == 200 + if is_json: + response_json = response.json() + assert response_json[0]['size'] == file_size + new_file_id = response_json[0]['id'] + assert len(new_file_id) == 40 and new_file_id != file_id + assert response_json[0]['name'] == file_name + else: + new_file_id = response.text + assert len(new_file_id) == 40 and new_file_id != file_id + +def request_resumable_upload(filepath, headers,upload_url_base,parent_dir,is_ajax): + write_file(chunked_part1_path, chunked_part1_content) + write_file(chunked_part2_path, chunked_part2_content) + + m = MultipartEncoder( + fields={ + 'parent_dir': parent_dir, + 'file': (resumable_file_name, open(filepath, 'rb'), 'application/octet-stream') + }) + params = {'ret-json':'1'} + headers["Content-type"] = m.content_type + if is_ajax: + response = requests.post(upload_url_base, headers = headers, + data = m) + else: + response = requests.post(upload_url_base, headers = headers, + data = m, params = params) + return response + +def write_file(file_path, file_content): + fp = open(file_path, 'w') + fp.write(file_content) + fp.close() + +def del_local_files(): + os.remove(file_path) + os.remove(chunked_part1_path) + os.remove(chunked_part2_path) + +def test_merge_virtual_repo(repo): + api.post_dir(repo.id, '/dir1', 'subdir1', USER) + api.post_dir(repo.id, '/dir2', 'subdir2', USER) + v_repo_id = api.share_subdir_to_user(repo.id, '/dir1', USER, USER2, 'rw') + + create_test_file() + params = {'ret-json':'1'} + obj_id = '{"parent_dir":"/"}' + create_test_dir(repo,'test') + + #test upload file to virtual repo root dir.
+ token = api.get_fileserver_access_token(v_repo_id, obj_id, 'upload', USER2, False) + upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token + m = MultipartEncoder( + fields={ + 'parent_dir': '/', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) + response = requests.post(upload_url_base, params = params, + data = m, headers = {'Content-Type': m.content_type}) + assert_upload_response(response, False, False) + + time.sleep (1.5) + repo_size = api.get_repo_size (v_repo_id) + assert repo_size == 0 + + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == 0 + + #test resumable upload file to virtual repo root dir + parent_dir = '/' + headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1), + str(total_size)), + 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} + response = request_resumable_upload(chunked_part1_path,headers, upload_url_base,parent_dir, False) + assert_resumable_upload_response(response, v_repo_id, + resumable_file_name, False) + + time.sleep (1.5) + v_repo_size = api.get_repo_size (v_repo_id) + assert v_repo_size == 0 + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == 0 + + headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)), + str(total_size - 1), + str(total_size)), + 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} + response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, False) + assert_resumable_upload_response(response, v_repo_id, + resumable_file_name, True) + + time.sleep (2.5) + v_repo_size = api.get_repo_size (v_repo_id) + assert v_repo_size == total_size + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == total_size + + #test update file to virtual repo. 
+ write_file(file_path, file_content) + token = api.get_fileserver_access_token(v_repo_id, obj_id, 'update', USER2, False) + update_url_base = 'http://127.0.0.1:8082/update-api/' + token + m = MultipartEncoder( + fields={ + 'target_file': '/' + file_name, + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) + response = requests.post(update_url_base, + data = m, headers = {'Content-Type': m.content_type}) + assert_update_response(response, False) + + time.sleep (1.5) + v_repo_size = api.get_repo_size (v_repo_id) + assert v_repo_size == total_size + file_size + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == total_size + file_size + + api.del_file(v_repo_id, '/', file_name, USER2) + + time.sleep (1.5) + v_repo_size = api.get_repo_size (v_repo_id) + assert v_repo_size == total_size + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == total_size + + api.del_file(v_repo_id, '/', resumable_file_name, USER2) + + time.sleep (1.5) + v_repo_size = api.get_repo_size (v_repo_id) + assert v_repo_size == 0 + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == 0 + + api.del_file(repo.id, '/dir1', 'subdir1', USER) + api.del_file(repo.id, '/dir2', 'subdir2', USER) + assert api.unshare_subdir_for_user(repo.id, '/dir1', USER, USER2) == 0 + del_local_files() diff --git a/tests/test_file_operation/test_upload_and_update.py b/tests/test_file_operation/test_upload_and_update.py index fef2748..f5900bc 100644 --- a/tests/test_file_operation/test_upload_and_update.py +++ b/tests/test_file_operation/test_upload_and_update.py @@ -1,8 +1,10 @@ import pytest import requests import os +import time from tests.config import USER from seaserv import seafile_api as api +from requests_toolbelt import MultipartEncoder file_name = 'file.txt' file_name_not_replaced = 'file (1).txt' @@ -75,15 +77,19 @@ def request_resumable_upload(filepath, headers,upload_url_base,parent_dir,is_aja write_file(chunked_part1_path, chunked_part1_content) write_file(chunked_part2_path, chunked_part2_content) - files = {'file': open(filepath, 'rb'), - 'parent_dir':parent_dir} + m = MultipartEncoder( + fields={ + 'parent_dir': parent_dir, + 'file': (resumable_file_name, open(filepath, 'rb'), 'application/octet-stream') + }) params = {'ret-json':'1'} + headers["Content-type"] = m.content_type if is_ajax: response = requests.post(upload_url_base, headers = headers, - files = files) + data = m) else: response = requests.post(upload_url_base, headers = headers, - files = files, params = params) + data = m, params = params) return response def write_file(file_path, file_content): @@ -110,58 +116,106 @@ def test_ajax(repo): #test upload file to test dir. token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False) upload_url_base = 'http://127.0.0.1:8082/upload-aj/'+ token - files = {'file': open(file_path, 'rb'), - 'parent_dir':'/test'} - response = requests.post(upload_url_base, files = files) + m = MultipartEncoder( + fields={ + 'parent_dir': '/test', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) + response = requests.post(upload_url_base, + data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload file to root dir.
token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False) upload_url_base = 'http://127.0.0.1:8082/upload-aj/'+ token - files = {'file': open(file_path, 'rb'), - 'parent_dir':'/'} - response = requests.post(upload_url_base, files = files) + m = MultipartEncoder( + fields={ + 'parent_dir': '/', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) + response = requests.post(upload_url_base, + data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, False) + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == 0 + #test upload file to test dir when file already exists. - files = {'file': open(file_path, 'rb'), - 'parent_dir':'/test'} - response = requests.post(upload_url_base, files = files) + m = MultipartEncoder( + fields={ + 'parent_dir': '/test', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) + response = requests.post(upload_url_base, + data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload file to root dir when file already exists. - files = {'file': open(file_path, 'rb'), - 'parent_dir':'/'} - response = requests.post(upload_url_base, files = files) + m = MultipartEncoder( + fields={ + 'parent_dir': '/', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) + response = requests.post(upload_url_base, + data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, True) + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == 0 + #test upload file to subdir whose parent is test dir. - files = {'file': open(file_path, 'rb'), - 'parent_dir':'/test', - 'relative_path':'subdir'} - response = requests.post(upload_url_base, files = files) + m = MultipartEncoder( + fields={ + 'parent_dir': '/test', + 'relative_path':'subdir', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) + response = requests.post(upload_url_base, + data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload file to subdir whose parent is root dir. - files = {'file': open(file_path, 'rb'), - 'parent_dir':'/', - 'relative_path':'subdir'} - response = requests.post(upload_url_base, files = files) + m = MultipartEncoder( + fields={ + 'parent_dir': '/', + 'relative_path':'subdir', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) + response = requests.post(upload_url_base, + data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, False) + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == 0 + #test upload file to subdir whose parent is test dir when file already exists. - files = {'file': open(file_path, 'rb'), - 'parent_dir':'/test', - 'relative_path':'subdir'} - response = requests.post(upload_url_base, files = files) + m = MultipartEncoder( + fields={ + 'parent_dir': '/test', + 'relative_path':'subdir', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) + response = requests.post(upload_url_base, + data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload file to subdir whose parent is root dir when file already exists. 
- files = {'file': open(file_path, 'rb'), - 'parent_dir':'/', - 'relative_path':'subdir'} - response = requests.post(upload_url_base, files = files) + m = MultipartEncoder( + fields={ + 'parent_dir': '/', + 'relative_path':'subdir', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) + response = requests.post(upload_url_base, + data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, True) + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == 0 + #test resumable upload file to test dir parent_dir = '/test' headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1), @@ -187,25 +241,39 @@ def test_ajax(repo): assert_resumable_upload_response(response, repo.id, resumable_file_name, False) + repo_size = api.get_repo_size (repo.id) + assert repo_size == 0 + headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)), str(total_size - 1), str(total_size)), 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} - response = requests.post(upload_url_base, headers = headers, - files = files) response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, True) assert_resumable_upload_response(response, repo.id, resumable_file_name, True) + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == total_size + #test update file. write_file(file_path, file_content) token = api.get_fileserver_access_token(repo.id, obj_id, 'update', USER, False) update_url_base = 'http://127.0.0.1:8082/update-aj/' + token - files = {'file': open(file_path, 'rb'), - 'target_file':'/' + file_name} - response = requests.post(update_url_base, files = files) + m = MultipartEncoder( + fields={ + 'target_file': '/' + file_name, + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) + response = requests.post(update_url_base, + data = m, headers = {'Content-Type': m.content_type}) assert_update_response(response, True) + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == total_size + file_size + + time.sleep(1) del_repo_files(repo.id) del_local_files() @@ -217,101 +285,172 @@ def test_api(repo): #test upload file to test dir instead of root dir. token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False) upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token - files = {'file':open(file_path, 'rb'), - 'parent_dir':'/test'} + m = MultipartEncoder( + fields={ + 'parent_dir': '/test', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) response = requests.post(upload_url_base, params = params, - files = files) + data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload file to root dir. 
+ params = {'ret-json':'1'} token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False) upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token - files = {'file':open(file_path, 'rb'), - 'parent_dir':'/'} + m = MultipartEncoder( + fields={ + 'parent_dir': '/', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) response = requests.post(upload_url_base, params = params, - files = files) + data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, False) + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == 0 + #test upload file to test dir instead of root dir when file already exists and replace is set. - files = {'file':open(file_path, 'rb'), - 'parent_dir':'/test', - 'replace':'1'} + params = {'ret-json':'1'} + m = MultipartEncoder( + fields={ + 'parent_dir': '/test', + 'replace': '1', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) response = requests.post(upload_url_base, params = params, - files = files) + data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload file to root dir when file already exists and replace is set. - files = {'file':open(file_path, 'rb'), - 'parent_dir':'/', - 'replace':'1'} + params = {'ret-json':'1'} + m = MultipartEncoder( + fields={ + 'parent_dir': '/', + 'replace': '1', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) response = requests.post(upload_url_base, params = params, - files = files) + data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, True, True) + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == 0 + #test upload file to test dir instead of root dir when file already exists and replace is unset. - files = {'file':open(file_path, 'rb'), - 'parent_dir':'/test'} + params = {'ret-json':'1'} + m = MultipartEncoder( + fields={ + 'parent_dir': '/test', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) response = requests.post(upload_url_base, params = params, - files = files) + data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload file to root dir when file already exists and replace is unset. - files = {'file':open(file_path, 'rb'), - 'parent_dir':'/'} + params = {'ret-json':'1'} + m = MultipartEncoder( + fields={ + 'parent_dir': '/', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) response = requests.post(upload_url_base, params = params, - files = files) + data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, True) + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == 0 + #test upload the file to subdir whose parent is test. - files = {'file':open(file_path, 'rb'), - 'parent_dir':'/test', - 'relative_path':'subdir'} + params = {'ret-json':'1'} + m = MultipartEncoder( + fields={ + 'parent_dir': '/test', + 'relative_path': 'subdir', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) response = requests.post(upload_url_base, params = params, - files = files) + data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload the file to subdir. 
- files = {'file':open(file_path, 'rb'), - 'parent_dir':'/', - 'relative_path':'subdir'} + params = {'ret-json':'1'} + m = MultipartEncoder( + fields={ + 'parent_dir': '/', + 'relative_path': 'subdir', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) response = requests.post(upload_url_base, params = params, - files = files) + data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, False) + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == 0 + #test upload the file to subdir whose parent is test when file already exists and replace is set. - files = {'file':open(file_path, 'rb'), - 'parent_dir':'/test', - 'relative_path':'subdir', - 'replace':'1'} + params = {'ret-json':'1'} + m = MultipartEncoder( + fields={ + 'parent_dir': '/test', + 'relative_path': 'subdir', + 'replace': '1', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) response = requests.post(upload_url_base, params = params, - files = files) + data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload the file to subdir when file already exists and replace is set. - files = {'file':open(file_path, 'rb'), - 'parent_dir':'/', - 'relative_path':'subdir', - 'replace':'1'} + params = {'ret-json':'1'} + m = MultipartEncoder( + fields={ + 'parent_dir': '/', + 'relative_path': 'subdir', + 'replace': '1', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) response = requests.post(upload_url_base, params = params, - files = files) + data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, True, True) + time.sleep (1.5) + repo_size = api.get_repo_size (repo.id) + assert repo_size == 0 + #unset test upload the file to subdir whose parent is test dir when file already exists and replace is unset. - files = {'file':open(file_path, 'rb'), - 'parent_dir':'/test', - 'relative_path':'subdir'} + params = {'ret-json':'1'} + m = MultipartEncoder( + fields={ + 'parent_dir': '/test', + 'relative_path': 'subdir', + 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') + }) response = requests.post(upload_url_base, params = params, - files = files) + data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #unset test upload the file to subdir when file already exists and replace is unset. 
-    files = {'file':open(file_path, 'rb'),
-             'parent_dir':'/',
-             'relative_path':'subdir'}
+    params = {'ret-json':'1'}
+    m = MultipartEncoder(
+        fields={
+            'parent_dir': '/',
+            'relative_path': 'subdir',
+            'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
+        })
     response = requests.post(upload_url_base, params = params,
-                             files = files)
+                             data = m, headers = {'Content-Type': m.content_type})
     assert_upload_response(response, False, True)
+    time.sleep (1.5)
+    repo_size = api.get_repo_size (repo.id)
+    assert repo_size == 0
+
     #test resumable upload file to test
     parent_dir = '/test'
     headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1),
@@ -321,6 +460,10 @@ def test_api(repo):

     assert_resumable_upload_response(response, repo.id,
                                      resumable_test_file_name, False)
+    time.sleep (1.5)
+    repo_size = api.get_repo_size (repo.id)
+    assert repo_size == 0
+
     headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)),
                                                        str(total_size - 1),
                                                        str(total_size)),
@@ -337,6 +480,9 @@ def test_api(repo):

     assert_resumable_upload_response(response, repo.id,
                                      resumable_file_name, False)
+    repo_size = api.get_repo_size (repo.id)
+    assert repo_size == 0
+
     headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)),
                                                        str(total_size - 1),
                                                        str(total_size)),
@@ -345,14 +491,27 @@ def test_api(repo):

     assert_resumable_upload_response(response, repo.id,
                                      resumable_file_name, True)
+    time.sleep (1.5)
+    repo_size = api.get_repo_size (repo.id)
+    assert repo_size == total_size
+
     #test update file.
     write_file(file_path, file_content)
     token = api.get_fileserver_access_token(repo.id, obj_id, 'update', USER, False)
     update_url_base = 'http://127.0.0.1:8082/update-api/' + token
-    files = {'file':open(file_path, 'rb'),
-             'target_file':'/' + file_name}
-    response = requests.post(update_url_base, files = files)
+    m = MultipartEncoder(
+        fields={
+            'target_file': '/' + file_name,
+            'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
+        })
+    response = requests.post(update_url_base,
+                             data = m, headers = {'Content-Type': m.content_type})
     assert_update_response(response, False)
+    time.sleep (1.5)
+    repo_size = api.get_repo_size (repo.id)
+    assert repo_size == total_size + file_size
+
+    time.sleep(1)

     del_repo_files(repo.id)
     del_local_files()
diff --git a/tests/test_file_operation/test_zip_download.py b/tests/test_file_operation/test_zip_download.py
index 91b5362..3082379 100644
--- a/tests/test_file_operation/test_zip_download.py
+++ b/tests/test_file_operation/test_zip_download.py
@@ -43,15 +43,8 @@ def test_zip_download():
     obj_id_json_str = json.dumps(obj_id)
     token = api.get_fileserver_access_token(t_repo_id, obj_id_json_str,
                                             'download-dir', USER)

-    while True:
-        time.sleep(0.5)
-        progress_json_str = api.query_zip_progress(token)
-        progress = json.loads(progress_json_str)
-        if progress['zipped'] != progress['total']:
-            continue
-        assert progress['zipped'] == 2 and progress['total'] == 2
-        break
+    time.sleep(1)
     download_url = base_url + 'zip/' + token
     response = requests.get(download_url)
     assert response.status_code == 200
@@ -88,15 +81,7 @@ def test_zip_download():
     obj_id_json_str = json.dumps(obj_id)
     token = api.get_fileserver_access_token(t_repo_id, obj_id_json_str,
                                             'download-dir', USER)
-    while True:
-        time.sleep(0.5)
-        progress_json_str = api.query_zip_progress(token)
-        progress = json.loads(progress_json_str)
-        if progress['zipped'] != progress['total']:
-            continue
-        assert progress['zipped'] == 0 and progress['total'] == 0
-        break
-
+    time.sleep(1)
     download_url = base_url + 'zip/' + token
     response = requests.get(download_url)
     assert response.status_code == 200
@@ -121,15 +106,8 @@ def test_zip_download():
     obj_id_json_str = json.dumps(obj_id)
     token = api.get_fileserver_access_token(t_repo_id, obj_id_json_str,
                                             'download-multi', USER)

-    while True:
-        time.sleep(0.5)
-        progress_json_str = api.query_zip_progress(token)
-        progress = json.loads(progress_json_str)
-        if progress['zipped'] != progress['total']:
-            continue
-        assert progress['zipped'] == 2 and progress['total'] == 2
-        break
+    time.sleep(1)
     download_url = base_url + 'zip/' + token
     response = requests.get(download_url)
     assert response.status_code == 200
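
Reviewer note: the upload and update tests above replace requests' files= argument with requests-toolbelt's MultipartEncoder, which is why requests-toolbelt is added to ci/requirements.txt. Below is a minimal, self-contained sketch of that pattern in isolation; the host, token, and file paths are hypothetical placeholders, and only the encoder and Content-Type handling mirror what the patch does.

# Sketch of the MultipartEncoder upload pattern used in these tests.
# URL, token, and paths are placeholders, not values from this patch.
import requests
from requests_toolbelt import MultipartEncoder

def upload_file(upload_api_url, file_path, file_name, parent_dir='/'):
    with open(file_path, 'rb') as fp:
        # Stream the multipart/form-data body instead of letting requests
        # buffer it, and pin the part's filename and content type explicitly.
        m = MultipartEncoder(fields={
            'parent_dir': parent_dir,
            'file': (file_name, fp, 'application/octet-stream'),
        })
        # The encoder generates the multipart boundary, so its content_type
        # must be sent as the request's Content-Type header.
        return requests.post(upload_api_url,
                             params={'ret-json': '1'},
                             data=m,
                             headers={'Content-Type': m.content_type})

# Hypothetical usage ('<token>' stands in for a real fileserver token):
#   resp = upload_file('http://127.0.0.1:8082/upload-api/<token>',
#                      '/tmp/test.txt', 'test.txt')
#   assert resp.status_code == 200

Passing the encoder as data keeps memory use flat for large files, at the cost of requiring the explicit Content-Type header shown above, since requests only builds the multipart header itself when the files= argument is used.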