
Go fileserver (#437)

* Initial commit for fileserver written in golang.

[gofileserver] Fix some syntax errors.

Add fs backend and objstore test (#352)

* Add fs backend and objstore test

* modify test case and optimize fs backend

* Modify function name and first write temporary files

* Don't need to reopen the temp files

Add comment for objstore (#354)

* Add comment for objstore

* Modify comment

Add commitmgr and test case (#356)

* Add commitmgr and test case

* Redefine the interface

* Modify comment and interface

* Modify parameter and del unused method

* Add comment for FromData and ToData

Add blockmgr and test case (#357)

* Add blockmgr and test case

* Modify comment and interface

Add fsmgr and test case (#358)

* Add fsmgr and test case

* Add save interface and error details

* Modify errors and comments

Add searpc package and test case (#360)

* Add searpc package

* Add searpc test case

* Add return error and add Request struct

* Modify returned error

* Modify comments

add checkPerm (#369)

Add file and block download (#363)

* Add file and block download

* Modify init and use aes algorithm

* Get block by offset and add stat method (see the sketch after this list)

* Modify objID's type

* Fix reset pos after add start

* Add HTTP error handling and log failures when reading a block or writing a block to the response

* Modify http return code and value names

* Modify http return code and add log info

* Block read: add comment and only repeat once
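"Get block by offset" above suggests range reads within a block. A minimal sketch of that idea on top of the blockmgr API added later in this commit (the helper name and the read-then-slice approach are illustrative, not the actual handler; assumes imports of bytes, fmt, and the fileserver's blockmgr package):

func readBlockRange(repoID, blockID string, offset int64) ([]byte, error) {
	// Sketch only: read the whole block, then slice from offset.
	// The real handler streams data and maps failures to HTTP status codes.
	var buf bytes.Buffer
	if err := blockmgr.Read(repoID, blockID, &buf); err != nil {
		return nil, err
	}
	b := buf.Bytes()
	if offset < 0 || offset > int64(len(b)) {
		return nil, fmt.Errorf("offset %d out of range", offset)
	}
	return b[offset:], nil
}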

load ccnetdb and support sqlite (#371)

Add zip download (#372)

* Add zip download

* Modify pack dir and log info

* Modify HTTP return codes and use the Deflate zip compression method
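The Deflate choice maps directly onto Go's archive/zip. A minimal sketch of streaming one zipped file into an HTTP response with the Deflate method (the function name and parameters are illustrative; assumes imports of archive/zip, io, net/http, and time):

func writeZipped(w http.ResponseWriter, name string, src io.Reader) error {
	zw := zip.NewWriter(w) // zip entries are streamed straight to the response
	hdr := &zip.FileHeader{Name: name, Method: zip.Deflate}
	hdr.Modified = time.Now()
	f, err := zw.CreateHeader(hdr)
	if err != nil {
		zw.Close()
		return err
	}
	if _, err := io.Copy(f, src); err != nil {
		zw.Close()
		return err
	}
	return zw.Close() // flushes the central directory
}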

add /repo/<repo-id>/permission-check (#375)

add /<repo-id>/commit/HEAD (#377)

add  /repo/<repo-id>/commit/<id> (#379)

add /repo/<repo-id>/block/<id> (#380)

add /repo/<repo-id>/fs-id-list (#383)

add /repo/head-commits-multi (#388)

Add file upload api (#378)

* Add file upload api

* Upload API implements posting multiple files and creating relative paths

* Modify error handling and save files directly

* Fix rebase conflict

* Index blocks using a channel and optimize mkdir with parents (see the worker-pool sketch after this list)

* Handle jobs and results in a loop

* Mkdir with parents uses postMultiFiles and pointers to SeafDirent

* Del diff_simple size_sched virtual_repo

* Need to check the path with and without slash

* Modify merge trees and add merge test case

* Del postFile and don't close results channel

* Close the file and remove multipart temp file

* Modify merge test case and compare the first name of path

* Use pointer of Entries for SeafDir

* Add test cases for different situations
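The channel-based block indexing above follows the standard jobs/results worker pattern. A self-contained sketch of that pattern (the types and the SHA-1 block IDs here are illustrative, not the fileserver's actual indexing code):

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

// chunk is one fixed-size piece of an uploaded file (illustrative type).
type chunk struct {
	idx  int
	data []byte
}

// result carries the computed block ID back to the collector.
type result struct {
	idx int
	id  string
}

// worker hashes chunks from jobs and sends block IDs to results.
func worker(jobs <-chan chunk, results chan<- result) {
	for c := range jobs {
		sum := sha1.Sum(c.data) // block ID = SHA-1 of content
		results <- result{idx: c.idx, id: hex.EncodeToString(sum[:])}
	}
}

func main() {
	chunks := []chunk{{0, []byte("hello ")}, {1, []byte("world")}}
	jobs := make(chan chunk)
	results := make(chan result)
	for i := 0; i < 2; i++ { // fixed worker count, like max_indexing_threads
		go worker(jobs, results)
	}
	go func() {
		for _, c := range chunks {
			jobs <- c
		}
		close(jobs)
	}()
	ids := make([]string, len(chunks))
	for range chunks { // handle jobs and results in a loop
		r := <-results
		ids[r.idx] = r.id
	}
	fmt.Println(ids)
}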

add /repo/<repo-id>/pack-fs (#389)

add POST /<repo-id>/check-fs and /<repo-id>/check-blocks (#396)

Merge compute repo (#397)

* Add update repo size and merge virtual repo

* Eliminate lint warnings

* Uncomment merge virtual repo and compute repo size

* Need to init the dents

* Use interface{} param and modify removeElems

* Move update dir to file.go and modify logs

* Del sync pkg

add PUT /<repo-id>/commit/<commit-id> (#400)

add PUT /<repo-id>/block/<id> (#401)

add POST /<repo-id>/recv-fs (#398)

add PUT /<repo-id>/commit/HEAD (#402)

Add http return code (#403)

Add file update API (#399)

* Add file update API

* Add GetObjIDByPath and fix change size error

* Add traffic statistics for update api

add diffTrees unit test (#391)

add GET /accessible-repos (#406)

add GET /<repo-id>/block-map/<file-id> (#405)

Add test update repo size and merge virtual repo (#409)

* Update dir needs to update repo size

* Add test update repo size and merge virtual repo

* Add delay for test ajax

* Add delay before get repo size and modify comment

Use go fileserver for unit test (#410)

* Use go fileserver for unit test

* Make repo size update scheduling blocking

* Add delay because sqlite doesn't support concurrency

* POST uses multipart form encoding (see the sketch after this list)

* Del mysql database when test finished

* Fix merge virtual repo failure when using sqlite3
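"POST uses multipart form encoding" refers to how the tests talk to the upload API. A minimal Go sketch of building such a request (the URL, token, and form field names are illustrative):

package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
)

func main() {
	var body bytes.Buffer
	mw := multipart.NewWriter(&body)
	mw.WriteField("parent_dir", "/")               // illustrative form field
	fw, _ := mw.CreateFormFile("file", "test.txt") // illustrative field name
	fw.Write([]byte("hello world"))
	mw.Close()

	req, _ := http.NewRequest("POST", "http://localhost:8082/upload-api/some-token", &body)
	req.Header.Set("Content-Type", mw.FormDataContentType()) // includes the multipart boundary
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}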

Add upload block API (#412)

fixed error

Add quota-check API (#426)

use diff package

* Use central conf for go fileserver (#428)

* Use central conf for go fileserver

* Fix log error

* use store id and remove share get repo owner (#430)

* Fix permission error (#432)

Co-authored-by: feiniks <36756310+feiniks@users.noreply.github.com>
Co-authored-by: Xiangyue Cai <caixiangyue007@gmail.com>
Authored by Jiaqiang Xu on 2021-01-04 11:41:53 +08:00, committed by GitHub
parent 3130d27b5d
commit 7420b8d738
40 changed files with 10781 additions and 148 deletions


@@ -4,3 +4,4 @@ pytest>=3.3.2
backports.functools_lru_cache>=1.4
tenacity>=4.8.0
future
requests-toolbelt


@@ -212,15 +212,19 @@ def main():
else:
dbs = ('sqlite3',)
for db in dbs:
shell('rm -rf {}/*'.format(INSTALLDIR))
start_and_test_with_db(db)
def start_and_test_with_db(db):
info('Setting up seafile server with %s database', db)
fileservers = ('go_fileserver', 'c_fileserver')
for fileserver in fileservers:
shell('rm -rf {}/*'.format(INSTALLDIR))
info('Setting up seafile server with %s database, use %s', db, fileserver)
server = ServerCtl(
TOPDIR,
SeafileServer().projectdir,
INSTALLDIR,
fileserver,
db=db,
# Use the newly built seaf-server (to avoid "make install" each time when developing locally)
seaf_server_bin=join(SeafileServer().projectdir, 'server/seaf-server')


@@ -22,7 +22,7 @@ logger = logging.getLogger(__name__)
class ServerCtl(object):
def __init__(self, topdir, datadir, db='sqlite3', seaf_server_bin='seaf-server', ccnet_server_bin='ccnet-server'):
def __init__(self, topdir, projectdir, datadir, fileserver, db='sqlite3', seaf_server_bin='seaf-server', ccnet_server_bin='ccnet-server'):
self.db = db
self.datadir = datadir
self.central_conf_dir = join(datadir, 'conf')
@@ -33,6 +33,7 @@ class ServerCtl(object):
mkdirs(self.log_dir)
self.ccnet_log = join(self.log_dir, 'ccnet.log')
self.seafile_log = join(self.log_dir, 'seafile.log')
self.fileserver_log = join(self.log_dir, 'fileserver.log')
self.ccnet_server_bin = ccnet_server_bin
self.seaf_server_bin = seaf_server_bin
@@ -41,6 +42,9 @@ class ServerCtl(object):
self.ccnet_proc = None
self.seafile_proc = None
self.fileserver_proc = None
self.projectdir = projectdir
self.fileserver = fileserver
def setup(self):
if self.db == 'mysql':
@@ -86,6 +90,13 @@ CONNECTION_CHARSET = utf8
def init_seafile(self):
seafile_conf = join(self.central_conf_dir, 'seafile.conf')
if self.fileserver == 'go_fileserver':
seafile_fileserver_conf = '''\
[fileserver]
use_go_fileserver = true
port=8082
'''
else:
seafile_fileserver_conf = '''\
[fileserver]
port=8082
@@ -150,6 +161,7 @@ connection_charset = utf8
self.create_database_tables()
logger.info('Starting seafile server')
self.start_seafile()
self.start_fileserver()
def create_database_tables(self):
if self.db == 'mysql':
@@ -217,6 +229,22 @@ connection_charset = utf8
]
self.seafile_proc = shell(cmd, wait=False)
def start_fileserver(self):
cmd = [
"./fileserver",
"-F",
self.central_conf_dir,
"-d",
self.seafile_conf_dir,
"-l",
self.fileserver_log,
]
fileserver_path = join(self.projectdir, 'fileserver')
with cd(fileserver_path):
shell("go build")
self.fileserver_proc = shell(cmd, wait=False)
def stop(self):
if self.ccnet_proc:
logger.info('Stopping ccnet server')
@@ -224,6 +252,11 @@ connection_charset = utf8
if self.seafile_proc:
logger.info('Stopping seafile server')
self.seafile_proc.kill()
if self.fileserver_proc:
logger.info('Stopping go fileserver')
self.fileserver_proc.kill()
if self.db == 'mysql':
del_mysql_dbs()
def get_seaserv_envs(self):
envs = dict(os.environ)
@@ -247,3 +280,12 @@ GRANT ALL PRIVILEGES ON `seafile`.* to `seafile`@localhost;
'''
shell('sudo mysql -u root -proot', inputdata=sql)
def del_mysql_dbs():
sql = b'''\
drop database `ccnet`;
drop database `seafile`;
drop user 'seafile'@'localhost';
'''
shell('sudo mysql -u root -proot', inputdata=sql)


@@ -0,0 +1,46 @@
// Package blockmgr provides operations on blocks
package blockmgr
import (
"github.com/haiwen/seafile-server/fileserver/objstore"
"io"
)
var store *objstore.ObjectStore
// Init initializes block manager and creates underlying object store.
func Init(seafileConfPath string, seafileDataDir string) {
store = objstore.New(seafileConfPath, seafileDataDir, "blocks")
}
// Read reads block from storage backend.
func Read(repoID string, blockID string, w io.Writer) error {
err := store.Read(repoID, blockID, w)
if err != nil {
return err
}
return nil
}
// Write writes block to storage backend.
func Write(repoID string, blockID string, r io.Reader) error {
err := store.Write(repoID, blockID, r, false)
if err != nil {
return err
}
return nil
}
// Exists checks whether the block exists.
func Exists(repoID string, blockID string) bool {
ret, _ := store.Exists(repoID, blockID)
return ret
}
// Stat returns the size of the block.
func Stat(repoID string, blockID string) (int64, error) {
ret, err := store.Stat(repoID, blockID)
return ret, err
}


@@ -0,0 +1,103 @@
package blockmgr
import (
"bytes"
"fmt"
"os"
"path"
"testing"
)
const (
blockID = "0401fc662e3bc87a41f299a907c056aaf8322a27"
repoID = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
seafileConfPath = "/tmp/conf"
seafileDataDir = "/tmp/conf/seafile-data"
testFile = "output.data"
)
func delFile() error {
err := os.Remove(testFile)
if err != nil {
return err
}
err = os.RemoveAll(seafileConfPath)
if err != nil {
return err
}
return nil
}
func createFile() error {
outputFile, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
return err
}
defer outputFile.Close()
outputString := "hello world!\n"
for i := 0; i < 10; i++ {
outputFile.WriteString(outputString)
}
return nil
}
func TestMain(m *testing.M) {
err := createFile()
if err != nil {
fmt.Printf("Failed to create test file : %v\n", err)
os.Exit(1)
}
code := m.Run()
err = delFile()
if err != nil {
fmt.Printf("Failed to remove test file : %v\n", err)
os.Exit(1)
}
os.Exit(code)
}
func testBlockRead(t *testing.T) {
var buf bytes.Buffer
err := Read(repoID, blockID, &buf)
if err != nil {
t.Errorf("Failed to read block.\n")
}
}
func testBlockWrite(t *testing.T) {
inputFile, err := os.Open(testFile)
if err != nil {
t.Errorf("Failed to open test file : %v\n", err)
}
defer inputFile.Close()
err = Write(repoID, blockID, inputFile)
if err != nil {
t.Errorf("Failed to write block.\n")
}
}
func testBlockExists(t *testing.T) {
ret := Exists(repoID, blockID)
if !ret {
t.Errorf("Block is not exist\n")
}
filePath := path.Join(seafileDataDir, "storage", "blocks", repoID, blockID[:2], blockID[2:])
fileInfo, _ := os.Stat(filePath)
if fileInfo.Size() != 130 {
t.Errorf("Block is exist, but the size of file is incorrect.\n")
}
}
func TestBlock(t *testing.T) {
Init(seafileConfPath, seafileDataDir)
testBlockWrite(t)
testBlockRead(t)
testBlockExists(t)
}


@@ -0,0 +1,159 @@
// Package commitmgr manages commit objects.
package commitmgr
import (
"bytes"
"crypto/sha1"
"encoding/binary"
"encoding/hex"
"encoding/json"
"io"
"time"
"github.com/haiwen/seafile-server/fileserver/objstore"
)
// Commit is a commit object
type Commit struct {
CommitID string `json:"commit_id"`
RepoID string `json:"repo_id"`
RootID string `json:"root_id"`
CreatorName string `json:"creator_name,omitempty"`
CreatorID string `json:"creator"`
Desc string `json:"description"`
Ctime int64 `json:"ctime"`
ParentID string `json:"parent_id,omitempty"`
SecondParentID string `json:"second_parent_id,omitempty"`
RepoName string `json:"repo_name"`
RepoDesc string `json:"repo_desc"`
RepoCategory string `json:"repo_category"`
DeviceName string `json:"device_name,omitempty"`
ClientVersion string `json:"client_version,omitempty"`
Encrypted string `json:"encrypted,omitempty"`
EncVersion int `json:"enc_version,omitempty"`
Magic string `json:"magic,omitempty"`
RandomKey string `json:"key,omitempty"`
Salt string `json:"salt,omitempty"`
Version int `json:"version,omitempty"`
Conflict int `json:"conflict,omitempty"`
NewMerge int `json:"new_merge,omitempty"`
Repaired int `json:"repaired,omitempty"`
}
var store *objstore.ObjectStore
// Init initializes commit manager and creates underlying object store.
func Init(seafileConfPath string, seafileDataDir string) {
store = objstore.New(seafileConfPath, seafileDataDir, "commits")
}
// NewCommit initializes a Commit object.
func NewCommit(repoID, parentID, newRoot, user, desc string) *Commit {
commit := new(Commit)
commit.RepoID = repoID
commit.RootID = newRoot
commit.Desc = desc
commit.CreatorName = user
commit.CreatorID = "0000000000000000000000000000000000000000"
commit.Ctime = time.Now().Unix()
commit.CommitID = computeCommitID(commit)
commit.ParentID = parentID
return commit
}
func computeCommitID(commit *Commit) string {
hash := sha1.New()
hash.Write([]byte(commit.RootID))
hash.Write([]byte(commit.CreatorID))
hash.Write([]byte(commit.CreatorName))
hash.Write([]byte(commit.Desc))
tmpBuf := make([]byte, 8)
binary.BigEndian.PutUint64(tmpBuf, uint64(commit.Ctime))
hash.Write(tmpBuf)
checkSum := hash.Sum(nil)
id := hex.EncodeToString(checkSum[:])
return id
}
// FromData reads from p and converts JSON-encoded data to commit.
func (commit *Commit) FromData(p []byte) error {
err := json.Unmarshal(p, commit)
if err != nil {
return err
}
return nil
}
// ToData converts commit to JSON-encoded data and writes to w.
func (commit *Commit) ToData(w io.Writer) error {
jsonstr, err := json.Marshal(commit)
if err != nil {
return err
}
_, err = w.Write(jsonstr)
if err != nil {
return err
}
return nil
}
// ReadRaw reads data in binary format from storage backend.
func ReadRaw(repoID string, commitID string, w io.Writer) error {
err := store.Read(repoID, commitID, w)
if err != nil {
return err
}
return nil
}
// WriteRaw writes data in binary format to storage backend.
func WriteRaw(repoID string, commitID string, r io.Reader) error {
err := store.Write(repoID, commitID, r, false)
if err != nil {
return err
}
return nil
}
// Load commit from storage backend.
func Load(repoID string, commitID string) (*Commit, error) {
var buf bytes.Buffer
commit := new(Commit)
err := ReadRaw(repoID, commitID, &buf)
if err != nil {
return nil, err
}
err = commit.FromData(buf.Bytes())
if err != nil {
return nil, err
}
return commit, nil
}
// Save commit to storage backend.
func Save(commit *Commit) error {
var buf bytes.Buffer
err := commit.ToData(&buf)
if err != nil {
return err
}
err = WriteRaw(commit.RepoID, commit.CommitID, &buf)
if err != nil {
return err
}
return nil
}
// Exists checks whether the commit exists.
func Exists(repoID string, commitID string) (bool, error) {
return store.Exists(repoID, commitID)
}


@@ -0,0 +1,67 @@
package commitmgr
import (
"fmt"
"os"
"testing"
"time"
)
const (
commitID = "0401fc662e3bc87a41f299a907c056aaf8322a27"
repoID = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
seafileConfPath = "/tmp/conf"
seafileDataDir = "/tmp/conf/seafile-data"
)
func delFile() error {
err := os.RemoveAll(seafileConfPath)
if err != nil {
return err
}
return nil
}
func TestMain(m *testing.M) {
code := m.Run()
err := delFile()
if err != nil {
fmt.Printf("Failed to remove test file : %v\n", err)
os.Exit(1)
}
os.Exit(code)
}
func assertEqual(t *testing.T, a, b interface{}) {
if a != b {
t.Errorf("Not Equal.%t,%t", a, b)
}
}
func TestCommit(t *testing.T) {
Init(seafileConfPath, seafileDataDir)
newCommit := new(Commit)
newCommit.CommitID = commitID
newCommit.RepoID = repoID
newCommit.CreatorName = "seafile"
newCommit.CreatorID = commitID
newCommit.Desc = "This is a commit"
newCommit.Ctime = time.Now().Unix()
newCommit.ParentID = commitID
newCommit.DeviceName = "Linux"
err := Save(newCommit)
if err != nil {
t.Errorf("Failed to save commit.\n")
}
commit, err := Load(repoID, commitID)
if err != nil {
t.Errorf("Failed to load commit.\n")
}
assertEqual(t, commit.CommitID, commitID)
assertEqual(t, commit.RepoID, repoID)
assertEqual(t, commit.CreatorName, "seafile")
assertEqual(t, commit.CreatorID, commitID)
assertEqual(t, commit.ParentID, commitID)
}

fileserver/crypt.go (new file, 47 lines)

@@ -0,0 +1,47 @@
package main
import (
"bytes"
"crypto/aes"
"crypto/cipher"
)
func pkcs7Padding(p []byte, blockSize int) []byte {
padding := blockSize - len(p)%blockSize
padtext := bytes.Repeat([]byte{byte(padding)}, padding)
return append(p, padtext...)
}
func pkcs7UnPadding(p []byte) []byte {
length := len(p)
paddLen := int(p[length-1])
if paddLen > length {
// Malformed padding; return the input unchanged rather than panicking.
return p
}
return p[:(length - paddLen)]
}
func decrypt(input, key, iv []byte) ([]byte, error) {
block, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
out := make([]byte, len(input))
blockMode := cipher.NewCBCDecrypter(block, iv)
blockMode.CryptBlocks(out, input)
out = pkcs7UnPadding(out)
return out, nil
}
func encrypt(input, key, iv []byte) ([]byte, error) {
block, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
input = pkcs7Padding(input, block.BlockSize())
out := make([]byte, len(input))
blockMode := cipher.NewCBCEncrypter(block, iv)
blockMode.CryptBlocks(out, input)
return out, nil
}
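A round-trip usage sketch of the two helpers above, as a standalone snippet in the same package (the 32-byte key and 16-byte IV are placeholders for AES-256-CBC; assumes imports of bytes and fmt):

func cryptRoundTrip() {
	key := bytes.Repeat([]byte{0x01}, 32) // placeholder AES-256 key
	iv := bytes.Repeat([]byte{0x02}, 16)  // placeholder CBC IV
	ct, err := encrypt([]byte("secret payload"), key, iv)
	if err != nil {
		fmt.Println(err)
		return
	}
	pt, err := decrypt(ct, key, iv)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(pt)) // prints "secret payload"
}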

fileserver/diff/diff.go (new file, 597 lines)

@@ -0,0 +1,597 @@
package diff
import (
"fmt"
"path/filepath"
"strings"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/fsmgr"
"github.com/haiwen/seafile-server/fileserver/repomgr"
)
// Empty value of sha1
const (
EmptySha1 = "0000000000000000000000000000000000000000"
)
type fileCB func(string, []*fsmgr.SeafDirent, interface{}) error
type dirCB func(string, []*fsmgr.SeafDirent, interface{}, *bool) error
type DiffOptions struct {
FileCB fileCB
DirCB dirCB
RepoID string
Data interface{}
}
type diffData struct {
foldDirDiff bool
results *[]*DiffEntry
}
func DiffTrees(roots []string, opt *DiffOptions) error {
n := len(roots)
if n != 2 && n != 3 {
err := fmt.Errorf("the number of commit trees is illegal")
return err
}
trees := make([]*fsmgr.SeafDir, n)
for i := 0; i < n; i++ {
root, err := fsmgr.GetSeafdir(opt.RepoID, roots[i])
if err != nil {
err := fmt.Errorf("Failed to find dir %s:%s", opt.RepoID, roots[i])
return err
}
trees[i] = root
}
return diffTreesRecursive(trees, "", opt)
}
func diffTreesRecursive(trees []*fsmgr.SeafDir, baseDir string, opt *DiffOptions) error {
n := len(trees)
ptrs := make([][]*fsmgr.SeafDirent, 3)
for i := 0; i < n; i++ {
if trees[i] != nil {
ptrs[i] = trees[i].Entries
} else {
ptrs[i] = nil
}
}
var firstName string
var done bool
var offset = make([]int, n)
for {
dents := make([]*fsmgr.SeafDirent, 3)
firstName = ""
done = true
for i := 0; i < n; i++ {
if len(ptrs[i]) > offset[i] {
done = false
dent := ptrs[i][offset[i]]
if firstName == "" {
firstName = dent.Name
} else if strings.Compare(dent.Name, firstName) > 0 {
firstName = dent.Name
}
}
}
if done {
break
}
for i := 0; i < n; i++ {
if len(ptrs[i]) > offset[i] {
dent := ptrs[i][offset[i]]
if firstName == dent.Name {
dents[i] = dent
offset[i]++
}
}
}
if n == 2 && dents[0] != nil && dents[1] != nil &&
direntSame(dents[0], dents[1]) {
continue
}
if n == 3 && dents[0] != nil && dents[1] != nil &&
dents[2] != nil && direntSame(dents[0], dents[1]) &&
direntSame(dents[0], dents[2]) {
continue
}
if err := diffFiles(baseDir, dents, opt); err != nil {
return err
}
if err := diffDirectories(baseDir, dents, opt); err != nil {
return err
}
}
return nil
}
func diffFiles(baseDir string, dents []*fsmgr.SeafDirent, opt *DiffOptions) error {
n := len(dents)
var nFiles int
files := make([]*fsmgr.SeafDirent, 3)
for i := 0; i < n; i++ {
if dents[i] != nil && fsmgr.IsRegular(dents[i].Mode) {
files[i] = dents[i]
nFiles++
}
}
if nFiles == 0 {
return nil
}
return opt.FileCB(baseDir, files, opt.Data)
}
func diffDirectories(baseDir string, dents []*fsmgr.SeafDirent, opt *DiffOptions) error {
n := len(dents)
dirs := make([]*fsmgr.SeafDirent, 3)
subDirs := make([]*fsmgr.SeafDir, 3)
var nDirs int
for i := 0; i < n; i++ {
if dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {
dirs[i] = dents[i]
nDirs++
}
}
if nDirs == 0 {
return nil
}
recurse := true
err := opt.DirCB(baseDir, dirs, opt.Data, &recurse)
if err != nil {
err := fmt.Errorf("failed to call dir callback: %v", err)
return err
}
if !recurse {
return nil
}
var dirName string
for i := 0; i < n; i++ {
if dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {
dir, err := fsmgr.GetSeafdir(opt.RepoID, dents[i].ID)
if err != nil {
err := fmt.Errorf("Failed to find dir %s:%s", opt.RepoID, dents[i].ID)
return err
}
subDirs[i] = dir
dirName = dents[i].Name
}
}
newBaseDir := baseDir + dirName + "/"
return diffTreesRecursive(subDirs, newBaseDir, opt)
}
func direntSame(dentA, dentB *fsmgr.SeafDirent) bool {
return dentA.ID == dentB.ID &&
dentA.Mode == dentB.Mode &&
dentA.Mtime == dentB.Mtime
}
// Diff type and diff status.
const (
DiffTypeCommits = 'C' /* diff between two commits*/
DiffStatusAdded = 'A'
DiffStatusDeleted = 'D'
DiffStatusModified = 'M'
DiffStatusRenamed = 'R'
DiffStatusUnmerged = 'U'
DiffStatusDirAdded = 'B'
DiffStatusDirDeleted = 'C'
DiffStatusDirRenamed = 'E'
)
type DiffEntry struct {
DiffType rune
Status rune
Sha1 string
Name string
NewName string
Size int64
OriginSize int64
}
func diffEntryNewFromDirent(diffType, status rune, dent *fsmgr.SeafDirent, baseDir string) *DiffEntry {
de := new(DiffEntry)
de.Sha1 = dent.ID
de.DiffType = diffType
de.Status = status
de.Size = dent.Size
de.Name = filepath.Join(baseDir, dent.Name)
return de
}
func diffEntryNew(diffType, status rune, dirID, name string) *DiffEntry {
de := new(DiffEntry)
de.DiffType = diffType
de.Status = status
de.Sha1 = dirID
de.Name = name
return de
}
func DiffMergeRoots(storeID, mergedRoot, p1Root, p2Root string, results *[]*DiffEntry, foldDirDiff bool) error {
roots := []string{mergedRoot, p1Root, p2Root}
opt := new(DiffOptions)
opt.RepoID = storeID
opt.FileCB = threewayDiffFiles
opt.DirCB = threewayDiffDirs
opt.Data = diffData{foldDirDiff, results}
err := DiffTrees(roots, opt)
if err != nil {
err := fmt.Errorf("failed to diff trees: %v", err)
return err
}
diffResolveRenames(results)
return nil
}
func threewayDiffFiles(baseDir string, dents []*fsmgr.SeafDirent, optData interface{}) error {
m := dents[0]
p1 := dents[1]
p2 := dents[2]
data, ok := optData.(diffData)
if !ok {
err := fmt.Errorf("failed to assert diff data")
return err
}
results := data.results
if m != nil && p1 != nil && p2 != nil {
if !direntSame(m, p1) && !direntSame(m, p2) {
de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, m, baseDir)
*results = append(*results, de)
}
} else if m == nil && p1 != nil && p2 != nil {
de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDeleted, p1, baseDir)
*results = append(*results, de)
} else if m != nil && p1 == nil && p2 != nil {
if !direntSame(m, p2) {
de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, m, baseDir)
*results = append(*results, de)
}
} else if m != nil && p1 != nil && p2 == nil {
if !direntSame(m, p1) {
de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, m, baseDir)
*results = append(*results, de)
}
} else if m != nil && p1 == nil && p2 == nil {
de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusAdded, m, baseDir)
*results = append(*results, de)
}
return nil
}
func threewayDiffDirs(baseDir string, dents []*fsmgr.SeafDirent, optData interface{}, recurse *bool) error {
*recurse = true
return nil
}
func DiffCommitRoots(storeID, p1Root, p2Root string, results *[]*DiffEntry, foldDirDiff bool) error {
roots := []string{p1Root, p2Root}
opt := new(DiffOptions)
opt.RepoID = storeID
opt.FileCB = twowayDiffFiles
opt.DirCB = twowayDiffDirs
opt.Data = diffData{foldDirDiff, results}
err := DiffTrees(roots, opt)
if err != nil {
err := fmt.Errorf("failed to diff trees: %v", err)
return err
}
diffResolveRenames(results)
return nil
}
func DiffCommits(commit1, commit2 *commitmgr.Commit, results *[]*DiffEntry, foldDirDiff bool) error {
repo := repomgr.Get(commit1.RepoID)
if repo == nil {
err := fmt.Errorf("failed to get repo %s", commit1.RepoID)
return err
}
roots := []string{commit1.RootID, commit2.RootID}
opt := new(DiffOptions)
opt.RepoID = repo.StoreID
opt.FileCB = twowayDiffFiles
opt.DirCB = twowayDiffDirs
opt.Data = diffData{foldDirDiff, results}
err := DiffTrees(roots, opt)
if err != nil {
err := fmt.Errorf("failed to diff trees: %v", err)
return err
}
diffResolveRenames(results)
return nil
}
func twowayDiffFiles(baseDir string, dents []*fsmgr.SeafDirent, optData interface{}) error {
p1 := dents[0]
p2 := dents[1]
data, ok := optData.(diffData)
if !ok {
err := fmt.Errorf("failed to assert diff data")
return err
}
results := data.results
if p1 == nil {
de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusAdded, p2, baseDir)
*results = append(*results, de)
return nil
}
if p2 == nil {
de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDeleted, p1, baseDir)
*results = append(*results, de)
return nil
}
if !direntSame(p1, p2) {
de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, p2, baseDir)
de.OriginSize = p1.Size
*results = append(*results, de)
}
return nil
}
func twowayDiffDirs(baseDir string, dents []*fsmgr.SeafDirent, optData interface{}, recurse *bool) error {
p1 := dents[0]
p2 := dents[1]
data, ok := optData.(diffData)
if !ok {
err := fmt.Errorf("failed to assert diff data")
return err
}
results := data.results
if p1 == nil {
if p2.ID == EmptySha1 || data.foldDirDiff {
de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDirAdded, p2, baseDir)
*results = append(*results, de)
*recurse = false
} else {
*recurse = true
}
return nil
}
if p2 == nil {
de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDirDeleted, p1, baseDir)
*results = append(*results, de)
if data.foldDirDiff {
*recurse = false
} else {
*recurse = true
}
}
return nil
}
func diffResolveRenames(des *[]*DiffEntry) error {
var deletedEmptyCount, deletedEmptyDirCount, addedEmptyCount, addedEmptyDirCount int
for _, de := range *des {
if de.Sha1 == EmptySha1 {
if de.Status == DiffStatusDeleted {
deletedEmptyCount++
}
if de.Status == DiffStatusDirDeleted {
deletedEmptyDirCount++
}
if de.Status == DiffStatusAdded {
addedEmptyCount++
}
if de.Status == DiffStatusDirAdded {
addedEmptyDirCount++
}
}
}
deletedFiles := make(map[string]*DiffEntry)
deletedDirs := make(map[string]*DiffEntry)
var results []*DiffEntry
var added []*DiffEntry
checkEmptyDir := (deletedEmptyDirCount == 1 && addedEmptyDirCount == 1)
checkEmptyFile := (deletedEmptyCount == 1 && addedEmptyCount == 1)
for _, de := range *des {
if de.Status == DiffStatusDeleted {
if de.Sha1 == EmptySha1 && !checkEmptyFile {
results = append(results, de)
continue
}
deletedFiles[de.Sha1] = de
}
if de.Status == DiffStatusDirDeleted {
if de.Sha1 == EmptySha1 && !checkEmptyDir {
results = append(results, de)
continue
}
deletedDirs[de.Sha1] = de
}
if de.Status == DiffStatusAdded {
if de.Sha1 == EmptySha1 && !checkEmptyFile {
results = append(results, de)
continue
}
added = append(added, de)
}
if de.Status == DiffStatusDirAdded {
if de.Sha1 == EmptySha1 && !checkEmptyDir {
results = append(results, de)
continue
}
added = append(added, de)
}
if de.Status == DiffStatusModified {
results = append(results, de)
}
}
for _, de := range added {
var deAdd, deDel, deRename *DiffEntry
var renameStatus rune
deAdd = de
if deAdd.Status == DiffStatusAdded {
deTmp, ok := deletedFiles[de.Sha1]
if !ok {
results = append(results, deAdd)
continue
}
deDel = deTmp
} else {
deTmp, ok := deletedDirs[de.Sha1]
if !ok {
results = append(results, deAdd)
continue
}
deDel = deTmp
}
if deAdd.Status == DiffStatusDirAdded {
renameStatus = DiffStatusDirRenamed
} else {
renameStatus = DiffStatusRenamed
}
deRename = diffEntryNew(deDel.DiffType, renameStatus, deDel.Sha1, deDel.Name)
deRename.NewName = de.Name
results = append(results, deRename)
if deDel.Status == DiffStatusDirDeleted {
delete(deletedDirs, deAdd.Sha1)
} else {
delete(deletedFiles, deAdd.Sha1)
}
}
for _, de := range deletedFiles {
results = append(results, de)
}
for _, de := range deletedDirs {
results = append(results, de)
}
*des = results
return nil
}
func DiffResultsToDesc(results []*DiffEntry) string {
var nAddMod, nRemoved, nRenamed int
var nNewDir, nRemovedDir int
var addModFile, removedFile string
var renamedFile string
var newDir, removedDir string
var desc string
if results == nil {
return ""
}
for _, de := range results {
switch de.Status {
case DiffStatusAdded:
if nAddMod == 0 {
addModFile = filepath.Base(de.Name)
}
nAddMod++
case DiffStatusDeleted:
if nRemoved == 0 {
removedFile = filepath.Base(de.Name)
}
nRemoved++
case DiffStatusRenamed:
if nRenamed == 0 {
renamedFile = filepath.Base(de.Name)
}
nRenamed++
case DiffStatusModified:
if nAddMod == 0 {
addModFile = filepath.Base(de.Name)
}
nAddMod++
case DiffStatusDirAdded:
if nNewDir == 0 {
newDir = filepath.Base(de.Name)
}
nNewDir++
case DiffStatusDirDeleted:
if nRemovedDir == 0 {
removedDir = filepath.Base(de.Name)
}
nRemovedDir++
}
}
if nAddMod == 1 {
desc = fmt.Sprintf("Added or modified \"%s\".\n", addModFile)
} else if nAddMod > 1 {
desc = fmt.Sprintf("Added or modified \"%s\" and %d more files.\n", addModFile, nAddMod-1)
}
if nRemoved == 1 {
desc += fmt.Sprintf("Deleted \"%s\".\n", removedFile)
} else if nRemoved > 1 {
desc += fmt.Sprintf("Deleted \"%s\" and %d more files.\n", removedFile, nRemoved-1)
}
if nRenamed == 1 {
desc += fmt.Sprintf("Renamed \"%s\".\n", renamedFile)
} else if nRenamed > 1 {
desc += fmt.Sprintf("Renamed \"%s\" and %d more files.\n", renamedFile, nRenamed-1)
}
if nNewDir == 1 {
desc += fmt.Sprintf("Added directory \"%s\".\n", newDir)
} else if nNewDir > 1 {
desc += fmt.Sprintf("Added \"%s\" and %d more directories.\n", newDir, nNewDir-1)
}
if nRemovedDir == 1 {
desc += fmt.Sprintf("Removed directory \"%s\".\n", removedDir)
} else if nRemovedDir > 1 {
desc += fmt.Sprintf("Removed \"%s\" and %d more directories.\n", removedDir, nRemovedDir-1)
}
return desc
}


@@ -0,0 +1,281 @@
package diff
import (
"fmt"
"os"
"syscall"
"testing"
"github.com/haiwen/seafile-server/fileserver/fsmgr"
)
const (
emptySHA1 = "0000000000000000000000000000000000000000"
diffTestSeafileConfPath = "/tmp/conf"
diffTestSeafileDataDir = "/tmp/conf/seafile-data"
diffTestRepoID = "0d18a711-c988-4f7b-960c-211b34705ce3"
)
var diffTestTree1 string
var diffTestTree2 string
var diffTestTree3 string
var diffTestTree4 string
var diffTestFileID string
var diffTestDirID1 string
var diffTestDirID2 string
/*
test directory structure:
tree1
|--
tree2
|--file
tree3
|--dir
tree4
|--dir
|-- file
*/
func TestDiffTrees(t *testing.T) {
fsmgr.Init(diffTestSeafileConfPath, diffTestSeafileDataDir)
err := diffTestCreateTestDir()
if err != nil {
fmt.Printf("failed to create test dir: %v", err)
os.Exit(1)
}
t.Run("test1", testDiffTrees1)
t.Run("test2", testDiffTrees2)
t.Run("test3", testDiffTrees3)
t.Run("test4", testDiffTrees4)
t.Run("test5", testDiffTrees5)
err = diffTestDelFile()
if err != nil {
fmt.Printf("failed to remove test file : %v", err)
}
}
func diffTestCreateTestDir() error {
modeDir := uint32(syscall.S_IFDIR | 0644)
modeFile := uint32(syscall.S_IFREG | 0644)
dir1, err := diffTestCreateSeafdir(nil)
if err != nil {
err := fmt.Errorf("failed to get seafdir: %v", err)
return err
}
diffTestTree1 = dir1
file1, err := fsmgr.NewSeafile(1, 1, nil)
if err != nil {
err := fmt.Errorf("failed to new seafile: %v", err)
return err
}
diffTestFileID = file1.FileID
err = fsmgr.SaveSeafile(diffTestRepoID, file1)
if err != nil {
err := fmt.Errorf("failed to save seafile: %v", err)
return err
}
dent1 := fsmgr.SeafDirent{ID: file1.FileID, Name: "file", Mode: modeFile, Size: 1}
dir2, err := diffTestCreateSeafdir([]*fsmgr.SeafDirent{&dent1})
if err != nil {
err := fmt.Errorf("failed to get seafdir: %v", err)
return err
}
diffTestTree2 = dir2
dent2 := fsmgr.SeafDirent{ID: dir1, Name: "dir", Mode: modeDir}
diffTestDirID1 = dir1
dir3, err := diffTestCreateSeafdir([]*fsmgr.SeafDirent{&dent2})
if err != nil {
err := fmt.Errorf("failed to get seafdir: %v", err)
return err
}
diffTestTree3 = dir3
dent3 := fsmgr.SeafDirent{ID: dir2, Name: "dir", Mode: modeDir}
diffTestDirID2 = dir2
dir4, err := diffTestCreateSeafdir([]*fsmgr.SeafDirent{&dent3})
if err != nil {
err := fmt.Errorf("failed to get seafdir: %v", err)
return err
}
diffTestTree4 = dir4
return nil
}
func testDiffTrees1(t *testing.T) {
var results []interface{}
opt := &DiffOptions{
FileCB: diffTestFileCB,
DirCB: diffTestDirCB,
RepoID: diffTestRepoID}
opt.Data = &results
DiffTrees([]string{diffTestTree2, diffTestTree1}, opt)
if len(results) != 1 {
t.Errorf("data length is %d not 1", len(results))
}
var ret = make([]string, len(results))
for k, v := range results {
ret[k] = fmt.Sprintf("%s", v)
}
if ret[0] != diffTestFileID {
t.Errorf("result %s != %s", ret[0], diffTestFileID)
}
}
func testDiffTrees2(t *testing.T) {
var results []interface{}
opt := &DiffOptions{
FileCB: diffTestFileCB,
DirCB: diffTestDirCB,
RepoID: diffTestRepoID}
opt.Data = &results
DiffTrees([]string{diffTestTree3, diffTestTree1}, opt)
if len(results) != 1 {
t.Errorf("data length is %d not 1", len(results))
}
var ret = make([]string, len(results))
for k, v := range results {
ret[k] = fmt.Sprintf("%s", v)
}
if ret[0] != diffTestDirID1 {
t.Errorf("result %s != %s", ret[0], diffTestDirID1)
}
}
func testDiffTrees3(t *testing.T) {
var results []interface{}
opt := &DiffOptions{
FileCB: diffTestFileCB,
DirCB: diffTestDirCB,
RepoID: diffTestRepoID}
opt.Data = &results
DiffTrees([]string{diffTestTree4, diffTestTree1}, opt)
if len(results) != 2 {
t.Errorf("data length is %d not 1", len(results))
}
var ret = make([]string, len(results))
for k, v := range results {
ret[k] = fmt.Sprintf("%s", v)
}
if ret[0] != diffTestDirID2 {
t.Errorf("result %s != %s", ret[0], diffTestDirID2)
}
if ret[1] != diffTestFileID {
t.Errorf("result %s != %s", ret[1], diffTestFileID)
}
}
func testDiffTrees4(t *testing.T) {
var results []interface{}
opt := &DiffOptions{
FileCB: diffTestFileCB,
DirCB: diffTestDirCB,
RepoID: diffTestRepoID}
opt.Data = &results
DiffTrees([]string{diffTestTree4, diffTestTree3}, opt)
if len(results) != 2 {
t.Errorf("data length is %d not 1", len(results))
}
var ret = make([]string, len(results))
for k, v := range results {
ret[k] = fmt.Sprintf("%s", v)
}
if ret[0] != diffTestDirID2 {
t.Errorf("result %s != %s", ret[0], diffTestDirID2)
}
if ret[1] != diffTestFileID {
t.Errorf("result %s != %s", ret[1], diffTestFileID)
}
}
func testDiffTrees5(t *testing.T) {
var results []interface{}
opt := &DiffOptions{
FileCB: diffTestFileCB,
DirCB: diffTestDirCB,
RepoID: diffTestRepoID}
opt.Data = &results
DiffTrees([]string{diffTestTree3, diffTestTree2}, opt)
if len(results) != 1 {
t.Errorf("data length is %d not 1", len(results))
}
var ret = make([]string, len(results))
for k, v := range results {
ret[k] = fmt.Sprintf("%s", v)
}
if ret[0] != diffTestDirID1 {
t.Errorf("result %s != %s", ret[0], diffTestDirID1)
}
}
func diffTestCreateSeafdir(dents []*fsmgr.SeafDirent) (string, error) {
seafdir, err := fsmgr.NewSeafdir(1, dents)
if err != nil {
return "", err
}
err = fsmgr.SaveSeafdir(diffTestRepoID, seafdir)
if err != nil {
return "", err
}
return seafdir.DirID, nil
}
func diffTestDelFile() error {
err := os.RemoveAll(diffTestSeafileConfPath)
if err != nil {
return err
}
return nil
}
func diffTestFileCB(baseDir string, files []*fsmgr.SeafDirent, data interface{}) error {
file1 := files[0]
file2 := files[1]
results, ok := data.(*[]interface{})
if !ok {
err := fmt.Errorf("failed to assert results")
return err
}
if file1 != nil &&
(file2 == nil || file1.ID != file2.ID) &&
file1.ID != emptySHA1 {
*results = append(*results, file1.ID)
}
return nil
}
func diffTestDirCB(baseDir string, dirs []*fsmgr.SeafDirent, data interface{}, recurse *bool) error {
dir1 := dirs[0]
dir2 := dirs[1]
results, ok := data.(*[]interface{})
if !ok {
err := fmt.Errorf("failed to assert results")
return err
}
if dir1 != nil &&
(dir2 == nil || dir1.ID != dir2.ID) &&
dir1.ID != emptySHA1 {
*results = append(*results, dir1.ID)
}
return nil
}

fileserver/fileop.go (new file, 3069 lines; diff suppressed because it is too large)

fileserver/fileserver.go (new file, 432 lines)

@@ -0,0 +1,432 @@
// Main package for Seafile file server.
package main
import (
"database/sql"
"flag"
"fmt"
"io"
"log"
"net/http"
"os"
"path/filepath"
"strings"
_ "github.com/go-sql-driver/mysql"
"github.com/gorilla/mux"
"github.com/haiwen/seafile-server/fileserver/blockmgr"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/fsmgr"
"github.com/haiwen/seafile-server/fileserver/repomgr"
"github.com/haiwen/seafile-server/fileserver/searpc"
"github.com/haiwen/seafile-server/fileserver/share"
_ "github.com/mattn/go-sqlite3"
"gopkg.in/ini.v1"
)
var dataDir, absDataDir string
var centralDir string
var logFile, absLogFile string
var rpcPipePath string
var dbType string
var groupTableName string
var cloudMode bool
var seafileDB, ccnetDB *sql.DB
// when SQLite is used, user and group db are separated.
var userDB, groupDB *sql.DB
type fileServerOptions struct {
host string
port uint32
maxUploadSize uint64
maxDownloadDirSize uint64
// Block size for indexing uploaded files
fixedBlockSize uint64
// Maximum number of goroutines to index uploaded files
maxIndexingThreads uint32
webTokenExpireTime uint32
// File mode for temp files
clusterSharedTempFileMode uint32
windowsEncoding string
// Timeout for fs-id-list requests.
fsIDListRequestTimeout uint32
}
var options fileServerOptions
func init() {
flag.StringVar(&centralDir, "F", "", "central config directory")
flag.StringVar(&dataDir, "d", "", "seafile data directory")
flag.StringVar(&logFile, "l", "", "log file path")
flag.StringVar(&rpcPipePath, "p", "", "rpc pipe path")
}
func loadCcnetDB() {
ccnetConfPath := filepath.Join(centralDir, "ccnet.conf")
config, err := ini.Load(ccnetConfPath)
if err != nil {
log.Fatalf("Failed to load ccnet.conf: %v", err)
}
section, err := config.GetSection("Database")
if err != nil {
log.Fatal("No database section in ccnet.conf.")
}
var dbEngine string = "sqlite"
key, err := section.GetKey("ENGINE")
if err == nil {
dbEngine = key.String()
}
if strings.EqualFold(dbEngine, "mysql") {
if key, err = section.GetKey("HOST"); err != nil {
log.Fatal("No database host in ccnet.conf.")
}
host := key.String()
if key, err = section.GetKey("USER"); err != nil {
log.Fatal("No database user in ccnet.conf.")
}
user := key.String()
if key, err = section.GetKey("PASSWD"); err != nil {
log.Fatal("No database password in ccnet.conf.")
}
password := key.String()
if key, err = section.GetKey("DB"); err != nil {
log.Fatal("No database db_name in ccnet.conf.")
}
dbName := key.String()
port := 3306
if key, err = section.GetKey("PORT"); err == nil {
port, _ = key.Int()
}
unixSocket := ""
if key, err = section.GetKey("UNIX_SOCKET"); err == nil {
unixSocket = key.String()
}
useTLS := false
if key, err = section.GetKey("USE_SSL"); err == nil {
useTLS, _ = key.Bool()
}
var dsn string
if unixSocket == "" {
dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=%t", user, password, host, port, dbName, useTLS)
} else {
dsn = fmt.Sprintf("%s:%s@unix(%s)/%s", user, password, unixSocket, dbName)
}
ccnetDB, err = sql.Open("mysql", dsn)
if err != nil {
log.Fatalf("Failed to open database: %v", err)
}
} else if strings.EqualFold(dbEngine, "sqlite") {
ccnetDBPath := filepath.Join(centralDir, "groupmgr.db")
ccnetDB, err = sql.Open("sqlite3", ccnetDBPath)
if err != nil {
log.Fatalf("Failed to open database %s: %v", ccnetDBPath, err)
}
} else {
log.Fatalf("Unsupported database %s.", dbEngine)
}
}
func loadSeafileDB() {
seafileConfPath := filepath.Join(centralDir, "seafile.conf")
config, err := ini.Load(seafileConfPath)
if err != nil {
log.Fatalf("Failed to load seafile.conf: %v", err)
}
section, err := config.GetSection("database")
if err != nil {
log.Fatal("No database section in seafile.conf.")
}
var dbEngine string = "sqlite"
key, err := section.GetKey("type")
if err == nil {
dbEngine = key.String()
}
if strings.EqualFold(dbEngine, "mysql") {
if key, err = section.GetKey("host"); err != nil {
log.Fatal("No database host in seafile.conf.")
}
host := key.String()
if key, err = section.GetKey("user"); err != nil {
log.Fatal("No database user in seafile.conf.")
}
user := key.String()
if key, err = section.GetKey("password"); err != nil {
log.Fatal("No database password in seafile.conf.")
}
password := key.String()
if key, err = section.GetKey("db_name"); err != nil {
log.Fatal("No database db_name in seafile.conf.")
}
dbName := key.String()
port := 3306
if key, err = section.GetKey("port"); err == nil {
port, _ = key.Int()
}
unixSocket := ""
if key, err = section.GetKey("unix_socket"); err == nil {
unixSocket = key.String()
}
useTLS := false
if key, err = section.GetKey("use_ssl"); err == nil {
useTLS, _ = key.Bool()
}
var dsn string
if unixSocket == "" {
dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=%t", user, password, host, port, dbName, useTLS)
} else {
dsn = fmt.Sprintf("%s:%s@unix(%s)/%s", user, password, unixSocket, dbName)
}
seafileDB, err = sql.Open("mysql", dsn)
if err != nil {
log.Fatalf("Failed to open database: %v", err)
}
} else if strings.EqualFold(dbEngine, "sqlite") {
seafileDBPath := filepath.Join(absDataDir, "seafile.db")
seafileDB, err = sql.Open("sqlite3", seafileDBPath)
if err != nil {
log.Fatalf("Failed to open database %s: %v", seafileDBPath, err)
}
} else {
log.Fatalf("Unsupported database %s.", dbEngine)
}
dbType = dbEngine
}
func loadFileServerOptions() {
seafileConfPath := filepath.Join(centralDir, "seafile.conf")
config, err := ini.Load(seafileConfPath)
if err != nil {
log.Fatalf("Failed to load seafile.conf: %v", err)
}
cloudMode = false
if section, err := config.GetSection("general"); err == nil {
if key, err := section.GetKey("cloud_mode"); err == nil {
cloudMode, _ = key.Bool()
}
}
initDefaultOptions()
if section, err := config.GetSection("fileserver"); err == nil {
if key, err := section.GetKey("host"); err == nil {
options.host = key.String()
}
if key, err := section.GetKey("port"); err == nil {
port, err := key.Uint()
if err == nil {
options.port = uint32(port)
}
}
if key, err := section.GetKey("max_indexing_threads"); err == nil {
threads, err := key.Uint()
if err == nil {
options.maxIndexingThreads = uint32(threads)
}
}
if key, err := section.GetKey("fixed_block_size"); err == nil {
blkSize, err := key.Uint64()
if err == nil {
options.fixedBlockSize = blkSize
}
}
if key, err := section.GetKey("web_token_expire_time"); err == nil {
expire, err := key.Uint()
if err == nil {
options.webTokenExpireTime = uint32(expire)
}
}
if key, err := section.GetKey("cluster_shared_temp_file_mode"); err == nil {
fileMode, err := key.Uint()
if err == nil {
options.clusterSharedTempFileMode = uint32(fileMode)
}
}
}
ccnetConfPath := filepath.Join(centralDir, "ccnet.conf")
config, err = ini.Load(ccnetConfPath)
if err != nil {
log.Fatalf("Failed to load ccnet.conf: %v", err)
}
groupTableName = "Group"
if section, err := config.GetSection("GROUP"); err == nil {
if key, err := section.GetKey("TABLE_NAME"); err == nil {
groupTableName = key.String()
}
}
}
func initDefaultOptions() {
options.host = "0.0.0.0"
options.port = 8082
options.maxDownloadDirSize = 100 * (1 << 20)
options.fixedBlockSize = 1 << 23
options.maxIndexingThreads = 1
options.webTokenExpireTime = 7200
options.clusterSharedTempFileMode = 0600
}
func main() {
flag.Parse()
if centralDir == "" {
log.Fatal("central config directory must be specified.")
}
_, err := os.Stat(centralDir)
if os.IsNotExist(err) {
log.Fatalf("central config directory %s doesn't exist: %v.", centralDir, err)
}
loadCcnetDB()
if dataDir == "" {
log.Fatal("seafile data directory must be specified.")
}
_, err = os.Stat(dataDir)
if os.IsNotExist(err) {
log.Fatalf("seafile data directory %s doesn't exist: %v.", dataDir, err)
}
absDataDir, err = filepath.Abs(dataDir)
if err != nil {
log.Fatalf("Failed to convert seafile data dir to absolute path: %v.", err)
}
loadSeafileDB()
loadFileServerOptions()
if logFile == "" {
absLogFile = filepath.Join(absDataDir, "seafile.log")
fp, err := os.OpenFile(absLogFile, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
log.Fatalf("Failed to open or create log file: %v", err)
}
log.SetOutput(fp)
} else if logFile != "-" {
absLogFile, err = filepath.Abs(logFile)
if err != nil {
log.Fatalf("Failed to convert log file path to absolute path: %v", err)
}
fp, err := os.OpenFile(absLogFile, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
log.Fatalf("Failed to open or create log file: %v", err)
}
log.SetOutput(fp)
}
// When logFile is "-", use default output (StdOut)
log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
repomgr.Init(seafileDB)
fsmgr.Init(centralDir, dataDir)
blockmgr.Init(centralDir, dataDir)
commitmgr.Init(centralDir, dataDir)
share.Init(ccnetDB, seafileDB, groupTableName, cloudMode)
rpcClientInit()
syncAPIInit()
sizeSchedulerInit()
initUpload()
router := newHTTPRouter()
log.Print("Seafile file server started.")
addr := fmt.Sprintf("%s:%d", options.host, options.port)
err = http.ListenAndServe(addr, router)
if err != nil {
log.Printf("File server exiting: %v", err)
}
}
var rpcclient *searpc.Client
func rpcClientInit() {
var pipePath string
if rpcPipePath != "" {
pipePath = filepath.Join(rpcPipePath, "seafile.sock")
} else {
pipePath = filepath.Join(absDataDir, "seafile.sock")
}
rpcclient = searpc.Init(pipePath, "seafserv-threaded-rpcserver")
}
func newHTTPRouter() *mux.Router {
r := mux.NewRouter()
r.HandleFunc("/protocol-version", handleProtocolVersion)
r.Handle("/files/{.*}/{.*}", appHandler(accessCB))
r.Handle("/blks/{.*}/{.*}", appHandler(accessBlksCB))
r.Handle("/zip/{.*}", appHandler(accessZipCB))
r.Handle("/upload-api/{.*}", appHandler(uploadAPICB))
r.Handle("/upload-aj/{.*}", appHandler(uploadAjaxCB))
r.Handle("/update-api/{.*}", appHandler(updateAPICB))
r.Handle("/update-aj/{.*}", appHandler(updateAjaxCB))
r.Handle("/upload-blks-api/{.*}", appHandler(uploadBlksAPICB))
r.Handle("/upload-raw-blks-api/{.*}", appHandler(uploadRawBlksAPICB))
// file syncing api
r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/permission-check/",
appHandler(permissionCheckCB))
r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/commit/{HEAD:HEAD\\/?}",
appHandler(headCommitOperCB))
r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/commit/{id:[\\da-z]{40}}",
appHandler(commitOperCB))
r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/block/{id:[\\da-z]{40}}",
appHandler(blockOperCB))
r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/fs-id-list/",
appHandler(getFsObjIDCB))
r.Handle("/repo/head-commits-multi/",
appHandler(headCommitsMultiCB))
r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/pack-fs/",
appHandler(packFSCB))
r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/check-fs/",
appHandler(checkFSCB))
r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/check-blocks/",
appHandler(checkBlockCB))
r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/recv-fs/",
appHandler(recvFSCB))
r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/quota-check/",
appHandler(getCheckQuotaCB))
// seadrive api
r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/block-map/{id:[\\da-z]{40}}",
appHandler(getBlockMapCB))
r.Handle("/accessible-repos", appHandler(getAccessibleRepoListCB))
return r
}
func handleProtocolVersion(rsp http.ResponseWriter, r *http.Request) {
io.WriteString(rsp, "{\"version\": 2}")
}
type appError struct {
Error error
Message string
Code int
}
type appHandler func(http.ResponseWriter, *http.Request) *appError
func (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if e := fn(w, r); e != nil {
if e.Error != nil && e.Code == http.StatusInternalServerError {
log.Printf("path %s internal server error: %v\n", r.URL.Path, e.Error)
}
http.Error(w, e.Message, e.Code)
}
}
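appHandler centralizes error handling: each route handler returns *appError instead of writing error responses itself. A hedged sketch of a handler in this style (demoCB is hypothetical, not one of the routes registered above):

func demoCB(rsp http.ResponseWriter, r *http.Request) *appError {
	repoID := mux.Vars(r)["repoid"]
	if repoID == "" {
		// appHandler.ServeHTTP turns this into http.Error(w, Message, Code)
		return &appError{nil, "Invalid repo id.\n", http.StatusBadRequest}
	}
	io.WriteString(rsp, "OK")
	return nil // nil means the handler already wrote the response
}
// registered as: r.Handle("/demo/{repoid}", appHandler(demoCB))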

fileserver/fsmgr/fsmgr.go (new file, 521 lines)

@@ -0,0 +1,521 @@
// Package fsmgr manages fs objects
package fsmgr
import (
"bytes"
"compress/zlib"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"path/filepath"
"strings"
"syscall"
"github.com/haiwen/seafile-server/fileserver/objstore"
)
// Seafile is a file object
type Seafile struct {
Version int `json:"version"`
FileType int `json:"type,omitempty"`
FileID string `json:"file_id,omitempty"`
FileSize uint64 `json:"size"`
BlkIDs []string `json:"block_ids"`
}
// SeafDirent is a dir entry object
type SeafDirent struct {
Mode uint32 `json:"mode"`
ID string `json:"id"`
Name string `json:"name"`
Mtime int64 `json:"mtime"`
Modifier string `json:"modifier"`
Size int64 `json:"size"`
}
// SeafDir is a dir object
type SeafDir struct {
Version int `json:"version"`
DirType int `json:"type,omitempty"`
DirID string `json:"dir_id,omitempty"`
Entries []*SeafDirent `json:"dirents"`
}
// FileCountInfo contains information of files
type FileCountInfo struct {
FileCount int64
Size int64
DirCount int64
}
// Metadata type of dir or file
const (
SeafMetadataTypeInvalid = iota
SeafMetadataTypeFile
SeafMetadataTypeLink
SeafMetadataTypeDir
)
var store *objstore.ObjectStore
// Empty value of sha1
const (
EmptySha1 = "0000000000000000000000000000000000000000"
)
// Init initializes fs manager and creates underlying object store.
func Init(seafileConfPath string, seafileDataDir string) {
store = objstore.New(seafileConfPath, seafileDataDir, "fs")
}
// NewDirent initializes a SeafDirent object
func NewDirent(id string, name string, mode uint32, mtime int64, modifier string, size int64) *SeafDirent {
dent := new(SeafDirent)
dent.ID = id
if id == "" {
dent.ID = EmptySha1
}
dent.Name = name
dent.Mode = mode
dent.Mtime = mtime
if IsRegular(mode) {
dent.Modifier = modifier
dent.Size = size
}
return dent
}
// NewSeafdir initializes a SeafDir object
func NewSeafdir(version int, entries []*SeafDirent) (*SeafDir, error) {
dir := new(SeafDir)
dir.Version = version
dir.Entries = entries
jsonstr, err := json.Marshal(dir)
if err != nil {
err := fmt.Errorf("failed to convert seafdir to json")
return nil, err
}
checksum := sha1.Sum(jsonstr)
dir.DirID = hex.EncodeToString(checksum[:])
return dir, nil
}
// NewSeafile initializes a Seafile object
func NewSeafile(version int, fileSize int64, blkIDs []string) (*Seafile, error) {
seafile := new(Seafile)
seafile.Version = version
seafile.FileSize = uint64(fileSize)
seafile.BlkIDs = blkIDs
jsonstr, err := json.Marshal(seafile)
if err != nil {
err := fmt.Errorf("failed to convert seafile to json")
return nil, err
}
checkSum := sha1.Sum(jsonstr)
seafile.FileID = hex.EncodeToString(checkSum[:])
return seafile, nil
}
func uncompress(p []byte) ([]byte, error) {
b := bytes.NewReader(p)
var out bytes.Buffer
r, err := zlib.NewReader(b)
if err != nil {
return nil, err
}
_, err = io.Copy(&out, r)
if err != nil {
r.Close()
return nil, err
}
r.Close()
return out.Bytes(), nil
}
func compress(p []byte) ([]byte, error) {
var out bytes.Buffer
w := zlib.NewWriter(&out)
_, err := w.Write(p)
if err != nil {
w.Close()
return nil, err
}
w.Close()
return out.Bytes(), nil
}
// FromData reads from p and converts JSON-encoded data to Seafile.
func (seafile *Seafile) FromData(p []byte) error {
b, err := uncompress(p)
if err != nil {
return err
}
err = json.Unmarshal(b, seafile)
if err != nil {
return err
}
return nil
}
// ToData converts seafile to JSON-encoded data and writes to w.
func (seafile *Seafile) ToData(w io.Writer) error {
jsonstr, err := json.Marshal(seafile)
if err != nil {
return err
}
buf, err := compress(jsonstr)
if err != nil {
return err
}
_, err = w.Write(buf)
if err != nil {
return err
}
return nil
}
// ToData converts seafdir to JSON-encoded data and writes to w.
func (seafdir *SeafDir) ToData(w io.Writer) error {
jsonstr, err := json.Marshal(seafdir)
if err != nil {
return err
}
buf, err := compress(jsonstr)
if err != nil {
return err
}
_, err = w.Write(buf)
if err != nil {
return err
}
return nil
}
// FromData reads from p and converts JSON-encoded data to SeafDir.
func (seafdir *SeafDir) FromData(p []byte) error {
b, err := uncompress(p)
if err != nil {
return err
}
err = json.Unmarshal(b, seafdir)
if err != nil {
return err
}
return nil
}
// ReadRaw reads data in binary format from storage backend.
func ReadRaw(repoID string, objID string, w io.Writer) error {
err := store.Read(repoID, objID, w)
if err != nil {
return err
}
return nil
}
// WriteRaw writes data in binary format to storage backend.
func WriteRaw(repoID string, objID string, r io.Reader) error {
err := store.Write(repoID, objID, r, false)
if err != nil {
return err
}
return nil
}
// GetSeafile gets seafile from storage backend.
func GetSeafile(repoID string, fileID string) (*Seafile, error) {
var buf bytes.Buffer
seafile := new(Seafile)
if fileID == EmptySha1 {
seafile.FileID = EmptySha1
return seafile, nil
}
err := ReadRaw(repoID, fileID, &buf)
if err != nil {
errors := fmt.Errorf("failed to read seafile object from storage : %v", err)
return nil, errors
}
err = seafile.FromData(buf.Bytes())
if err != nil {
errors := fmt.Errorf("failed to parse seafile object %s/%s : %v", repoID, fileID, err)
return nil, errors
}
if seafile.Version < 1 {
errors := fmt.Errorf("seafile object %s/%s version should be > 0", repoID, fileID)
return nil, errors
}
seafile.FileID = fileID
return seafile, nil
}
// SaveSeafile saves seafile to storage backend.
func SaveSeafile(repoID string, seafile *Seafile) error {
fileID := seafile.FileID
exist, _ := store.Exists(repoID, fileID)
if exist {
return nil
}
seafile.FileType = SeafMetadataTypeFile
var buf bytes.Buffer
err := seafile.ToData(&buf)
if err != nil {
errors := fmt.Errorf("failed to convert seafile object %s/%s to json", repoID, fileID)
return errors
}
err = WriteRaw(repoID, fileID, &buf)
if err != nil {
errors := fmt.Errorf("failed to write seafile object to storage : %v", err)
return errors
}
return nil
}
// GetSeafdir gets seafdir from storage backend.
func GetSeafdir(repoID string, dirID string) (*SeafDir, error) {
var buf bytes.Buffer
seafdir := new(SeafDir)
if dirID == EmptySha1 {
seafdir.DirID = EmptySha1
return seafdir, nil
}
err := ReadRaw(repoID, dirID, &buf)
if err != nil {
errors := fmt.Errorf("failed to read seafdir object from storage : %v", err)
return nil, errors
}
err = seafdir.FromData(buf.Bytes())
if err != nil {
errors := fmt.Errorf("failed to parse seafdir object %s/%s : %v", repoID, dirID, err)
return nil, errors
}
if seafdir.Version < 1 {
errors := fmt.Errorf("seadir object %s/%s version should be > 0", repoID, dirID)
return nil, errors
}
seafdir.DirID = dirID
return seafdir, nil
}
// SaveSeafdir saves seafdir to storage backend.
func SaveSeafdir(repoID string, seafdir *SeafDir) error {
dirID := seafdir.DirID
exist, _ := store.Exists(repoID, dirID)
if exist {
return nil
}
seafdir.DirType = SeafMetadataTypeDir
var buf bytes.Buffer
err := seafdir.ToData(&buf)
if err != nil {
errors := fmt.Errorf("failed to convert seafdir object %s/%s to json", repoID, dirID)
return errors
}
err = WriteRaw(repoID, dirID, &buf)
if err != nil {
errors := fmt.Errorf("failed to write seafdir object to storage : %v", err)
return errors
}
return nil
}
// Exists checks whether the fs object exists.
func Exists(repoID string, objID string) (bool, error) {
if objID == EmptySha1 {
return true, nil
}
return store.Exists(repoID, objID)
}
func comp(c rune) bool {
return c == '/'
}
// IsDir checks whether the mode is a directory.
func IsDir(m uint32) bool {
return (m & syscall.S_IFMT) == syscall.S_IFDIR
}
// IsRegular checks whether the mode is a regular file.
func IsRegular(m uint32) bool {
return (m & syscall.S_IFMT) == syscall.S_IFREG
}
// ErrPathNoExist is an error indicating that the path does not exist
var ErrPathNoExist = fmt.Errorf("path does not exist")
// GetSeafdirByPath gets the object of seafdir by path.
func GetSeafdirByPath(repoID string, rootID string, path string) (*SeafDir, error) {
dir, err := GetSeafdir(repoID, rootID)
if err != nil {
errors := fmt.Errorf("directory is missing")
return nil, errors
}
path = filepath.Join("/", path)
parts := strings.FieldsFunc(path, comp)
var dirID string
for _, name := range parts {
entries := dir.Entries
for _, v := range entries {
if v.Name == name && IsDir(v.Mode) {
dirID = v.ID
break
}
}
if dirID == "" {
return nil, ErrPathNoExist
}
dir, err = GetSeafdir(repoID, dirID)
if err != nil {
errors := fmt.Errorf("directory is missing")
return nil, errors
}
}
return dir, nil
}
// GetSeafdirIDByPath gets the dirID of SeafDir by path.
func GetSeafdirIDByPath(repoID, rootID, path string) (string, error) {
dirID, mode, err := GetObjIDByPath(repoID, rootID, path)
if err != nil {
err := fmt.Errorf("failed to get dir id by path: %s: %v", path, err)
return "", err
}
if dirID == "" || !IsDir(mode) {
return "", nil
}
return dirID, nil
}
// GetObjIDByPath gets the object ID by path.
func GetObjIDByPath(repoID, rootID, path string) (string, uint32, error) {
var name string
var baseDir *SeafDir
formatPath := filepath.Join(path)
if len(formatPath) == 0 || formatPath == "/" {
return rootID, syscall.S_IFDIR, nil
}
index := strings.Index(formatPath, "/")
if index < 0 {
dir, err := GetSeafdir(repoID, rootID)
if err != nil {
err := fmt.Errorf("failed to find root dir %s: %v", rootID, err)
return "", 0, err
}
name = formatPath
baseDir = dir
} else {
name = filepath.Base(formatPath)
dirName := filepath.Dir(formatPath)
dir, err := GetSeafdirByPath(repoID, rootID, dirName)
if err != nil {
if err == ErrPathNoExist {
return "", syscall.S_IFDIR, ErrPathNoExist
}
err := fmt.Errorf("failed to find dir %s in repo %s: %v", dirName, repoID, err)
return "", syscall.S_IFDIR, err
}
baseDir = dir
}
entries := baseDir.Entries
for _, de := range entries {
if de.Name == name {
return de.ID, de.Mode, nil
}
}
return "", 0, nil
}
// GetFileCountInfoByPath gets the count info of file by path.
func GetFileCountInfoByPath(repoID, rootID, path string) (*FileCountInfo, error) {
dirID, err := GetSeafdirIDByPath(repoID, rootID, path)
if err != nil {
err := fmt.Errorf("failed to get file count info for repo %s path %s: %v", repoID, path, err)
return nil, err
}
info, err := getFileCountInfo(repoID, dirID)
if err != nil {
err := fmt.Errorf("failed to get file count in repo %s: %v", repoID, err)
return nil, err
}
return info, nil
}
func getFileCountInfo(repoID, dirID string) (*FileCountInfo, error) {
dir, err := GetSeafdir(repoID, dirID)
if err != nil {
err := fmt.Errorf("failed to get dir: %v", err)
return nil, err
}
info := new(FileCountInfo)
entries := dir.Entries
for _, de := range entries {
if IsDir(de.Mode) {
tmpInfo, err := getFileCountInfo(repoID, de.ID)
if err != nil {
err := fmt.Errorf("failed to get file count: %v", err)
return nil, err
}
info.DirCount += tmpInfo.DirCount + 1
info.FileCount += tmpInfo.FileCount
info.Size += tmpInfo.Size
} else {
info.FileCount++
info.Size += de.Size
}
}
return info, nil
}
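A note on how these helpers compose from outside the package: resolve a path to an object ID with GetObjIDByPath, then load the object with GetSeafile or GetSeafdir. A minimal sketch, where repoID and rootID are placeholders that would come from repomgr and the repo's head commit:

// Sketch: look up "/docs/readme.md" in a commit's root tree.
objID, mode, err := fsmgr.GetObjIDByPath(repoID, rootID, "/docs/readme.md")
if err == fsmgr.ErrPathNoExist {
// an intermediate directory is missing
} else if err == nil && objID != "" && fsmgr.IsRegular(mode) {
file, _ := fsmgr.GetSeafile(repoID, objID)
_ = file.BlkIDs // IDs of the content blocks, in order
}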


@@ -0,0 +1,129 @@
package fsmgr
import (
"fmt"
"os"
"testing"
)
const (
seafileConfPath = "/tmp/conf"
seafileDataDir = "/tmp/conf/seafile-data"
repoID = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
blkID = "0401fc662e3bc87a41f299a907c056aaf8322a26"
subDirID = "0401fc662e3bc87a41f299a907c056aaf8322a27"
)
var dirID string
var fileID string
func createFile() error {
var blkIDs []string
for i := 0; i < 2; i++ {
blkIDs = append(blkIDs, blkID)
}
seafile, err := NewSeafile(1, 100, blkIDs)
if err != nil {
return err
}
err = SaveSeafile(repoID, seafile)
if err != nil {
return err
}
fileID = seafile.FileID
var entries []*SeafDirent
for i := 0; i < 2; i++ {
dirent := SeafDirent{ID: subDirID, Name: "/", Mode: 0x4000}
entries = append(entries, &dirent)
}
seafdir, err := NewSeafdir(1, entries)
if err != nil {
err := fmt.Errorf("failed to create seafdir: %v", err)
return err
}
err = SaveSeafdir(repoID, seafdir)
if err != nil {
return err
}
dirID = seafdir.DirID
return nil
}
func delFile() error {
err := os.RemoveAll(seafileConfPath)
if err != nil {
return err
}
return nil
}
func TestMain(m *testing.M) {
Init(seafileConfPath, seafileDataDir)
err := createFile()
if err != nil {
fmt.Printf("Failed to create test file : %v.\n", err)
os.Exit(1)
}
code := m.Run()
err = delFile()
if err != nil {
fmt.Printf("Failed to remove test file : %v\n", err)
}
os.Exit(code)
}
func TestGetSeafile(t *testing.T) {
exists, err := Exists(repoID, fileID)
if !exists {
t.Errorf("seafile does not exist : %v.\n", err)
}
seafile, err := GetSeafile(repoID, fileID)
if err != nil || seafile == nil {
t.Errorf("Failed to get seafile : %v.\n", err)
t.FailNow()
}
for _, v := range seafile.BlkIDs {
if v != blkID {
t.Errorf("Wrong file content.\n")
}
}
}
func TestGetSeafdir(t *testing.T) {
exists, err := Exists(repoID, dirID)
if !exists {
t.Errorf("seafdir does not exist : %v.\n", err)
}
seafdir, err := GetSeafdir(repoID, dirID)
if err != nil || seafdir == nil {
t.Errorf("Failed to get seafdir : %v.\n", err)
t.FailNow()
}
for _, v := range seafdir.Entries {
if v.ID != subDirID {
t.Errorf("Wrong file content.\n")
}
}
}
func TestGetSeafdirByPath(t *testing.T) {
seafdir, err := GetSeafdirByPath(repoID, dirID, "/")
if err != nil || seafdir == nil {
t.Errorf("Failed to get seafdir : %v.\n", err)
t.FailNow()
}
for _, v := range seafdir.Entries {
if v.ID != subDirID {
t.Errorf("Wrong file content.\n")
}
}
}

fileserver/go.mod

@@ -0,0 +1,12 @@
module github.com/haiwen/seafile-server/fileserver
go 1.14
require (
github.com/go-sql-driver/mysql v1.5.0
github.com/google/uuid v1.1.1
github.com/gorilla/mux v1.7.4
github.com/mattn/go-sqlite3 v1.14.0
github.com/smartystreets/goconvey v1.6.4 // indirect
gopkg.in/ini.v1 v1.55.0
)

fileserver/go.sum

@@ -0,0 +1,29 @@
github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/mattn/go-sqlite3 v1.14.0 h1:mLyGNKR8+Vv9CAU7PphKa2hkEqxxhn8i32J6FPj1/QA=
github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.3 h1:QdmJJYlDQhMDFrFP8IvVnx66D8mCbaQM4TsxKf7BXzo=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
gopkg.in/ini.v1 v1.55.0 h1:E8yzL5unfpW3M6fz/eB7Cb5MQAYSZ7GKo4Qth+N2sgQ=
gopkg.in/ini.v1 v1.55.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=

fileserver/http_code.go

@@ -0,0 +1,12 @@
package main
const (
seafHTTPResBadFileName = 440
seafHTTPResExists = 441
seafHTTPResNotExists = 441
seafHTTPResTooLarge = 442
seafHTTPResNoQuota = 443
seafHTTPResRepoDeleted = 444
seafHTTPResRepoCorrupted = 445
seafHTTPResBlockMissing = 446
)
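These 44x values are Seafile-specific status codes understood by the sync client, not standard HTTP codes. A hedged sketch of how a handler might report one, assuming net/http is imported (the helper name and message are illustrative, not part of this diff):

// Sketch: reject an upload that would exceed the user's quota.
func replyNoQuota(rsp http.ResponseWriter) {
http.Error(rsp, "Out of quota.", seafHTTPResNoQuota)
}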

fileserver/merge.go

@@ -0,0 +1,402 @@
package main
import (
"fmt"
"path/filepath"
"sort"
"strings"
"time"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/fsmgr"
)
type mergeOptions struct {
remoteRepoID string
remoteHead string
mergedRoot string
conflict bool
}
func mergeTrees(storeID string, roots []string, opt *mergeOptions) error {
if len(roots) != 3 {
err := fmt.Errorf("invalid argument")
return err
}
var trees []*fsmgr.SeafDir
for i := 0; i < 3; i++ {
dir, err := fsmgr.GetSeafdir(storeID, roots[i])
if err != nil {
err := fmt.Errorf("failed to get dir: %v", err)
return err
}
trees = append(trees, dir)
}
err := mergeTreesRecursive(storeID, trees, "", opt)
if err != nil {
err := fmt.Errorf("failed to merge trees: %v", err)
return err
}
return nil
}
func mergeTreesRecursive(storeID string, trees []*fsmgr.SeafDir, baseDir string, opt *mergeOptions) error {
var ptrs [3][]*fsmgr.SeafDirent
var mergedDents []*fsmgr.SeafDirent
n := 3
for i := 0; i < n; i++ {
if trees[i] != nil {
ptrs[i] = trees[i].Entries
}
}
var done bool
var offset = make([]int, n)
for {
dents := make([]*fsmgr.SeafDirent, n)
var firstName string
done = true
for i := 0; i < n; i++ {
if len(ptrs[i]) > offset[i] {
done = false
dent := ptrs[i][offset[i]]
if firstName == "" {
firstName = dent.Name
} else if dent.Name > firstName {
firstName = dent.Name
}
}
}
if done {
break
}
var nFiles, nDirs int
for i := 0; i < n; i++ {
if len(ptrs[i]) > offset[i] {
dent := ptrs[i][offset[i]]
if firstName == dent.Name {
if fsmgr.IsDir(dent.Mode) {
nDirs++
} else {
nFiles++
}
dents[i] = dent
offset[i]++
}
}
}
if nFiles > 0 {
retDents, err := mergeEntries(storeID, dents, baseDir, opt)
if err != nil {
return err
}
mergedDents = append(mergedDents, retDents...)
}
if nDirs > 0 {
retDents, err := mergeDirectories(storeID, dents, baseDir, opt)
if err != nil {
return err
}
mergedDents = append(mergedDents, retDents...)
}
}
sort.Sort(Dirents(mergedDents))
mergedTree, err := fsmgr.NewSeafdir(1, mergedDents)
if err != nil {
err := fmt.Errorf("failed to create seafdir: %v", err)
return err
}
opt.mergedRoot = mergedTree.DirID
if trees[1] != nil && trees[1].DirID == mergedTree.DirID ||
trees[2] != nil && trees[2].DirID == mergedTree.DirID {
return nil
}
err = fsmgr.SaveSeafdir(storeID, mergedTree)
if err != nil {
err := fmt.Errorf("failed to save merged tree %s/%s", storeID, baseDir)
return err
}
return nil
}
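// mergeEntries merges the dirents of a single file name across the three
// trees: dents[0] is the merge base, dents[1] the local head and dents[2]
// the remote head. A change made on only one side wins; when both sides
// changed the same entry independently, the remote version is kept under
// a generated conflict name and opt.conflict is set.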
func mergeEntries(storeID string, dents []*fsmgr.SeafDirent, baseDir string, opt *mergeOptions) ([]*fsmgr.SeafDirent, error) {
var mergedDents []*fsmgr.SeafDirent
n := 3
files := make([]*fsmgr.SeafDirent, n)
for i := 0; i < n; i++ {
if dents[i] != nil && !fsmgr.IsDir(dents[i].Mode) {
files[i] = dents[i]
}
}
base := files[0]
head := files[1]
remote := files[2]
if head != nil && remote != nil {
if head.ID == remote.ID {
mergedDents = append(mergedDents, head)
} else if base != nil && base.ID == head.ID {
mergedDents = append(mergedDents, remote)
} else if base != nil && base.ID == remote.ID {
mergedDents = append(mergedDents, head)
} else {
conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, head.Name)
if conflictName == "" {
err := fmt.Errorf("failed to generate conflict file name")
return nil, err
}
dents[2].Name = conflictName
mergedDents = append(mergedDents, remote)
opt.conflict = true
}
} else if base != nil && head == nil && remote != nil {
if base.ID != remote.ID {
if dents[1] != nil {
conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, remote.Name)
if conflictName == "" {
err := fmt.Errorf("failed to generate conflict file name")
return nil, err
}
dents[2].Name = conflictName
mergedDents = append(mergedDents, remote)
opt.conflict = true
} else {
mergedDents = append(mergedDents, remote)
}
}
} else if base != nil && head != nil && remote == nil {
if base.ID != head.ID {
if dents[2] != nil {
conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, dents[2].Name)
if conflictName == "" {
err := fmt.Errorf("failed to generate conflict file name")
return nil, err
}
dents[2].Name = conflictName
mergedDents = append(mergedDents, head)
opt.conflict = true
} else {
mergedDents = append(mergedDents, head)
}
}
} else if base == nil && head == nil && remote != nil {
if dents[1] == nil {
mergedDents = append(mergedDents, remote)
} else if dents[0] != nil && dents[0].ID == dents[1].ID {
mergedDents = append(mergedDents, remote)
} else {
conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, remote.Name)
if conflictName == "" {
err := fmt.Errorf("failed to generate conflict file name")
return nil, err
}
dents[2].Name = conflictName
mergedDents = append(mergedDents, remote)
opt.conflict = true
}
} else if base == nil && head != nil && remote == nil {
if dents[2] == nil {
mergedDents = append(mergedDents, head)
} else if dents[0] != nil && dents[0].ID == dents[2].ID {
mergedDents = append(mergedDents, head)
} else {
conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, dents[2].Name)
if conflictName == "" {
err := fmt.Errorf("failed to generate conflict file name")
return nil, err
}
dents[2].Name = conflictName
mergedDents = append(mergedDents, head)
opt.conflict = true
}
} else if base != nil && head == nil && remote == nil {
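// Both head and remote deleted the file; drop it from the merged tree.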
}
return mergedDents, nil
}
func mergeDirectories(storeID string, dents []*fsmgr.SeafDirent, baseDir string, opt *mergeOptions) ([]*fsmgr.SeafDirent, error) {
var dirMask int
var mergedDents []*fsmgr.SeafDirent
var dirName string
n := 3
subDirs := make([]*fsmgr.SeafDir, n)
for i := 0; i < n; i++ {
if dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {
dirMask |= 1 << i
}
}
switch dirMask {
case 0:
err := fmt.Errorf("no dirent for merge")
return nil, err
case 1:
return mergedDents, nil
case 2:
mergedDents = append(mergedDents, dents[1])
return mergedDents, nil
case 3:
if dents[0].ID == dents[1].ID {
return mergedDents, nil
}
case 4:
mergedDents = append(mergedDents, dents[2])
return mergedDents, nil
case 5:
if dents[0].ID == dents[2].ID {
return mergedDents, nil
}
case 6, 7:
// Masks 6 and 7 (head and remote are both dirs) share this logic;
// a separate empty "case 6:" would not fall through in Go.
if dents[1].ID == dents[2].ID {
mergedDents = append(mergedDents, dents[1])
return mergedDents, nil
} else if dents[0] != nil && dents[0].ID == dents[1].ID {
mergedDents = append(mergedDents, dents[2])
return mergedDents, nil
} else if dents[0] != nil && dents[0].ID == dents[2].ID {
mergedDents = append(mergedDents, dents[1])
return mergedDents, nil
}
default:
err := fmt.Errorf("wrong dir mask for merge")
return nil, err
}
for i := 0; i < n; i++ {
subDirs[i] = nil
}
for i := 0; i < n; i++ {
if dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {
dir, err := fsmgr.GetSeafdir(storeID, dents[i].ID)
if err != nil {
err := fmt.Errorf("failed to get seafdir %s/%s", storeID, dents[i].ID)
return nil, err
}
subDirs[i] = dir
dirName = dents[i].Name
}
}
newBaseDir := filepath.Join(baseDir, dirName)
newBaseDir = newBaseDir + "/"
err := mergeTreesRecursive(storeID, subDirs, newBaseDir, opt)
if err != nil {
err := fmt.Errorf("failed to merge trees: %v", err)
return nil, err
}
if dirMask == 3 || dirMask == 6 || dirMask == 7 {
dent := dents[1]
dent.ID = opt.mergedRoot
mergedDents = append(mergedDents, dent)
} else if dirMask == 5 {
dent := dents[2]
dent.ID = opt.mergedRoot
mergedDents = append(mergedDents, dent)
}
return mergedDents, nil
}
func mergeConflictFileName(storeID string, opt *mergeOptions, baseDir, fileName string) (string, error) {
var modifier string
var mtime int64
filePath := filepath.Join(baseDir, fileName)
modifier, mtime, err := getFileModifierMtime(opt.remoteRepoID, storeID, opt.remoteHead, filePath)
if err != nil {
commit, err := commitmgr.Load(opt.remoteRepoID, opt.remoteHead)
if err != nil {
err := fmt.Errorf("failed to get head commit")
return "", err
}
modifier = commit.CreatorName
mtime = time.Now().Unix()
}
conflictName := genConflictPath(fileName, modifier, mtime)
return conflictName, nil
}
func genConflictPath(originPath, modifier string, mtime int64) string {
var conflictPath string
// Use the conflicting file's mtime (the caller falls back to the
// current time) so the name reflects when the conflicting change was made.
timeBuf := time.Unix(mtime, 0).Format("2006-Jan-2-15-04-05")
dot := strings.LastIndex(originPath, ".")
if dot < 0 {
if modifier != "" {
conflictPath = fmt.Sprintf("%s (SFConflict %s %s)",
originPath, modifier, timeBuf)
} else {
conflictPath = fmt.Sprintf("%s (SFConflict %s)",
originPath, timeBuf)
}
} else {
if modifier != "" {
conflictPath = fmt.Sprintf("%s (SFConflict %s %s).%s",
originPath[:dot], modifier, timeBuf, originPath[dot+1:])
} else {
conflictPath = fmt.Sprintf("%s (SFConflict %s).%s",
originPath[:dot], timeBuf, originPath[dot+1:])
}
}
return conflictPath
}
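// For example, "report.txt" changed on both sides and last modified by a
// (hypothetical) user alice@example.com at 2020-07-01 12:00:00 becomes
// "report (SFConflict alice@example.com 2020-Jul-1-12-00-00).txt".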
func getFileModifierMtime(repoID, storeID, head, filePath string) (string, int64, error) {
commit, err := commitmgr.Load(repoID, head)
if err != nil {
err := fmt.Errorf("failed to get head commit")
return "", -1, err
}
parent := filepath.Dir(filePath)
if parent == "." {
parent = ""
}
fileName := filepath.Base(filePath)
dir, err := fsmgr.GetSeafdirByPath(storeID, commit.RootID, parent)
if err != nil {
err := fmt.Errorf("dir %s doesn't exist in repo %s", parent, repoID)
return "", -1, err
}
var dent *fsmgr.SeafDirent
entries := dir.Entries
for _, d := range entries {
if d.Name == fileName {
dent = d
break
}
}
if dent == nil {
err := fmt.Errorf("file %s doesn't exist in repo %s", fileName, repoID)
return "", -1, err
}
return dent.Modifier, dent.Mtime, nil
}
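The intended call pattern for the merge entry point: pass the base, head and remote root IDs in that order, then read the merged root and the conflict flag back out of mergeOptions. A minimal sketch with placeholder IDs:

// Sketch: three-way merge of remote changes into the local head.
opt := new(mergeOptions)
opt.remoteRepoID = repoID
opt.remoteHead = remoteHeadCommitID
roots := []string{baseRoot, headRoot, remoteRoot} // order: base, head, remote
if err := mergeTrees(storeID, roots, opt); err != nil {
// handle the merge failure
}
newRoot := opt.mergedRoot // root ID of the merged tree; opt.conflict reports conflicts
_ = newRoot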

fileserver/merge_test.go

@@ -0,0 +1,485 @@
package main
import (
"fmt"
"os"
"syscall"
"testing"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/fsmgr"
)
const (
mergeTestCommitID = "0401fc662e3bc87a41f299a907c056aaf8322a27"
mergeTestRepoID = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
mergeTestSeafileConfPath = "/tmp/conf"
mergeTestSeafileDataDir = "/tmp/conf/seafile-data"
)
var mergeTestTree1 string
var mergeTestTree2 string
var mergeTestTree3 string
var mergeTestTree4 string
var mergeTestTree5 string
var mergeTestTree1CommitID string
var mergeTestTree2CommitID string
var mergeTestTree3CommitID string
var mergeTestTree4CommitID string
var mergeTestTree5CommitID string
/*
test directory structure:
tree1
|--bbb
|-- testfile(size:1)
tree2
|--bbb
|-- testfile(size:10)
tree3
|--bbb
tree4
|--bbb
|-- testfile(size:100)
tree5
|--
*/
func mergeTestCreateTestDir() error {
modeDir := uint32(syscall.S_IFDIR | 0644)
modeFile := uint32(syscall.S_IFREG | 0644)
emptyDir, err := mergeTestCreateSeafdir(nil)
if err != nil {
err := fmt.Errorf("failed to get seafdir: %v", err)
return err
}
mergeTestTree5 = emptyDir
file1, err := fsmgr.NewSeafile(1, 1, nil)
if err != nil {
err := fmt.Errorf("failed to create seafile: %v", err)
return err
}
err = fsmgr.SaveSeafile(mergeTestRepoID, file1)
if err != nil {
err := fmt.Errorf("failed to save seafile: %v", err)
return err
}
dent1 := fsmgr.SeafDirent{ID: file1.FileID, Name: "testfile", Mode: modeFile, Size: 1}
dir1, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent1})
if err != nil {
err := fmt.Errorf("failed to get seafdir: %v", err)
return err
}
dent2 := fsmgr.SeafDirent{ID: dir1, Name: "bbb", Mode: modeDir}
dir2, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent2})
if err != nil {
err := fmt.Errorf("failed to get seafdir: %v", err)
return err
}
mergeTestTree1 = dir2
commit1 := commitmgr.NewCommit(mergeTestRepoID, "", mergeTestTree1, "seafile", "this is the first commit.\n")
err = commitmgr.Save(commit1)
if err != nil {
err := fmt.Errorf("failed to save commit: %v", err)
return err
}
mergeTestTree1CommitID = commit1.CommitID
file2, err := fsmgr.NewSeafile(1, 10, nil)
if err != nil {
err := fmt.Errorf("failed to create seafile: %v", err)
return err
}
err = fsmgr.SaveSeafile(mergeTestRepoID, file2)
if err != nil {
err := fmt.Errorf("failed to save seafile: %v", err)
return err
}
dent3 := fsmgr.SeafDirent{ID: file2.FileID, Name: "testfile", Mode: modeFile, Size: 10}
dir3, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent3})
if err != nil {
err := fmt.Errorf("failed to get seafdir: %v", err)
return err
}
dent4 := fsmgr.SeafDirent{ID: dir3, Name: "bbb", Mode: modeDir}
dir4, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent4})
if err != nil {
err := fmt.Errorf("failed to get seafdir: %v", err)
return err
}
mergeTestTree2 = dir4
commit2 := commitmgr.NewCommit(mergeTestRepoID, "", mergeTestTree2, "seafile", "this is the second commit.\n")
err = commitmgr.Save(commit2)
if err != nil {
err := fmt.Errorf("failed to save commit: %v", err)
return err
}
mergeTestTree2CommitID = commit2.CommitID
dir5, err := mergeTestCreateSeafdir(nil)
if err != nil {
err := fmt.Errorf("failed to get seafdir: %v", err)
return err
}
dent6 := fsmgr.SeafDirent{ID: dir5, Name: "bbb", Mode: modeDir}
dir6, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent6})
if err != nil {
err := fmt.Errorf("failed to get seafdir: %v", err)
return err
}
mergeTestTree3 = dir6
commit3 := commitmgr.NewCommit(mergeTestRepoID, "", mergeTestTree3, "seafile", "this is the third commit.\n")
err = commitmgr.Save(commit3)
if err != nil {
err := fmt.Errorf("failed to save commit: %v", err)
return err
}
mergeTestTree3CommitID = commit3.CommitID
file3, err := fsmgr.NewSeafile(1, 100, nil)
if err != nil {
err := fmt.Errorf("failed to create seafile: %v", err)
return err
}
err = fsmgr.SaveSeafile(mergeTestRepoID, file3)
if err != nil {
err := fmt.Errorf("failed to save seafile: %v", err)
return err
}
dent7 := fsmgr.SeafDirent{ID: file3.FileID, Name: "testfile", Mode: modeFile, Size: 100}
dir7, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent7})
if err != nil {
err := fmt.Errorf("failed to get seafdir: %v", err)
return err
}
dent8 := fsmgr.SeafDirent{ID: dir7, Name: "bbb", Mode: modeDir}
dir8, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent8})
if err != nil {
err := fmt.Errorf("failed to get seafdir: %v", err)
return err
}
mergeTestTree4 = dir8
commit4 := commitmgr.NewCommit(mergeTestRepoID, "", mergeTestTree4, "seafile", "this is the fourth commit.\n")
err = commitmgr.Save(commit4)
if err != nil {
err := fmt.Errorf("failed to save commit: %v", err)
return err
}
mergeTestTree4CommitID = commit4.CommitID
return nil
}
func mergeTestCreateSeafdir(dents []*fsmgr.SeafDirent) (string, error) {
seafdir, err := fsmgr.NewSeafdir(1, dents)
if err != nil {
err := fmt.Errorf("failed to create seafdir: %v", err)
return "", err
}
err = fsmgr.SaveSeafdir(mergeTestRepoID, seafdir)
if err != nil {
return "", err
}
return seafdir.DirID, nil
}
func mergeTestDelFile() error {
err := os.RemoveAll(mergeTestSeafileConfPath)
if err != nil {
return err
}
return nil
}
func TestMergeTrees(t *testing.T) {
commitmgr.Init(mergeTestSeafileConfPath, mergeTestSeafileDataDir)
fsmgr.Init(mergeTestSeafileConfPath, mergeTestSeafileDataDir)
err := mergeTestCreateTestDir()
if err != nil {
t.Fatalf("failed to create test dir: %v", err)
}
t.Run("test1", testMergeTrees1)
t.Run("test2", testMergeTrees2)
t.Run("test3", testMergeTrees3)
t.Run("test4", testMergeTrees4)
t.Run("test5", testMergeTrees5)
t.Run("test6", testMergeTrees6)
t.Run("test7", testMergeTrees7)
t.Run("test8", testMergeTrees8)
t.Run("test9", testMergeTrees9)
t.Run("test10", testMergeTrees10)
t.Run("test11", testMergeTrees11)
t.Run("test12", testMergeTrees12)
err = mergeTestDelFile()
if err != nil {
t.Fatalf("failed to remove test file : %v", err)
}
}
// head add file
func testMergeTrees1(t *testing.T) {
commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
if err != nil {
t.Errorf("failed to load commit.\n")
}
roots := []string{mergeTestTree3, mergeTestTree2, mergeTestTree3}
opt := new(mergeOptions)
opt.remoteRepoID = mergeTestRepoID
opt.remoteHead = commit.CommitID
err = mergeTrees(mergeTestRepoID, roots, opt)
if err != nil {
t.Errorf("failed to merge.\n")
}
if opt.mergedRoot != mergeTestTree2 {
t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
}
}
// remote add file
func testMergeTrees2(t *testing.T) {
commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
if err != nil {
t.Errorf("failed to load commit.\n")
}
roots := []string{mergeTestTree3, mergeTestTree3, mergeTestTree2}
opt := new(mergeOptions)
opt.remoteRepoID = mergeTestRepoID
opt.remoteHead = commit.CommitID
err = mergeTrees(mergeTestRepoID, roots, opt)
if err != nil {
t.Errorf("failed to merge.\n")
}
if opt.mergedRoot != mergeTestTree2 {
t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
}
}
// head modify file
func testMergeTrees3(t *testing.T) {
commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
if err != nil {
t.Errorf("failed to load commit.\n")
}
roots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree1}
opt := new(mergeOptions)
opt.remoteRepoID = mergeTestRepoID
opt.remoteHead = commit.CommitID
err = mergeTrees(mergeTestRepoID, roots, opt)
if err != nil {
t.Errorf("failed to merge.\n")
}
if opt.mergedRoot != mergeTestTree2 {
t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
}
}
// remote modify file
func testMergeTrees4(t *testing.T) {
commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
if err != nil {
t.Errorf("failed to load commit.\n")
}
roots := []string{mergeTestTree1, mergeTestTree1, mergeTestTree2}
opt := new(mergeOptions)
opt.remoteRepoID = mergeTestRepoID
opt.remoteHead = commit.CommitID
err = mergeTrees(mergeTestRepoID, roots, opt)
if err != nil {
t.Errorf("failed to merge.\n")
}
if opt.mergedRoot != mergeTestTree2 {
t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
}
}
// head and remote add file
func testMergeTrees5(t *testing.T) {
commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
if err != nil {
t.Errorf("failed to load commit.\n")
}
roots := []string{mergeTestTree3, mergeTestTree1, mergeTestTree2}
opt := new(mergeOptions)
opt.remoteRepoID = mergeTestRepoID
opt.remoteHead = commit.CommitID
err = mergeTrees(mergeTestRepoID, roots, opt)
if err != nil {
t.Errorf("failed to merge.\n")
}
if !opt.conflict {
t.Errorf("merge error %s.\n", opt.mergedRoot)
}
}
// head and remote modify file
func testMergeTrees6(t *testing.T) {
commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
if err != nil {
t.Errorf("failed to load commit.\n")
}
roots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree4}
opt := new(mergeOptions)
opt.remoteRepoID = mergeTestRepoID
opt.remoteHead = commit.CommitID
err = mergeTrees(mergeTestRepoID, roots, opt)
if err != nil {
t.Errorf("failed to merge.\n")
}
if !opt.conflict {
t.Errorf("merge error %s.\n", opt.mergedRoot)
}
}
// head modify file and remote delete file
func testMergeTrees7(t *testing.T) {
commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
if err != nil {
t.Errorf("failed to load commit.\n")
}
roots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree3}
opt := new(mergeOptions)
opt.remoteRepoID = mergeTestRepoID
opt.remoteHead = commit.CommitID
err = mergeTrees(mergeTestRepoID, roots, opt)
if err != nil {
t.Errorf("failed to merge.\n")
}
if opt.mergedRoot != mergeTestTree2 {
t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
}
}
// head delete file and remote modify file
func testMergeTrees8(t *testing.T) {
commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
if err != nil {
t.Errorf("failed to load commit.\n")
}
roots := []string{mergeTestTree1, mergeTestTree3, mergeTestTree2}
opt := new(mergeOptions)
opt.remoteRepoID = mergeTestRepoID
opt.remoteHead = commit.CommitID
err = mergeTrees(mergeTestRepoID, roots, opt)
if err != nil {
t.Errorf("failed to merge.\n")
}
if opt.mergedRoot != mergeTestTree2 {
t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
}
}
// head modify file and remote delete dir of this file
func testMergeTrees9(t *testing.T) {
commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
if err != nil {
t.Errorf("failed to load commit.\n")
}
roots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree5}
opt := new(mergeOptions)
opt.remoteRepoID = mergeTestRepoID
opt.remoteHead = commit.CommitID
err = mergeTrees(mergeTestRepoID, roots, opt)
if err != nil {
t.Errorf("failed to merge.\n")
}
if opt.mergedRoot != mergeTestTree2 {
t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
}
}
// remote modify file and head delete dir of this file
func testMergeTrees10(t *testing.T) {
commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
if err != nil {
t.Errorf("failed to load commit.\n")
}
roots := []string{mergeTestTree1, mergeTestTree5, mergeTestTree2}
opt := new(mergeOptions)
opt.remoteRepoID = mergeTestRepoID
opt.remoteHead = commit.CommitID
err = mergeTrees(mergeTestRepoID, roots, opt)
if err != nil {
t.Errorf("failed to merge.\n")
}
if opt.mergedRoot != mergeTestTree2 {
t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
}
}
// head add file and remote delete dir of this file
func testMergeTrees11(t *testing.T) {
commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
if err != nil {
t.Errorf("failed to load commit.\n")
}
roots := []string{mergeTestTree3, mergeTestTree1, mergeTestTree5}
opt := new(mergeOptions)
opt.remoteRepoID = mergeTestRepoID
opt.remoteHead = commit.CommitID
err = mergeTrees(mergeTestRepoID, roots, opt)
if err != nil {
t.Errorf("failed to merge.\n")
}
if opt.mergedRoot != mergeTestTree1 {
t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree1)
}
}
// remote add file and head delete dir of this file
func testMergeTrees12(t *testing.T) {
commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
if err != nil {
t.Errorf("failed to load commit.\n")
}
roots := []string{mergeTestTree3, mergeTestTree5, mergeTestTree1}
opt := new(mergeOptions)
opt.remoteRepoID = mergeTestRepoID
opt.remoteHead = commit.CommitID
err = mergeTrees(mergeTestRepoID, roots, opt)
if err != nil {
t.Errorf("failed to merge.\n")
}
if opt.mergedRoot != mergeTestTree1 {
t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree1)
}
}


@@ -0,0 +1,99 @@
// Implementation of file system storage backend.
package objstore
import (
"io"
"io/ioutil"
"os"
"path"
)
type fsBackend struct {
// Path of the object directory
objDir string
objType string
tmpDir string
}
func newFSBackend(seafileDataDir string, objType string) (*fsBackend, error) {
objDir := path.Join(seafileDataDir, "storage", objType)
err := os.MkdirAll(objDir, os.ModePerm)
if err != nil {
return nil, err
}
tmpDir := path.Join(seafileDataDir, "tmpfiles")
err = os.MkdirAll(tmpDir, os.ModePerm)
if err != nil {
return nil, err
}
backend := new(fsBackend)
backend.objDir = objDir
backend.objType = objType
backend.tmpDir = tmpDir
return backend, nil
}
func (b *fsBackend) read(repoID string, objID string, w io.Writer) error {
p := path.Join(b.objDir, repoID, objID[:2], objID[2:])
fd, err := os.Open(p)
if err != nil {
return err
}
defer fd.Close()
_, err = io.Copy(w, fd)
if err != nil {
return err
}
return nil
}
func (b *fsBackend) write(repoID string, objID string, r io.Reader, sync bool) error {
parentDir := path.Join(b.objDir, repoID, objID[:2])
p := path.Join(parentDir, objID[2:])
err := os.MkdirAll(parentDir, os.ModePerm)
if err != nil {
return err
}
tFile, err := ioutil.TempFile(b.tmpDir, objID)
if err != nil {
return err
}
defer os.Remove(tFile.Name())
defer tFile.Close()
_, err = io.Copy(tFile, r)
if err != nil {
return err
}
// Honor the sync flag: flush the staged file to disk before the rename.
if sync {
if err := tFile.Sync(); err != nil {
return err
}
}
err = os.Rename(tFile.Name(), p)
if err != nil {
return err
}
return nil
}
func (b *fsBackend) exists(repoID string, objID string) (bool, error) {
p := path.Join(b.objDir, repoID, objID[:2], objID[2:])
_, err := os.Stat(p)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
return true, nil
}
func (b *fsBackend) stat(repoID string, objID string) (int64, error) {
p := path.Join(b.objDir, repoID, objID[:2], objID[2:])
fileInfo, err := os.Stat(p)
if err != nil {
return -1, err
}
return fileInfo.Size(), nil
}
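On disk an object is stored at storage/&lt;objType&gt;/&lt;repoID&gt;/&lt;objID[:2]&gt;/&lt;objID[2:]&gt;, a two-level fan-out keyed by the first two hex characters of the object ID. With the IDs used by the tests in this PR, commit object 0401fc662e3bc87a41f299a907c056aaf8322a27 of repo b1f2ad61-9164-418a-a47f-ab805dbd5694 lands at storage/commit/b1f2ad61-9164-418a-a47f-ab805dbd5694/04/01fc662e3bc87a41f299a907c056aaf8322a27. Writes are staged under tmpfiles/ and renamed into place, so a reader never observes a half-written object.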


@@ -0,0 +1,56 @@
// Package objstore provides operations for commit, fs and block objects.
// It is a low-level package used by the commitmgr, fsmgr and blockmgr packages to access storage.
package objstore
import (
"io"
)
// ObjectStore is a container to access storage backend
type ObjectStore struct {
// can be "commit", "fs", or "block"
ObjType string
backend storageBackend
}
// storageBackend is the interface implemented by storage backends.
// An object store may have one or multiple storage backends.
type storageBackend interface {
// Read an object from backend and write the contents into w.
read(repoID string, objID string, w io.Writer) (err error)
// Write the contents from r to the object.
write(repoID string, objID string, r io.Reader, sync bool) (err error)
// exists checks whether an object exists.
exists(repoID string, objID string) (res bool, err error)
// stat calculates an object's size
stat(repoID string, objID string) (res int64, err error)
}
// New returns a new object store for a given type of objects.
// objType can be "commit", "fs", or "block".
func New(seafileConfPath string, seafileDataDir string, objType string) *ObjectStore {
obj := new(ObjectStore)
obj.ObjType = objType
obj.backend, _ = newFSBackend(seafileDataDir, objType)
return obj
}
// Read reads an object from the storage backend and writes its contents to w.
func (s *ObjectStore) Read(repoID string, objID string, w io.Writer) (err error) {
return s.backend.read(repoID, objID, w)
}
// Write writes the contents of r to the object in the storage backend.
func (s *ObjectStore) Write(repoID string, objID string, r io.Reader, sync bool) (err error) {
return s.backend.write(repoID, objID, r, sync)
}
// Exists checks whether the object exists in the storage backend.
func (s *ObjectStore) Exists(repoID string, objID string) (res bool, err error) {
return s.backend.exists(repoID, objID)
}
// Stat returns the size of the object.
func (s *ObjectStore) Stat(repoID string, objID string) (res int64, err error) {
return s.backend.stat(repoID, objID)
}
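Since storageBackend is a small four-method interface, alternative backends are easy to sketch. A hypothetical in-memory backend for tests, not part of this PR (assumes io, io/ioutil and os are imported; the map must be created with make before use):

// memBackend is a hypothetical in-memory storageBackend, useful in tests.
type memBackend struct {
objects map[string][]byte // key: repoID + "/" + objID
}
func (b *memBackend) read(repoID string, objID string, w io.Writer) error {
data, ok := b.objects[repoID+"/"+objID]
if !ok {
return os.ErrNotExist
}
_, err := w.Write(data)
return err
}
func (b *memBackend) write(repoID string, objID string, r io.Reader, sync bool) error {
data, err := ioutil.ReadAll(r)
if err != nil {
return err
}
b.objects[repoID+"/"+objID] = data // sync is a no-op in memory
return nil
}
func (b *memBackend) exists(repoID string, objID string) (bool, error) {
_, ok := b.objects[repoID+"/"+objID]
return ok, nil
}
func (b *memBackend) stat(repoID string, objID string) (int64, error) {
data, ok := b.objects[repoID+"/"+objID]
if !ok {
return -1, os.ErrNotExist
}
return int64(len(data)), nil
}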


@@ -0,0 +1,105 @@
package objstore
import (
"fmt"
"os"
"path"
"testing"
)
const (
testFile = "output.data"
seafileConfPath = "/tmp/conf"
seafileDataDir = "/tmp/conf/seafile-data"
repoID = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
objID = "0401fc662e3bc87a41f299a907c056aaf8322a27"
)
func createFile() error {
outputFile, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
return err
}
defer outputFile.Close()
outputString := "hello world!\n"
for i := 0; i < 10; i++ {
outputFile.WriteString(outputString)
}
return nil
}
func delFile() error {
err := os.Remove(testFile)
if err != nil {
return err
}
err = os.RemoveAll(seafileConfPath)
if err != nil {
return err
}
return nil
}
func TestMain(m *testing.M) {
err := createFile()
if err != nil {
fmt.Printf("Failed to create test file : %v\n", err)
os.Exit(1)
}
code := m.Run()
err = delFile()
if err != nil {
fmt.Printf("Failed to remove test file : %v\n", err)
os.Exit(1)
}
os.Exit(code)
}
func testWrite(t *testing.T) {
inputFile, err := os.Open(testFile)
if err != nil {
t.Errorf("Failed to open test file : %v\n", err)
}
defer inputFile.Close()
bend := New(seafileConfPath, seafileDataDir, "commit")
err = bend.Write(repoID, objID, inputFile, true)
if err != nil {
t.Errorf("Failed to write backend : %v\n", err)
}
}
func testRead(t *testing.T) {
outputFile, err := os.OpenFile(testFile, os.O_WRONLY, 0666)
if err != nil {
t.Errorf("Failed to open test file : %v\n", err)
}
defer outputFile.Close()
bend := New(seafileConfPath, seafileDataDir, "commit")
err = bend.Read(repoID, objID, outputFile)
if err != nil {
t.Errorf("Failed to read backend : %s\n", err)
}
}
func testExists(t *testing.T) {
bend := New(seafileConfPath, seafileDataDir, "commit")
ret, _ := bend.Exists(repoID, objID)
if !ret {
t.Errorf("File does not exist\n")
}
filePath := path.Join(seafileDataDir, "storage", "commit", repoID, objID[:2], objID[2:])
fileInfo, err := os.Stat(filePath)
if err != nil {
t.Fatalf("Failed to stat file : %v\n", err)
}
if fileInfo.Size() != 130 {
t.Errorf("File exists, but its size is incorrect.\n")
}
}
func TestObjStore(t *testing.T) {
testWrite(t)
testRead(t)
testExists(t)
}

fileserver/quota.go

@@ -0,0 +1,173 @@
package main
import (
"database/sql"
"fmt"
"path/filepath"
"strconv"
"strings"

"github.com/haiwen/seafile-server/fileserver/repomgr"
"gopkg.in/ini.v1"
)
// InfiniteQuota indicates that the quota is unlimited.
const (
InfiniteQuota = -2
)
func checkQuota(repoID string, delta int64) (int, error) {
if repoID == "" {
err := fmt.Errorf("bad arguments")
return -1, err
}
vInfo, err := repomgr.GetVirtualRepoInfo(repoID)
if err != nil {
err := fmt.Errorf("failed to get virtual repo: %v", err)
return -1, err
}
rRepoID := repoID
if vInfo != nil {
rRepoID = vInfo.OriginRepoID
}
user, err := repomgr.GetRepoOwner(rRepoID)
if err != nil {
err := fmt.Errorf("failed to get repo owner: %v", err)
return -1, err
}
if user == "" {
err := fmt.Errorf("repo %s has no owner", repoID)
return -1, err
}
quota, err := getUserQuota(user)
if err != nil {
err := fmt.Errorf("failed to get user quota: %v", err)
return -1, err
}
if quota == InfiniteQuota {
return 0, nil
}
usage, err := getUserUsage(user)
if err != nil || usage < 0 {
err := fmt.Errorf("failed to get user usage: %v", err)
return -1, err
}
usage += delta
if usage >= quota {
return 1, nil
}
return 0, nil
}
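// A sketch of the intended use from an upload handler (the handler
// variables are illustrative): 1 means the quota would be exceeded,
// 0 means the upload may proceed.
//
// ret, err := checkQuota(repoID, r.ContentLength)
// if err == nil && ret == 1 {
// http.Error(w, "Out of quota.", seafHTTPResNoQuota)
// return
// }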
func getUserQuota(user string) (int64, error) {
var quota int64
sqlStr := "SELECT quota FROM UserQuota WHERE user=?"
row := seafileDB.QueryRow(sqlStr, user)
if err := row.Scan(&quota); err != nil {
if err != sql.ErrNoRows {
return -1, err
}
}
if quota <= 0 {
quota = getDefaultQuota()
}
return quota, nil
}
// Storage unit.
const (
KB = 1000
MB = 1000000
GB = 1000000000
TB = 1000000000000
)
func getDefaultQuota() int64 {
seafileConfPath := filepath.Join(absDataDir, "seafile.conf")
config, err := ini.Load(seafileConfPath)
if err != nil {
return InfiniteQuota
}
var quota int64
section, err := config.GetSection("quota")
if err != nil {
return InfiniteQuota
}
key, err := section.GetKey("default")
if err != nil {
return InfiniteQuota
}
quotaStr := key.String()
quota = parseQuota(quotaStr)
return quota
}
func parseQuota(quotaStr string) int64 {
var quota int64
var multiplier int64 = GB
// Match unit suffixes case-insensitively, so "2GB" works like "2gb".
quotaStr = strings.ToLower(quotaStr)
if end := strings.Index(quotaStr, "kb"); end > 0 {
multiplier = KB
quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)
if err != nil {
return InfiniteQuota
}
quota = quotaInt * multiplier
} else if end := strings.Index(quotaStr, "mb"); end > 0 {
multiplier = MB
quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)
if err != nil {
return InfiniteQuota
}
quota = quotaInt * multiplier
} else if end := strings.Index(quotaStr, "gb"); end > 0 {
multiplier = GB
quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)
if err != nil {
return InfiniteQuota
}
quota = quotaInt * multiplier
} else if end := strings.Index(quotaStr, "tb"); end > 0 {
multiplier = TB
quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)
if err != nil {
return InfiniteQuota
}
quota = quotaInt * multiplier
} else {
quotaInt, err := strconv.ParseInt(quotaStr, 10, 0)
if err != nil {
return InfiniteQuota
}
quota = quotaInt * multiplier
}
return quota
}
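// Worked examples: parseQuota("100kb") == 100 * KB, parseQuota("2gb") ==
// 2 * GB, and a bare number such as "2" defaults to gigabytes; anything
// unparsable falls back to InfiniteQuota.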
func getUserUsage(user string) (int64, error) {
var usage sql.NullInt64
sqlStr := "SELECT SUM(size) FROM " +
"RepoOwner o LEFT JOIN VirtualRepo v ON o.repo_id=v.repo_id, " +
"RepoSize WHERE " +
"owner_id=? AND o.repo_id=RepoSize.repo_id " +
"AND v.repo_id IS NULL"
row := seafileDB.QueryRow(sqlStr, user)
if err := row.Scan(&usage); err != nil {
if err != sql.ErrNoRows {
return -1, err
}
}
if usage.Valid {
return usage.Int64, nil
}
return 0, nil
}


@@ -0,0 +1,643 @@
// Package repomgr manages repo objects and file operations in repos.
package repomgr
import (
"database/sql"
"fmt"
"log"
"time"
// Change to non-blank imports when used
_ "github.com/haiwen/seafile-server/fileserver/blockmgr"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
)
// Repo status
const (
RepoStatusNormal = iota
RepoStatusReadOnly
NRepoStatus
)
// Repo contains information about a repo.
type Repo struct {
ID string
Name string
Desc string
LastModifier string
LastModificationTime int64
HeadCommitID string
RootID string
IsCorrupted bool
// Set when repo is virtual
VirtualInfo *VRepoInfo
// ID for fs and block store
StoreID string
// Encrypted repo info
IsEncrypted bool
EncVersion int
Magic string
RandomKey string
Salt string
Version int
}
// VRepoInfo contains virtual repo information.
type VRepoInfo struct {
RepoID string
OriginRepoID string
Path string
BaseCommitID string
}
var seafileDB *sql.DB
// Init initializes the repomgr package with the given database handle.
func Init(seafDB *sql.DB) {
seafileDB = seafDB
}
// Get returns Repo object by repo ID.
func Get(id string) *Repo {
query := `SELECT r.repo_id, b.commit_id, v.origin_repo, v.path, v.base_commit FROM ` +
`Repo r LEFT JOIN Branch b ON r.repo_id = b.repo_id ` +
`LEFT JOIN VirtualRepo v ON r.repo_id = v.repo_id ` +
`WHERE r.repo_id = ? AND b.name = 'master'`
stmt, err := seafileDB.Prepare(query)
if err != nil {
log.Printf("failed to prepare sql : %s %v", query, err)
return nil
}
defer stmt.Close()
rows, err := stmt.Query(id)
if err != nil {
log.Printf("failed to query sql : %v", err)
return nil
}
defer rows.Close()
repo := new(Repo)
var originRepoID sql.NullString
var path sql.NullString
var baseCommitID sql.NullString
if rows.Next() {
err := rows.Scan(&repo.ID, &repo.HeadCommitID, &originRepoID, &path, &baseCommitID)
if err != nil {
log.Printf("failed to scan sql rows : %v", err)
return nil
}
} else {
return nil
}
if repo.HeadCommitID == "" {
log.Printf("repo %s is corrupted", id)
return nil
}
if originRepoID.Valid {
repo.VirtualInfo = new(VRepoInfo)
repo.VirtualInfo.OriginRepoID = originRepoID.String
repo.StoreID = originRepoID.String
if path.Valid {
repo.VirtualInfo.Path = path.String
}
if baseCommitID.Valid {
repo.VirtualInfo.BaseCommitID = baseCommitID.String
}
} else {
repo.StoreID = repo.ID
}
commit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
if err != nil {
log.Printf("failed to load commit %s/%s : %v", repo.ID, repo.HeadCommitID, err)
return nil
}
repo.Name = commit.RepoName
repo.Desc = commit.RepoDesc
repo.LastModifier = commit.CreatorName
repo.LastModificationTime = commit.Ctime
repo.RootID = commit.RootID
repo.Version = commit.Version
if commit.Encrypted == "true" {
repo.IsEncrypted = true
repo.EncVersion = commit.EncVersion
if repo.EncVersion == 1 {
repo.Magic = commit.Magic
} else if repo.EncVersion == 2 {
repo.Magic = commit.Magic
repo.RandomKey = commit.RandomKey
} else if repo.EncVersion == 3 {
repo.Magic = commit.Magic
repo.RandomKey = commit.RandomKey
repo.Salt = commit.Salt
}
}
return repo
}
// RepoToCommit converts Repo to Commit.
func RepoToCommit(repo *Repo, commit *commitmgr.Commit) {
commit.RepoID = repo.ID
commit.RepoName = repo.Name
if repo.IsEncrypted {
commit.Encrypted = "true"
commit.EncVersion = repo.EncVersion
if repo.EncVersion == 1 {
commit.Magic = repo.Magic
} else if repo.EncVersion == 2 {
commit.Magic = repo.Magic
commit.RandomKey = repo.RandomKey
} else if repo.EncVersion == 3 {
commit.Magic = repo.Magic
commit.RandomKey = repo.RandomKey
commit.Salt = repo.Salt
}
} else {
commit.Encrypted = "false"
}
commit.Version = repo.Version
}
// GetEx returns the repo object even if it's corrupted.
func GetEx(id string) *Repo {
query := `SELECT r.repo_id, b.commit_id, v.origin_repo, v.path, v.base_commit FROM ` +
`Repo r LEFT JOIN Branch b ON r.repo_id = b.repo_id ` +
`LEFT JOIN VirtualRepo v ON r.repo_id = v.repo_id ` +
`WHERE r.repo_id = ? AND b.name = 'master'`
stmt, err := seafileDB.Prepare(query)
if err != nil {
log.Printf("failed to prepare sql : %s %v", query, err)
return nil
}
defer stmt.Close()
rows, err := stmt.Query(id)
if err != nil {
log.Printf("failed to query sql : %v", err)
return nil
}
defer rows.Close()
repo := new(Repo)
var originRepoID sql.NullString
var path sql.NullString
var baseCommitID sql.NullString
if rows.Next() {
err := rows.Scan(&repo.ID, &repo.HeadCommitID, &originRepoID, &path, &baseCommitID)
if err != nil {
log.Printf("failed to scan sql rows : %v", err)
return nil
}
} else {
return nil
}
if originRepoID.Valid {
repo.VirtualInfo = new(VRepoInfo)
repo.VirtualInfo.OriginRepoID = originRepoID.String
repo.StoreID = originRepoID.String
if path.Valid {
repo.VirtualInfo.Path = path.String
}
if baseCommitID.Valid {
repo.VirtualInfo.BaseCommitID = baseCommitID.String
}
} else {
repo.StoreID = repo.ID
}
if repo.HeadCommitID == "" {
repo.IsCorrupted = true
return repo
}
commit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
if err != nil {
log.Printf("failed to load commit %s/%s : %v", repo.ID, repo.HeadCommitID, err)
repo.IsCorrupted = true
return repo
}
repo.Name = commit.RepoName
repo.LastModifier = commit.CreatorName
repo.LastModificationTime = commit.Ctime
repo.RootID = commit.RootID
repo.Version = commit.Version
if commit.Encrypted == "true" {
repo.IsEncrypted = true
repo.EncVersion = commit.EncVersion
if repo.EncVersion == 1 {
repo.Magic = commit.Magic
} else if repo.EncVersion == 2 {
repo.Magic = commit.Magic
repo.RandomKey = commit.RandomKey
} else if repo.EncVersion == 3 {
repo.Magic = commit.Magic
repo.RandomKey = commit.RandomKey
repo.Salt = commit.Salt
}
}
return repo
}
// GetVirtualRepoInfo returns virtual repo info by repo id.
func GetVirtualRepoInfo(repoID string) (*VRepoInfo, error) {
sqlStr := "SELECT repo_id, origin_repo, path, base_commit FROM VirtualRepo WHERE repo_id = ?"
vRepoInfo := new(VRepoInfo)
row := seafileDB.QueryRow(sqlStr, repoID)
if err := row.Scan(&vRepoInfo.RepoID, &vRepoInfo.OriginRepoID, &vRepoInfo.Path, &vRepoInfo.BaseCommitID); err != nil {
if err != sql.ErrNoRows {
return nil, err
}
return nil, nil
}
return vRepoInfo, nil
}
// GetVirtualRepoInfoByOrigin returns virtual repo info by origin repo id.
func GetVirtualRepoInfoByOrigin(originRepo string) ([]*VRepoInfo, error) {
sqlStr := "SELECT repo_id, origin_repo, path, base_commit " +
"FROM VirtualRepo WHERE origin_repo=?"
var vRepos []*VRepoInfo
row, err := seafileDB.Query(sqlStr, originRepo)
if err != nil {
return nil, err
}
defer row.Close()
for row.Next() {
vRepoInfo := new(VRepoInfo)
if err := row.Scan(&vRepoInfo.RepoID, &vRepoInfo.OriginRepoID, &vRepoInfo.Path, &vRepoInfo.BaseCommitID); err != nil {
if err != sql.ErrNoRows {
return nil, err
}
}
vRepos = append(vRepos, vRepoInfo)
}
return vRepos, nil
}
// GetEmailByToken returns the user's email by token.
func GetEmailByToken(repoID string, token string) (string, error) {
var email string
sqlStr := "SELECT email FROM RepoUserToken WHERE repo_id = ? AND token = ?"
row := seafileDB.QueryRow(sqlStr, repoID, token)
if err := row.Scan(&email); err != nil {
if err != sql.ErrNoRows {
return email, err
}
}
return email, nil
}
// GetRepoStatus returns the repo status by repo id.
func GetRepoStatus(repoID string) (int, error) {
var status int
sqlStr := "SELECT status FROM RepoInfo WHERE repo_id=?"
row := seafileDB.QueryRow(sqlStr, repoID)
if err := row.Scan(&status); err != nil {
if err != sql.ErrNoRows {
return status, err
}
}
return status, nil
}
// TokenPeerInfoExists checks whether the token exists.
func TokenPeerInfoExists(token string) (bool, error) {
var exists string
sqlStr := "SELECT token FROM RepoTokenPeerInfo WHERE token=?"
row := seafileDB.QueryRow(sqlStr, token)
if err := row.Scan(&exists); err != nil {
if err != sql.ErrNoRows {
return false, err
}
return false, nil
}
return true, nil
}
// AddTokenPeerInfo adds token peer info to the RepoTokenPeerInfo table.
func AddTokenPeerInfo(token, peerID, peerIP, peerName, clientVer string, syncTime int64) error {
sqlStr := "INSERT INTO RepoTokenPeerInfo (token, peer_id, peer_ip, peer_name, sync_time, client_ver)" +
"VALUES (?, ?, ?, ?, ?, ?)"
if _, err := seafileDB.Exec(sqlStr, token, peerID, peerIP, peerName, syncTime, clientVer); err != nil {
return err
}
return nil
}
// UpdateTokenPeerInfo updates token peer info in the RepoTokenPeerInfo table.
func UpdateTokenPeerInfo(token, peerIP, clientVer string, syncTime int64) error {
sqlStr := "UPDATE RepoTokenPeerInfo SET " +
"peer_ip=?, sync_time=?, client_ver=? WHERE token=?"
if _, err := seafileDB.Exec(sqlStr, peerIP, syncTime, clientVer, token); err != nil {
return err
}
return nil
}
// GetUploadTmpFile gets the temp file path of an uploaded file.
func GetUploadTmpFile(repoID, filePath string) (string, error) {
var filePathNoSlash string
if filePath[0] == '/' {
filePathNoSlash = filePath[1:]
} else {
filePathNoSlash = filePath
filePath = "/" + filePath
}
var tmpFile string
sqlStr := "SELECT tmp_file_path FROM WebUploadTempFiles WHERE repo_id = ? AND file_path = ?"
row := seafileDB.QueryRow(sqlStr, repoID, filePath)
if err := row.Scan(&tmpFile); err != nil {
if err != sql.ErrNoRows {
return "", err
}
}
if tmpFile == "" {
row := seafileDB.QueryRow(sqlStr, repoID, filePathNoSlash)
if err := row.Scan(&tmpFile); err != nil {
if err != sql.ErrNoRows {
return "", err
}
}
}
return tmpFile, nil
}
// AddUploadTmpFile adds the temp file path of an uploaded file.
func AddUploadTmpFile(repoID, filePath, tmpFile string) error {
if filePath[0] != '/' {
filePath = "/" + filePath
}
sqlStr := "INSERT INTO WebUploadTempFiles (repo_id, file_path, tmp_file_path) VALUES (?, ?, ?)"
_, err := seafileDB.Exec(sqlStr, repoID, filePath, tmpFile)
if err != nil {
return err
}
return nil
}
// DelUploadTmpFile deletes the temp file path of an uploaded file.
func DelUploadTmpFile(repoID, filePath string) error {
var filePathNoSlash string
if filePath[0] == '/' {
filePathNoSlash = filePath[1:]
} else {
filePathNoSlash = filePath
filePath = "/" + filePath
}
sqlStr := "DELETE FROM WebUploadTempFiles WHERE repo_id = ? AND file_path IN (?, ?)"
_, err := seafileDB.Exec(sqlStr, repoID, filePath, filePathNoSlash)
if err != nil {
return err
}
return nil
}
// SetRepoCommitToDb updates the table of RepoInfo.
func SetRepoCommitToDb(repoID, repoName string, updateTime int64, version int, isEncrypted string, lastModifier string) error {
var exists int
var encrypted int
sqlStr := "SELECT 1 FROM RepoInfo WHERE repo_id=?"
row := seafileDB.QueryRow(sqlStr, repoID)
if err := row.Scan(&exists); err != nil {
if err != sql.ErrNoRows {
return err
}
}
if updateTime == 0 {
updateTime = time.Now().Unix()
}
if isEncrypted == "true" {
encrypted = 1
}
if exists == 1 {
sqlStr := "UPDATE RepoInfo SET name=?, update_time=?, version=?, is_encrypted=?, " +
"last_modifier=? WHERE repo_id=?"
if _, err := seafileDB.Exec(sqlStr, repoName, updateTime, version, encrypted, lastModifier, repoID); err != nil {
return err
}
} else {
sqlStr := "INSERT INTO RepoInfo (repo_id, name, update_time, version, is_encrypted, last_modifier) " +
"VALUES (?, ?, ?, ?, ?, ?)"
if _, err := seafileDB.Exec(sqlStr, repoID, repoName, updateTime, version, encrypted, lastModifier); err != nil {
return err
}
}
return nil
}
// SetVirtualRepoBaseCommitPath updates the table of VirtualRepo.
func SetVirtualRepoBaseCommitPath(repoID, baseCommitID, newPath string) error {
sqlStr := "UPDATE VirtualRepo SET base_commit=?, path=? WHERE repo_id=?"
if _, err := seafileDB.Exec(sqlStr, baseCommitID, newPath, repoID); err != nil {
return err
}
return nil
}
// GetVirtualRepoIDsByOrigin returns the virtual repo ids by origin repo id.
func GetVirtualRepoIDsByOrigin(repoID string) ([]string, error) {
sqlStr := "SELECT repo_id FROM VirtualRepo WHERE origin_repo=?"
var id string
var ids []string
row, err := seafileDB.Query(sqlStr, repoID)
if err != nil {
return nil, err
}
defer row.Close()
for row.Next() {
if err := row.Scan(&id); err != nil {
if err != sql.ErrNoRows {
return nil, err
}
}
ids = append(ids, id)
}
return ids, nil
}
// DelVirtualRepo deletes virtual repo from database.
func DelVirtualRepo(repoID string, cloudMode bool) error {
err := removeVirtualRepoOndisk(repoID, cloudMode)
if err != nil {
err := fmt.Errorf("failed to remove virtual repo on disk: %v", err)
return err
}
sqlStr := "DELETE FROM VirtualRepo WHERE repo_id = ?"
_, err = seafileDB.Exec(sqlStr, repoID)
if err != nil {
return err
}
return nil
}
func removeVirtualRepoOndisk(repoID string, cloudMode bool) error {
sqlStr := "DELETE FROM Repo WHERE repo_id = ?"
_, err := seafileDB.Exec(sqlStr, repoID)
if err != nil {
return err
}
sqlStr = "SELECT name, repo_id, commit_id FROM Branch WHERE repo_id=?"
rows, err := seafileDB.Query(sqlStr, repoID)
if err != nil {
return err
}
defer rows.Close()
for rows.Next() {
var name, id, commitID string
if err := rows.Scan(&name, &id, &commitID); err != nil {
if err != sql.ErrNoRows {
return err
}
}
sqlStr := "DELETE FROM RepoHead WHERE branch_name = ? AND repo_id = ?"
_, err := seafileDB.Exec(sqlStr, name, id)
if err != nil {
return err
}
sqlStr = "DELETE FROM Branch WHERE name=? AND repo_id=?"
_, err = seafileDB.Exec(sqlStr, name, id)
if err != nil {
return err
}
}
sqlStr = "DELETE FROM RepoOwner WHERE repo_id = ?"
_, err = seafileDB.Exec(sqlStr, repoID)
if err != nil {
return err
}
sqlStr = "DELETE FROM SharedRepo WHERE repo_id = ?"
_, err = seafileDB.Exec(sqlStr, repoID)
if err != nil {
return err
}
sqlStr = "DELETE FROM RepoGroup WHERE repo_id = ?"
_, err = seafileDB.Exec(sqlStr, repoID)
if err != nil {
return err
}
if !cloudMode {
sqlStr = "DELETE FROM InnerPubRepo WHERE repo_id = ?"
_, err := seafileDB.Exec(sqlStr, repoID)
if err != nil {
return err
}
}
sqlStr = "DELETE FROM RepoUserToken WHERE repo_id = ?"
_, err = seafileDB.Exec(sqlStr, repoID)
if err != nil {
return err
}
sqlStr = "DELETE FROM RepoValidSince WHERE repo_id = ?"
_, err = seafileDB.Exec(sqlStr, repoID)
if err != nil {
return err
}
sqlStr = "DELETE FROM RepoSize WHERE repo_id = ?"
_, err = seafileDB.Exec(sqlStr, repoID)
if err != nil {
return err
}
var exists int
sqlStr = "SELECT 1 FROM GarbageRepos WHERE repo_id=?"
row := seafileDB.QueryRow(sqlStr, repoID)
if err := row.Scan(&exists); err != nil {
if err != sql.ErrNoRows {
return err
}
}
if exists == 0 {
sqlStr = "INSERT INTO GarbageRepos (repo_id) VALUES (?)"
_, err := seafileDB.Exec(sqlStr, repoID)
if err != nil {
return err
}
} else {
sqlStr = "REPLACE INTO GarbageRepos (repo_id) VALUES (?)"
_, err := seafileDB.Exec(sqlStr, repoID)
if err != nil {
return err
}
}
return nil
}
// IsVirtualRepo checks whether the repo is a virtual repo.
func IsVirtualRepo(repoID string) (bool, error) {
var exists int
sqlStr := "SELECT 1 FROM VirtualRepo WHERE repo_id = ?"
row := seafileDB.QueryRow(sqlStr, repoID)
if err := row.Scan(&exists); err != nil {
if err != sql.ErrNoRows {
return false, err
}
return false, nil
}
return true, nil
}
// GetRepoOwner gets the owner of the repo.
func GetRepoOwner(repoID string) (string, error) {
var owner string
sqlStr := "SELECT owner_id FROM RepoOwner WHERE repo_id=?"
row := seafileDB.QueryRow(sqlStr, repoID)
if err := row.Scan(&owner); err != nil {
if err != sql.ErrNoRows {
return "", err
}
}
return owner, nil
}


@@ -0,0 +1,84 @@
package repomgr
import (
"database/sql"
"fmt"
"os"
"testing"
_ "github.com/go-sql-driver/mysql"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/searpc"
)
const (
// repoID = "9646f13e-bbab-4eaf-9a84-fb6e1cd776b3"
user = "seafile"
password = "seafile"
host = "127.0.0.1"
port = 3306
dbName = "seafile-db"
useTLS = false
seafileConfPath = "/root/conf"
seafileDataDir = "/root/conf/seafile-data"
repoName = "repo"
userName = "seafile@seafile.com"
encVersion = 2
pipePath = "/root/runtime/seafile.sock"
service = "seafserv-threaded-rpcserver"
)
var repoID string
var client *searpc.Client
func createRepo() string {
id, err := client.Call("seafile_create_repo", repoName, "", userName, nil, encVersion)
if err != nil {
fmt.Printf("failed to create repo: %v.\n", err)
os.Exit(1)
}
if id == nil {
fmt.Printf("repo id is nil.\n")
os.Exit(1)
}
repoid, ok := id.(string)
if !ok {
fmt.Printf("returned value isn't repo id.\n")
os.Exit(1)
}
return repoid
}
func delRepo() {
_, err := client.Call("seafile_destroy_repo", repoID)
if err != nil {
fmt.Printf("failed to del repo.\n")
os.Exit(1)
}
}
func TestMain(m *testing.M) {
client = searpc.Init(pipePath, service)
repoID = createRepo()
dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=%t", user, password, host, port, dbName, useTLS)
seafDB, err := sql.Open("mysql", dsn)
if err != nil {
fmt.Printf("Failed to open database: %v", err)
os.Exit(1)
}
Init(seafDB)
commitmgr.Init(seafileConfPath, seafileDataDir)
code := m.Run()
delRepo()
os.Exit(code)
}
func TestGet(t *testing.T) {
repo := Get(repoID)
if repo == nil {
t.Errorf("failed to get repo : %s.\n", repoID)
t.FailNow()
}
if repo.ID != repoID {
t.Errorf("failed to get repo : %s.\n", repoID)
}
}

fileserver/searpc/searpc.go

@@ -0,0 +1,122 @@
// Package searpc implements searpc client protocol with unix pipe transport.
package searpc
import (
"bufio"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"net"
)
// Client represents a connection to the RPC server.
type Client struct {
// path of the named pipe
pipePath string
// RPC service name
Service string
}
type request struct {
Service string `json:"service"`
Request string `json:"request"`
}
// Init initializes rpc client.
func Init(pipePath string, service string) *Client {
client := new(Client)
client.pipePath = pipePath
client.Service = service
return client
}
// Call calls the RPC function funcname with variadic parameters.
// The return value of the RPC function is returned as an interface{}.
// The concrete type can be int32, int64, string, struct (object), list of structs (objects) or JSON.
func (c *Client) Call(funcname string, params ...interface{}) (interface{}, error) {
// TODO: use reflection to compose requests and parse results.
var unixAddr *net.UnixAddr
unixAddr, err := net.ResolveUnixAddr("unix", c.pipePath)
if err != nil {
err := fmt.Errorf("failed to resolve unix addr when calling rpc : %v", err)
return nil, err
}
conn, err := net.DialUnix("unix", nil, unixAddr)
if err != nil {
err := fmt.Errorf("failed to dial unix when calling rpc : %v", err)
return nil, err
}
defer conn.Close()
var req []interface{}
req = append(req, funcname)
req = append(req, params...)
jsonstr, err := json.Marshal(req)
if err != nil {
err := fmt.Errorf("failed to encode rpc call to json : %v", err)
return nil, err
}
reqHeader := new(request)
reqHeader.Service = c.Service
reqHeader.Request = string(jsonstr)
jsonstr, err = json.Marshal(reqHeader)
if err != nil {
err := fmt.Errorf("failed to convert object to json : %v", err)
return nil, err
}
header := make([]byte, 4)
binary.LittleEndian.PutUint32(header, uint32(len(jsonstr)))
_, err = conn.Write(header)
if err != nil {
err := fmt.Errorf("failed to write rpc request header : %v", err)
return nil, err
}
_, err = conn.Write(jsonstr)
if err != nil {
err := fmt.Errorf("failed to write rpc request body : %v", err)
return nil, err
}
reader := bufio.NewReader(conn)
buflen := make([]byte, 4)
_, err = io.ReadFull(reader, buflen)
if err != nil {
err := fmt.Errorf("failed to read response header from rpc server : %v", err)
return nil, err
}
retlen := binary.LittleEndian.Uint32(buflen)
msg := make([]byte, retlen)
_, err = io.ReadFull(reader, msg)
if err != nil {
err := fmt.Errorf("failed to read response body from rpc server : %v", err)
return nil, err
}
retlist := make(map[string]interface{})
err = json.Unmarshal(msg, &retlist)
if err != nil {
err := fmt.Errorf("failed to decode rpc response : %v", err)
return nil, err
}
if _, ok := retlist["err_code"]; ok {
err := fmt.Errorf("searpc server returned error : %v", retlist["err_msg"])
return nil, err
}
if _, ok := retlist["ret"]; ok {
ret := retlist["ret"]
return ret, nil
}
err = fmt.Errorf("No value returned")
return nil, err
}
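For reference, Call frames every request as a 4-byte little-endian length followed by a JSON header whose "request" field is itself a JSON-encoded array of the function name and its arguments; the server replies with the same framing around an object carrying either "ret" or "err_code"/"err_msg". A standalone sketch of the bytes produced for one call (function name and argument are illustrative):

package main

import (
	"encoding/binary"
	"encoding/json"
	"fmt"
)

func main() {
	// Inner request: [funcname, params...] as a JSON array.
	inner, _ := json.Marshal([]interface{}{"seafile_get_repo", "repo-id"})
	// Outer header: the RPC service name plus the serialized inner request.
	outer, _ := json.Marshal(map[string]string{
		"service": "seafserv-threaded-rpcserver",
		"request": string(inner),
	})
	// 4-byte little-endian length prefix, then the JSON body.
	head := make([]byte, 4)
	binary.LittleEndian.PutUint32(head, uint32(len(outer)))
	fmt.Printf("% x %s\n", head, outer)
}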

View File

@@ -0,0 +1,81 @@
package searpc
import (
"os"
"testing"
)
const (
repoName = "repo"
userName = "seafile@seafile.com"
encVersion = 2
pipePath = "/root/runtime/seafile.sock"
service = "seafserv-threaded-rpcserver"
)
var client *Client
func TestMain(m *testing.M) {
client = Init(pipePath, service)
code := m.Run()
os.Exit(code)
}
func TestCallRpc(t *testing.T) {
repoID, err := client.Call("seafile_create_repo", repoName, "", userName, nil, encVersion)
if err != nil {
t.Errorf("failed to create repo.\n")
}
if repoID == nil {
t.Errorf("repo id is nil.\n")
t.FailNow()
}
repo, err := client.Call("seafile_get_repo", repoID)
if err != nil {
t.Errorf("failed to get repo.\n")
}
if repo == nil {
t.Errorf("repo is nil.\n")
t.FailNow()
}
repoMap, ok := repo.(map[string]interface{})
if !ok {
t.Errorf("failed to assert the type.\n")
t.FailNow()
}
if repoMap["id"] != repoID {
t.Errorf("wrong repo id.\n")
}
repoList, err := client.Call("seafile_get_repo_list", -1, -1, "")
if err != nil {
t.Errorf("failed to get repo list.\n")
}
if repoList == nil {
t.Errorf("repo list is nil.\n")
t.FailNow()
}
var exists bool
repos, ok := repoList.([]interface{})
if !ok {
t.Errorf("failed to assert the type.\n")
t.FailNow()
}
for _, v := range repos {
repo, ok := v.(map[string]interface{})
if !ok {
t.Errorf("failed to assert the type.\n")
t.FailNow()
}
if repo["id"] == repoID {
exists = true
break
}
}
if exists != true {
t.Errorf("can't find repo %s in repo list.\n", repoID)
}
client.Call("seafile_destroy_repo", repoID)
}

View File

@@ -0,0 +1,2 @@
// Package group manages group membership and group shares.
package group

View File

@@ -0,0 +1,2 @@
// Package public manages inner public shares.
package public

646
fileserver/share/share.go Normal file
View File

@@ -0,0 +1,646 @@
// Package share manages share relations.
// share: manages personal shares and provide high level permission check functions.
package share
import (
"database/sql"
"fmt"
"log"
"path/filepath"
"strconv"
"strings"
"github.com/haiwen/seafile-server/fileserver/repomgr"
)
type group struct {
id int
groupName string
creatorName string
timestamp int64
parentGroupID int
}
var ccnetDB *sql.DB
var seafileDB *sql.DB
var groupTableName string
var cloudMode bool
// Init initializes ccnetDB, seafileDB, groupTableName and cloudMode.
func Init(cnDB *sql.DB, seafDB *sql.DB, grpTableName string, clMode bool) {
ccnetDB = cnDB
seafileDB = seafDB
groupTableName = grpTableName
cloudMode = clMode
}
// CheckPerm gets the user's permission for the repo.
func CheckPerm(repoID string, user string) string {
var perm string
vInfo, err := repomgr.GetVirtualRepoInfo(repoID)
if err != nil {
log.Printf("Failed to get virtual repo info by repo id %s: %v", repoID, err)
}
if vInfo != nil {
perm = checkVirtualRepoPerm(repoID, vInfo.OriginRepoID, user, vInfo.Path)
return perm
}
perm = checkRepoSharePerm(repoID, user)
return perm
}
// GetGroupReposByUser gets the repos shared to the groups the user belongs to.
func GetGroupReposByUser(user string, orgID int) ([]*SharedRepo, error) {
groups, err := getGroupsByUser(user, true)
if err != nil {
return nil, err
}
if len(groups) == 0 {
return nil, nil
}
var sqlBuilder strings.Builder
if orgID < 0 {
sqlBuilder.WriteString("SELECT g.repo_id, " +
"user_name, permission, commit_id, " +
"i.name, i.update_time, i.version " +
"FROM RepoGroup g " +
"LEFT JOIN RepoInfo i ON g.repo_id = i.repo_id, " +
"Branch b WHERE g.repo_id = b.repo_id AND " +
"b.name = 'master' AND group_id IN (")
} else {
sqlBuilder.WriteString("SELECT g.repo_id, " +
"owner, permission, commit_id, " +
"i.name, i.update_time, i.version " +
"FROM OrgGroupRepo g " +
"LEFT JOIN RepoInfo i ON g.repo_id = i.repo_id, " +
"Branch b WHERE g.repo_id = b.repo_id AND " +
"b.name = 'master' AND group_id IN (")
}
for i := 0; i < len(groups); i++ {
sqlBuilder.WriteString(strconv.Itoa(groups[i].id))
if i+1 < len(groups) {
sqlBuilder.WriteString(",")
}
}
sqlBuilder.WriteString(" ) ORDER BY group_id")
rows, err := seafileDB.Query(sqlBuilder.String())
if err != nil {
return nil, err
}
defer rows.Close()
var repos []*SharedRepo
for rows.Next() {
gRepo := new(SharedRepo)
if err := rows.Scan(&gRepo.ID, &gRepo.Owner,
&gRepo.Permission, &gRepo.HeadCommitID,
&gRepo.Name, &gRepo.MTime, &gRepo.Version); err == nil {
repos = append(repos, gRepo)
}
}
if err := rows.Err(); err != nil {
return nil, err
}
return repos, nil
}
func checkVirtualRepoPerm(repoID, originRepoID, user, vPath string) string {
owner, err := repomgr.GetRepoOwner(originRepoID)
if err != nil {
log.Printf("Failed to get repo owner: %v", err)
}
var perm string
if owner != "" && owner == user {
perm = "rw"
return perm
}
perm = checkPermOnParentRepo(originRepoID, user, vPath)
if perm != "" {
return perm
}
perm = checkRepoSharePerm(originRepoID, user)
return perm
}
func getUserGroups(sqlStr string, args ...interface{}) ([]group, error) {
rows, err := ccnetDB.Query(sqlStr, args...)
if err != nil {
return nil, err
}
defer rows.Close()
var groups []group
var g group
for rows.Next() {
if err := rows.Scan(&g.id, &g.groupName,
&g.creatorName, &g.timestamp,
&g.parentGroupID); err == nil {
groups = append(groups, g)
}
}
if err := rows.Err(); err != nil {
return nil, err
}
return groups, nil
}
func getGroupsByUser(userName string, returnAncestors bool) ([]group, error) {
sqlStr := fmt.Sprintf("SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "+
"`%s` g, GroupUser u WHERE g.group_id = u.group_id AND user_name=? ORDER BY g.group_id DESC",
groupTableName)
groups, err := getUserGroups(sqlStr, userName)
if err != nil {
err := fmt.Errorf("Failed to get groups by user %s: %v", userName, err)
return nil, err
}
if !returnAncestors {
return groups, nil
}
sqlStr = ""
var ret []group
for _, group := range groups {
parentGroupID := group.parentGroupID
groupID := group.id
if parentGroupID != 0 {
if sqlStr == "" {
sqlStr = fmt.Sprintf("SELECT path FROM GroupStructure WHERE group_id IN (%d",
groupID)
} else {
sqlStr += fmt.Sprintf(", %d", groupID)
}
} else {
ret = append(ret, group)
}
}
if sqlStr != "" {
sqlStr += ")"
paths, err := getGroupPaths(sqlStr)
if err != nil {
log.Printf("Failed to get group paths: %v", err)
}
if paths == "" {
err := fmt.Errorf("Failed to get groups path for user %s", userName)
return nil, err
}
sqlStr = fmt.Sprintf("SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "+
"`%s` g WHERE g.group_id IN (%s) ORDER BY g.group_id DESC",
groupTableName, paths)
groups, err := getUserGroups(sqlStr)
if err != nil {
return nil, err
}
ret = append(ret, groups...)
}
return ret, nil
}
func getGroupPaths(sqlStr string) (string, error) {
var paths string
rows, err := ccnetDB.Query(sqlStr)
if err != nil {
return paths, err
}
defer rows.Close()
var path string
for rows.Next() {
rows.Scan(&path)
if paths == "" {
paths = path
} else {
paths += fmt.Sprintf(", %s", path)
}
}
if err := rows.Err(); err != nil {
return "", err
}
return paths, nil
}
func checkGroupPermByUser(repoID string, userName string) (string, error) {
groups, err := getGroupsByUser(userName, false)
if err != nil {
return "", err
}
if len(groups) == 0 {
return "", nil
}
var sqlBuilder strings.Builder
sqlBuilder.WriteString("SELECT permission FROM RepoGroup WHERE repo_id = ? AND group_id IN (")
for i := 0; i < len(groups); i++ {
sqlBuilder.WriteString(strconv.Itoa(groups[i].id))
if i+1 < len(groups) {
sqlBuilder.WriteString(",")
}
}
sqlBuilder.WriteString(")")
rows, err := seafileDB.Query(sqlBuilder.String(), repoID)
if err != nil {
err := fmt.Errorf("Failed to get group permission by user %s: %v", userName, err)
return "", err
}
defer rows.Close()
var perm string
var origPerm string
for rows.Next() {
if err := rows.Scan(&perm); err == nil {
if perm == "rw" {
origPerm = perm
} else if perm == "r" && origPerm == "" {
origPerm = perm
}
}
}
if err := rows.Err(); err != nil {
err := fmt.Errorf("Failed to get group permission for user %s: %v", userName, err)
return "", err
}
return origPerm, nil
}
func checkSharedRepoPerm(repoID string, email string) (string, error) {
sqlStr := "SELECT permission FROM SharedRepo WHERE repo_id=? AND to_email=?"
row := seafileDB.QueryRow(sqlStr, repoID, email)
var perm string
if err := row.Scan(&perm); err != nil {
if err != sql.ErrNoRows {
err := fmt.Errorf("Failed to check shared repo permission: %v", err)
return "", err
}
}
return perm, nil
}
func checkInnerPubRepoPerm(repoID string) (string, error) {
sqlStr := "SELECT permission FROM InnerPubRepo WHERE repo_id=?"
row := seafileDB.QueryRow(sqlStr, repoID)
var perm string
if err := row.Scan(&perm); err != nil {
if err != sql.ErrNoRows {
err := fmt.Errorf("Failed to check inner public repo permission: %v", err)
return "", err
}
}
return perm, nil
}
func checkRepoSharePerm(repoID string, userName string) string {
owner, err := repomgr.GetRepoOwner(repoID)
if err != nil {
log.Printf("Failed to get repo owner: %v", err)
}
if owner != "" && owner == userName {
perm := "rw"
return perm
}
perm, err := checkSharedRepoPerm(repoID, userName)
if err != nil {
log.Printf("Failed to get shared repo permission: %v", err)
}
if perm != "" {
return perm
}
perm, err = checkGroupPermByUser(repoID, userName)
if err != nil {
log.Printf("Failed to get group permission by user %s: %v", userName, err)
}
if perm != "" {
return perm
}
if !cloudMode {
perm, err = checkInnerPubRepoPerm(repoID)
if err != nil {
log.Printf("Failed to get inner pulic repo permission by repo id %s: %v", repoID, err)
return ""
}
return perm
}
return ""
}
func getSharedDirsToUser(originRepoID string, toEmail string) (map[string]string, error) {
dirs := make(map[string]string)
sqlStr := "SELECT v.path, s.permission FROM SharedRepo s, VirtualRepo v WHERE " +
"s.repo_id = v.repo_id AND s.to_email = ? AND v.origin_repo = ?"
rows, err := seafileDB.Query(sqlStr, toEmail, originRepoID)
if err != nil {
err := fmt.Errorf("Failed to get shared directories by user %s: %v", toEmail, err)
return nil, err
}
defer rows.Close()
var path string
var perm string
for rows.Next() {
if err := rows.Scan(&path, &perm); err == nil {
dirs[path] = perm
}
}
if err := rows.Err(); err != nil {
err := fmt.Errorf("Failed to get shared directories by user %s: %v", toEmail, err)
return nil, err
}
return dirs, nil
}
func getDirPerm(perms map[string]string, path string) string {
tmp := path
for tmp != "/" && tmp != "." && tmp != "" {
if perm, exists := perms[tmp]; exists {
return perm
}
tmp = filepath.Dir(tmp)
}
return ""
}
func convertGroupListToStr(groups []group) string {
var groupIDs strings.Builder
for i, group := range groups {
groupIDs.WriteString(strconv.Itoa(group.id))
if i+1 < len(groups) {
groupIDs.WriteString(",")
}
}
return groupIDs.String()
}
func getSharedDirsToGroup(originRepoID string, groups []group) (map[string]string, error) {
dirs := make(map[string]string)
groupIDs := convertGroupListToStr(groups)
sqlStr := fmt.Sprintf("SELECT v.path, s.permission "+
"FROM RepoGroup s, VirtualRepo v WHERE "+
"s.repo_id = v.repo_id AND v.origin_repo = ? "+
"AND s.group_id in (%s)", groupIDs)
rows, err := seafileDB.Query(sqlStr, originRepoID)
if err != nil {
err := fmt.Errorf("Failed to get shared directories: %v", err)
return nil, err
}
defer rows.Close()
var path string
var perm string
for rows.Next() {
if err := rows.Scan(&path, &perm); err == nil {
dirs[path] = perm
}
}
if err := rows.Err(); err != nil {
err := fmt.Errorf("Failed to get shared directories: %v", err)
return nil, err
}
return dirs, nil
}
func checkPermOnParentRepo(originRepoID, user, vPath string) string {
var perm string
userPerms, err := getSharedDirsToUser(originRepoID, user)
if err != nil {
log.Printf("Failed to get all shared folder perms in parent repo %.8s for user %s", originRepoID, user)
return ""
}
if len(userPerms) > 0 {
perm = getDirPerm(userPerms, vPath)
if perm != "" {
return perm
}
}
groups, err := getGroupsByUser(user, false)
if err != nil {
log.Printf("Failed to get groups by user %s: %v", user, err)
}
if len(groups) == 0 {
return perm
}
groupPerms, err := getSharedDirsToGroup(originRepoID, groups)
if err != nil {
log.Printf("Failed to get all shared folder perm from parent repo %.8s to all user groups", originRepoID)
return ""
}
if len(groupPerms) == 0 {
return ""
}
perm = getDirPerm(groupPerms, vPath)
return perm
}
// SharedRepo is a shared repo object
type SharedRepo struct {
Version int `json:"version"`
ID string `json:"id"`
HeadCommitID string `json:"head_commit_id"`
Name string `json:"name"`
MTime int64 `json:"mtime"`
Permission string `json:"permission"`
Type string `json:"type"`
Owner string `json:"owner"`
}
// GetReposByOwner gets the repos owned by the user.
func GetReposByOwner(email string) ([]*SharedRepo, error) {
var repos []*SharedRepo
query := "SELECT o.repo_id, b.commit_id, i.name, " +
"i.version, i.update_time, i.last_modifier FROM " +
"RepoOwner o LEFT JOIN Branch b ON o.repo_id = b.repo_id " +
"LEFT JOIN RepoInfo i ON o.repo_id = i.repo_id " +
"WHERE owner_id=? AND " +
"o.repo_id NOT IN (SELECT v.repo_id FROM VirtualRepo v) " +
"ORDER BY i.update_time DESC, o.repo_id"
stmt, err := seafileDB.Prepare(query)
if err != nil {
return nil, err
}
defer stmt.Close()
rows, err := stmt.Query(email)
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
repo := new(SharedRepo)
var repoName, lastModifier sql.NullString
if err := rows.Scan(&repo.ID, &repo.HeadCommitID,
&repoName, &repo.Version, &repo.MTime,
&lastModifier); err == nil {
if repo.HeadCommitID == "" {
continue
}
if !repoName.Valid || !lastModifier.Valid {
continue
}
if repoName.String == "" || lastModifier.String == "" {
continue
}
repo.Name = repoName.String
repos = append(repos, repo)
}
}
if err := rows.Err(); err != nil {
return nil, err
}
return repos, nil
}
// ListInnerPubRepos gets all inner public repos.
func ListInnerPubRepos() ([]*SharedRepo, error) {
query := "SELECT InnerPubRepo.repo_id, " +
"owner_id, permission, commit_id, i.name, " +
"i.update_time, i.version " +
"FROM InnerPubRepo " +
"LEFT JOIN RepoInfo i ON InnerPubRepo.repo_id = i.repo_id, RepoOwner, Branch " +
"WHERE InnerPubRepo.repo_id=RepoOwner.repo_id AND " +
"InnerPubRepo.repo_id = Branch.repo_id AND Branch.name = 'master'"
stmt, err := seafileDB.Prepare(query)
if err != nil {
return nil, err
}
defer stmt.Close()
rows, err := stmt.Query()
if err != nil {
return nil, err
}
defer rows.Close()
var repos []*SharedRepo
for rows.Next() {
repo := new(SharedRepo)
var repoName sql.NullString
if err := rows.Scan(&repo.ID, &repo.Owner,
&repo.Permission, &repo.HeadCommitID, &repoName,
&repo.MTime, &repo.Version); err == nil {
if !repoName.Valid {
continue
}
if repoName.String == "" {
continue
}
repo.Name = repoName.String
repos = append(repos, repo)
}
}
if err := rows.Err(); err != nil {
return nil, err
}
return repos, nil
}
// ListShareRepos lists shared repos by email.
func ListShareRepos(email, columnType string) ([]*SharedRepo, error) {
var repos []*SharedRepo
var query string
if columnType == "from_email" {
query = "SELECT sh.repo_id, to_email, " +
"permission, commit_id, " +
"i.name, i.update_time, i.version FROM " +
"SharedRepo sh LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id, Branch b " +
"WHERE from_email=? AND " +
"sh.repo_id = b.repo_id AND " +
"b.name = 'master' " +
"ORDER BY i.update_time DESC, sh.repo_id"
} else if columnType == "to_email" {
query = "SELECT sh.repo_id, from_email, " +
"permission, commit_id, " +
"i.name, i.update_time, i.version FROM " +
"SharedRepo sh LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id, Branch b " +
"WHERE to_email=? AND " +
"sh.repo_id = b.repo_id AND " +
"b.name = 'master' " +
"ORDER BY i.update_time DESC, sh.repo_id"
} else {
err := fmt.Errorf("Wrong column type: %s", columnType)
return nil, err
}
stmt, err := seafileDB.Prepare(query)
if err != nil {
return nil, err
}
defer stmt.Close()
rows, err := stmt.Query(email)
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
repo := new(SharedRepo)
var repoName sql.NullString
if err := rows.Scan(&repo.ID, &repo.Owner,
&repo.Permission, &repo.HeadCommitID,
&repoName, &repo.MTime, &repo.Version); err == nil {
if !repoName.Valid {
continue
}
if repoName.String == "" {
continue
}
repo.Name = repoName.String
repos = append(repos, repo)
}
}
if err := rows.Err(); err != nil {
return nil, err
}
return repos, nil
}
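Taken together, checkRepoSharePerm resolves a user's permission in a fixed order: the repo owner gets "rw", then a direct user share is checked, then the best group share ("rw" beating "r"), and finally, when not in cloud mode, an inner-public share. A minimal caller sketch, assuming Init has been run with live database handles (the IDs are illustrative):

package main

import (
	"fmt"

	"github.com/haiwen/seafile-server/fileserver/share"
)

// canWrite gates a write operation on the permission resolved by share.CheckPerm.
func canWrite(repoID, user string) bool {
	return share.CheckPerm(repoID, user) == "rw"
}

func main() {
	fmt.Println(canWrite("9646f13e-bbab-4eaf-9a84-fb6e1cd776b3", "seafile@seafile.com"))
}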

239
fileserver/size_sched.go Normal file
View File

@@ -0,0 +1,239 @@
package main
import (
"fmt"
"log"
"path/filepath"
"gopkg.in/ini.v1"
"database/sql"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/diff"
"github.com/haiwen/seafile-server/fileserver/fsmgr"
"github.com/haiwen/seafile-server/fileserver/repomgr"
)
// Job is a unit of work for the size-computation worker pool.
type Job struct {
callback jobCB
repoID string
}
type jobCB func(repoID string) error
var jobs = make(chan Job, 100)
func sizeSchedulerInit() {
var n int = 1
var seafileConfPath string
if centralDir != "" {
seafileConfPath = filepath.Join(centralDir, "seafile.conf")
} else {
seafileConfPath = filepath.Join(absDataDir, "seafile.conf")
}
config, err := ini.Load(seafileConfPath)
if err != nil {
log.Fatalf("Failed to load seafile.conf: %v", err)
}
if section, err := config.GetSection("scheduler"); err == nil {
if key, err := section.GetKey("size_sched_thread_num"); err == nil {
num, err := key.Int()
if err == nil {
n = num
}
}
}
go createWorkerPool(n)
}
// createWorkerPool starts n workers, each consuming jobs from the queue.
func createWorkerPool(n int) {
for i := 0; i < n; i++ {
go worker()
}
}
func worker() {
for job := range jobs {
if job.callback != nil {
err := job.callback(job.repoID)
if err != nil {
log.Printf("failed to run job: %v.\n", err)
}
}
}
}
func updateRepoSize(repoID string) {
job := Job{computeRepoSize, repoID}
jobs <- job
}
func computeRepoSize(repoID string) error {
var size int64
var fileCount int64
repo := repomgr.Get(repoID)
if repo == nil {
err := fmt.Errorf("failed to get repo %s", repoID)
return err
}
info, err := getOldRepoInfo(repoID)
if err != nil {
err := fmt.Errorf("failed to get old repo info: %v", err)
return err
}
if info != nil && info.HeadID == repo.HeadCommitID {
return nil
}
head, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
if err != nil {
err := fmt.Errorf("failed to get head commit %s", repo.HeadCommitID)
return err
}
var oldHead *commitmgr.Commit
if info != nil {
commit, _ := commitmgr.Load(repo.ID, info.HeadID)
oldHead = commit
}
if info != nil && oldHead != nil {
var results []*diff.DiffEntry
var changeSize int64
var changeFileCount int64
err := diff.DiffCommits(oldHead, head, &results, false)
if err != nil {
err := fmt.Errorf("failed to do diff commits: %v", err)
return err
}
for _, de := range results {
if de.Status == diff.DiffStatusDeleted {
changeSize -= de.Size
changeFileCount--
} else if de.Status == diff.DiffStatusAdded {
changeSize += de.Size
changeFileCount++
} else if de.Status == diff.DiffStatusModified {
changeSize = changeSize + de.Size - de.OriginSize
}
}
size = info.Size + changeSize
fileCount = info.FileCount + changeFileCount
} else {
info, err := fsmgr.GetFileCountInfoByPath(repo.StoreID, repo.RootID, "/")
if err != nil {
err := fmt.Errorf("failed to get file count")
return err
}
fileCount = info.FileCount
size = info.Size
}
err = setRepoSizeAndFileCount(repoID, repo.HeadCommitID, size, fileCount)
if err != nil {
err := fmt.Errorf("failed to set repo size and file count %s: %v", repoID, err)
return err
}
return nil
}
func setRepoSizeAndFileCount(repoID, newHeadID string, size, fileCount int64) error {
trans, err := seafileDB.Begin()
if err != nil {
err := fmt.Errorf("failed to start transaction: %v", err)
return err
}
var headID string
sqlStr := "SELECT head_id FROM RepoSize WHERE repo_id=?"
row := trans.QueryRow(sqlStr, repoID)
if err := row.Scan(&headID); err != nil {
if err != sql.ErrNoRows {
trans.Rollback()
return err
}
}
if headID == "" {
sqlStr := "INSERT INTO RepoSize (repo_id, size, head_id) VALUES (?, ?, ?)"
_, err = trans.Exec(sqlStr, repoID, size, newHeadID)
if err != nil {
trans.Rollback()
return err
}
} else {
sqlStr = "UPDATE RepoSize SET size = ?, head_id = ? WHERE repo_id = ?"
_, err = trans.Exec(sqlStr, size, newHeadID, repoID)
if err != nil {
trans.Rollback()
return err
}
}
var exist int
sqlStr = "SELECT 1 FROM RepoFileCount WHERE repo_id=?"
row = trans.QueryRow(sqlStr, repoID)
if err := row.Scan(&exist); err != nil {
if err != sql.ErrNoRows {
trans.Rollback()
return err
}
}
if exist != 0 {
sqlStr := "UPDATE RepoFileCount SET file_count=? WHERE repo_id=?"
_, err = trans.Exec(sqlStr, fileCount, repoID)
if err != nil {
trans.Rollback()
return err
}
} else {
sqlStr := "INSERT INTO RepoFileCount (repo_id,file_count) VALUES (?,?)"
_, err = trans.Exec(sqlStr, repoID, fileCount)
if err != nil {
trans.Rollback()
return err
}
}
return trans.Commit()
}
// RepoInfo contains repo information.
type RepoInfo struct {
HeadID string
Size int64
FileCount int64
}
func getOldRepoInfo(repoID string) (*RepoInfo, error) {
sqlStr := "select s.head_id,s.size,f.file_count FROM RepoSize s LEFT JOIN RepoFileCount f ON " +
"s.repo_id=f.repo_id WHERE s.repo_id=?"
repoInfo := new(RepoInfo)
row := seafileDB.QueryRow(sqlStr, repoID)
if err := row.Scan(&repoInfo.HeadID, &repoInfo.Size, &repoInfo.FileCount); err != nil {
if err != sql.ErrNoRows {
return nil, err
}
return nil, nil
}
return repoInfo, nil
}
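The incremental branch of computeRepoSize never rescans the whole tree: deleted entries subtract their size and file count, added entries add them, and modified entries adjust the size by new minus original. A standalone sketch of that arithmetic on made-up values:

package main

import "fmt"

// diffEntry mirrors just the fields of diff.DiffEntry that the size
// scheduler reads; the type and the values below are illustrative.
type diffEntry struct {
	status     string
	size       int64
	originSize int64
}

func main() {
	size, count := int64(100), int64(3) // previous repo size and file count
	entries := []diffEntry{
		{"added", 40, 0},     // +40 bytes, +1 file
		{"deleted", 10, 0},   // -10 bytes, -1 file
		{"modified", 25, 15}, // +10 bytes, file count unchanged
	}
	for _, de := range entries {
		switch de.status {
		case "added":
			size += de.size
			count++
		case "deleted":
			size -= de.size
			count--
		case "modified":
			size += de.size - de.originSize
		}
	}
	fmt.Println(size, count) // 140 3
}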

1261
fileserver/sync_api.go Normal file

File diff suppressed because it is too large

322
fileserver/virtual_repo.go Normal file
View File

@@ -0,0 +1,322 @@
package main
import (
"fmt"
"log"
"path/filepath"
"strings"
"time"
"math/rand"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/diff"
"github.com/haiwen/seafile-server/fileserver/fsmgr"
"github.com/haiwen/seafile-server/fileserver/repomgr"
)
func mergeVirtualRepo(repoID, excludeRepo string) {
virtual, err := repomgr.IsVirtualRepo(repoID)
if err != nil {
return
}
if virtual {
mergeRepo(repoID)
return
}
vRepos, _ := repomgr.GetVirtualRepoIDsByOrigin(repoID)
for _, id := range vRepos {
if id == excludeRepo {
continue
}
mergeRepo(id)
}
}
func mergeRepo(repoID string) error {
repo := repomgr.Get(repoID)
if repo == nil {
err := fmt.Errorf("failed to get virt repo %.10s", repoID)
return err
}
vInfo := repo.VirtualInfo
if vInfo == nil {
return nil
}
origRepo := repomgr.Get(vInfo.OriginRepoID)
if origRepo == nil {
err := fmt.Errorf("failed to get orig repo %.10s", repoID)
return err
}
head, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
if err != nil {
err := fmt.Errorf("failed to get commit %s:%.8s", repo.ID, repo.HeadCommitID)
return err
}
origHead, err := commitmgr.Load(origRepo.ID, origRepo.HeadCommitID)
if err != nil {
err := fmt.Errorf("failed to get commit %s:%.8s", origRepo.ID, origRepo.HeadCommitID)
return err
}
var origRoot string
origRoot, _ = fsmgr.GetSeafdirIDByPath(origRepo.StoreID, origHead.RootID, vInfo.Path)
if origRoot == "" {
newPath, _ := handleMissingVirtualRepo(origRepo, origHead, vInfo)
if newPath != "" {
origRoot, _ = fsmgr.GetSeafdirIDByPath(origRepo.StoreID, origHead.RootID, newPath)
}
if origRoot == "" {
return nil
}
}
base, err := commitmgr.Load(origRepo.ID, vInfo.BaseCommitID)
if err != nil {
err := fmt.Errorf("failed to get commit %s:%.8s", origRepo.ID, vInfo.BaseCommitID)
return err
}
root := head.RootID
baseRoot, _ := fsmgr.GetSeafdirIDByPath(origRepo.StoreID, base.RootID, vInfo.Path)
if baseRoot == "" {
err := fmt.Errorf("cannot find seafdir for repo %.10s path %s", vInfo.OriginRepoID, vInfo.Path)
return err
}
if root == origRoot {
// Nothing changed on either side; no merge is needed.
} else if baseRoot == root {
// The virtual repo is unchanged since base; fast-forward it to the origin's state.
_, err := updateDir(repoID, "/", origRoot, origHead.CreatorName, head.CommitID)
if err != nil {
err := fmt.Errorf("failed to update root of virtual repo %.10s", repoID)
return err
}
repomgr.SetVirtualRepoBaseCommitPath(repo.ID, origRepo.HeadCommitID, vInfo.Path)
} else if baseRoot == origRoot {
// The origin is unchanged since base; push the virtual repo's root into the origin.
newBaseCommit, err := updateDir(vInfo.OriginRepoID, vInfo.Path, root, head.CreatorName, origHead.CommitID)
if err != nil {
err := fmt.Errorf("failed to update origin repo%.10s path %s", vInfo.OriginRepoID, vInfo.Path)
return err
}
repomgr.SetVirtualRepoBaseCommitPath(repo.ID, newBaseCommit, vInfo.Path)
cleanupVirtualRepos(vInfo.OriginRepoID)
mergeVirtualRepo(vInfo.OriginRepoID, repoID)
} else {
// Both sides changed; do a real three-way merge of base, origin and virtual roots.
roots := []string{baseRoot, origRoot, root}
opt := new(mergeOptions)
opt.remoteRepoID = repoID
opt.remoteHead = head.CommitID
err := mergeTrees(origRepo.StoreID, roots, opt)
if err != nil {
err := fmt.Errorf("failed to merge")
return err
}
_, err = updateDir(repoID, "/", opt.mergedRoot, origHead.CreatorName, head.CommitID)
if err != nil {
err := fmt.Errorf("failed to update root of virtual repo %.10s", repoID)
return err
}
newBaseCommit, err := updateDir(vInfo.OriginRepoID, vInfo.Path, opt.mergedRoot, head.CreatorName, origHead.CommitID)
if err != nil {
err := fmt.Errorf("failed to update origin repo %.10s path %s", vInfo.OriginRepoID, vInfo.Path)
return err
}
repomgr.SetVirtualRepoBaseCommitPath(repo.ID, newBaseCommit, vInfo.Path)
cleanupVirtualRepos(vInfo.OriginRepoID)
mergeVirtualRepo(vInfo.OriginRepoID, repoID)
}
return nil
}
func cleanupVirtualRepos(repoID string) error {
repo := repomgr.Get(repoID)
if repo == nil {
err := fmt.Errorf("failed to get repo %.10s", repoID)
return err
}
head, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
if err != nil {
err := fmt.Errorf("failed to load commit %s/%s : %v", repo.ID, repo.HeadCommitID, err)
return err
}
vRepos, err := repomgr.GetVirtualRepoInfoByOrigin(repoID)
if err != nil {
err := fmt.Errorf("failed to get virtual repo ids by origin repo %.10s", repoID)
return err
}
for _, vInfo := range vRepos {
_, err := fsmgr.GetSeafdirByPath(repo.StoreID, head.RootID, vInfo.Path)
if err != nil {
if err == fsmgr.ErrPathNoExist {
handleMissingVirtualRepo(repo, head, vInfo)
}
}
}
return nil
}
func handleMissingVirtualRepo(repo *repomgr.Repo, head *commitmgr.Commit, vInfo *repomgr.VRepoInfo) (string, error) {
parent, err := commitmgr.Load(head.RepoID, head.ParentID)
if err != nil {
err := fmt.Errorf("failed to load commit %s/%s : %v", head.RepoID, head.ParentID, err)
return "", err
}
var results []*diff.DiffEntry
err = diff.DiffCommits(parent, head, &results, true)
if err != nil {
err := fmt.Errorf("failed to diff commits")
return "", err
}
parPath := vInfo.Path
var isRenamed bool
var subPath string
var returnPath string
for {
var newPath string
oldDirID, err := fsmgr.GetSeafdirIDByPath(repo.StoreID, parent.RootID, parPath)
if err != nil || oldDirID == "" {
if err == fsmgr.ErrPathNoExist {
repomgr.DelVirtualRepo(vInfo.RepoID, cloudMode)
}
err := fmt.Errorf("failed to find %s under commit %s in repo %s", parPath, parent.CommitID, repo.StoreID)
return "", err
}
for _, de := range results {
if de.Status == diff.DiffStatusDirRenamed {
if de.Sha1 == oldDirID {
if subPath != "" {
newPath = filepath.Join("/", de.NewName, subPath)
} else {
newPath = filepath.Join("/", de.NewName)
}
repomgr.SetVirtualRepoBaseCommitPath(vInfo.RepoID, head.CommitID, newPath)
returnPath = newPath
if subPath == "" {
newName := filepath.Base(newPath)
err := editRepo(repo.ID, newName, "Changed library name", "")
if err != nil {
log.Printf("falied to rename repo %s.\n", newName)
}
}
isRenamed = true
break
}
}
}
if isRenamed {
break
}
slash := strings.LastIndex(parPath, "/")
if slash <= 0 {
break
}
subPath = filepath.Base(parPath)
parPath = filepath.Dir(parPath)
}
if !isRenamed {
repomgr.DelVirtualRepo(vInfo.RepoID, cloudMode)
}
return returnPath, nil
}
func editRepo(repoID, name, desc, user string) error {
if name == "" && desc == "" {
err := fmt.Errorf("at least one argument should be non-null")
return err
}
var retryCnt int
for {
retry, err := editRepoNeedRetry(repoID, name, desc, user)
if err != nil {
err := fmt.Errorf("failed to edit repo: %v", err)
return err
}
if !retry {
break
}
if retryCnt >= 3 {
err := fmt.Errorf("stop edit repo %s after 3 retries", repoID)
return err
}
random := rand.Intn(10) + 1
time.Sleep(time.Duration(random*100) * time.Millisecond)
retryCnt++
}
return nil
}
func editRepoNeedRetry(repoID, name, desc, user string) (bool, error) {
repo := repomgr.Get(repoID)
if repo == nil {
err := fmt.Errorf("no such library")
return false, err
}
if name == "" {
name = repo.Name
}
if desc == "" {
desc = repo.Desc
}
parent, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
if err != nil {
err := fmt.Errorf("failed to get commit %s:%s", repo.ID, repo.HeadCommitID)
return false, err
}
if user == "" {
user = parent.CreatorName
}
commit := commitmgr.NewCommit(repoID, parent.CommitID, parent.RootID, user, "Changed library name or description")
repomgr.RepoToCommit(repo, commit)
commit.RepoName = name
commit.RepoDesc = desc
err = commitmgr.Save(commit)
if err != nil {
err := fmt.Errorf("failed to add commit: %v", err)
return false, err
}
err = updateBranch(repoID, commit.CommitID, parent.CommitID)
if err != nil {
return true, nil
}
updateRepoInfo(repoID, commit.CommitID)
// Success; no retry needed.
return false, nil
}
func updateRepoInfo(repoID, commitID string) error {
head, err := commitmgr.Load(repoID, commitID)
if err != nil {
err := fmt.Errorf("failed to get commit %s:%s", repoID, commitID)
return err
}
repomgr.SetRepoCommitToDb(repoID, head.RepoName, head.Ctime, head.Version, head.Encrypted, head.CreatorName)
return nil
}

View File

@@ -111,6 +111,10 @@ seafile_session_new(const char *central_config_dir,
"general", "cloud_mode",
NULL);
session->go_fileserver = g_key_file_get_boolean (config,
"fileserver", "use_go_fileserver",
NULL);
if (load_database_config (session) < 0) {
seaf_warning ("Failed to load database config.\n");
goto onerror;
@@ -278,10 +282,12 @@ seafile_session_start (SeafileSession *session)
return -1;
}
if (!session->go_fileserver) {
if (seaf_http_server_start (session->http_server) < 0) {
seaf_warning ("Failed to start http server thread.\n");
return -1;
}
}
return 0;
}
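With this change the C daemon skips its built-in HTTP server only when the option it reads above is enabled, so switching to the Go fileserver comes down to one key in seafile.conf; a minimal fragment:

[fileserver]
# Hand file serving over to the Go fileserver; the C HTTP server
# thread is not started when this is true.
use_go_fileserver = true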

View File

@@ -76,6 +76,8 @@ struct _SeafileSession {
gboolean create_tables;
gboolean ccnet_create_tables;
gboolean go_fileserver;
};
extern SeafileSession *seaf;

View File

@@ -176,6 +176,7 @@ seaf_web_at_manager_get_access_token (SeafWebAccessTokenManager *mgr,
pthread_mutex_unlock (&mgr->priv->lock);
if (!seaf->go_fileserver) {
if (strcmp(op, "download-dir") == 0 ||
strcmp(op, "download-multi") == 0 ||
strcmp(op, "download-dir-link") == 0 ||
@@ -200,6 +201,7 @@ seaf_web_at_manager_get_access_token (SeafWebAccessTokenManager *mgr,
}
g_object_unref (webaccess);
}
}
return t;
}

View File

@@ -12,6 +12,12 @@ PORT = 9999
[Database]
CREATE_TABLES = true
ENGINE = mysql
HOST = 127.0.0.1
USER = seafile
PASSWD = seafile
DB = ccnet_db
CONNECTION_CHARSET=utf8
#[Database]
#ENGINE = mysql

View File

@@ -0,0 +1,207 @@
import pytest
import requests
import os
import time
from tests.config import USER, USER2
from seaserv import seafile_api as api
from requests_toolbelt import MultipartEncoder
file_name = 'file.txt'
file_name_not_replaced = 'file (1).txt'
file_path = os.getcwd() + '/' + file_name
file_content = 'File content.\r\n'
file_size = len(file_content)
resumable_file_name = 'resumable.txt'
resumable_test_file_name = 'test/resumable.txt'
chunked_part1_name = 'part1.txt'
chunked_part2_name = 'part2.txt'
chunked_part1_path = os.getcwd() + '/' + chunked_part1_name
chunked_part2_path = os.getcwd() + '/' + chunked_part2_name
chunked_part1_content = 'First line.\r\n'
chunked_part2_content = 'Second line.\r\n'
total_size = len(chunked_part1_content) + len(chunked_part2_content)
#file_id is not used when uploading files, but
#the argument obj_id of get_fileserver_access_token shouldn't be NULL.
file_id = '0000000000000000000000000000000000000000'
def create_test_file():
fp = open(file_path, 'w')
fp.close()
fp = open(chunked_part1_path, 'w')
fp.close()
fp = open(chunked_part2_path, 'w')
fp.close()
def create_test_dir(repo, dir_name):
parent_dir = '/'
api.post_dir(repo.id,parent_dir,dir_name,USER)
def assert_upload_response(response, replace, file_exist):
assert response.status_code == 200
response_json = response.json()
assert response_json[0]['size'] == 0
assert response_json[0]['id'] == file_id
if file_exist and not replace:
assert response_json[0]['name'] == file_name_not_replaced
else:
assert response_json[0]['name'] == file_name
def assert_resumable_upload_response(response, repo_id, file_name, upload_complete):
assert response.status_code == 200
if not upload_complete:
assert response.text == '{"success": true}'
offset = api.get_upload_tmp_file_offset(repo_id, '/' + file_name)
assert offset == len(chunked_part1_content)
else:
response_json = response.json()
assert response_json[0]['size'] == total_size
new_file_id = response_json[0]['id']
assert len(new_file_id) == 40 and new_file_id != file_id
assert response_json[0]['name'] == resumable_file_name
def assert_update_response(response, is_json):
assert response.status_code == 200
if is_json:
response_json = response.json()
assert response_json[0]['size'] == file_size
new_file_id = response_json[0]['id']
assert len(new_file_id) == 40 and new_file_id != file_id
assert response_json[0]['name'] == file_name
else:
new_file_id = response.text
assert len(new_file_id) == 40 and new_file_id != file_id
def request_resumable_upload(filepath, headers,upload_url_base,parent_dir,is_ajax):
write_file(chunked_part1_path, chunked_part1_content)
write_file(chunked_part2_path, chunked_part2_content)
m = MultipartEncoder(
fields={
'parent_dir': parent_dir,
'file': (resumable_file_name, open(filepath, 'rb'), 'application/octet-stream')
})
params = {'ret-json':'1'}
headers["Content-type"] = m.content_type
if is_ajax:
response = requests.post(upload_url_base, headers = headers,
data = m)
else:
response = requests.post(upload_url_base, headers = headers,
data = m, params = params)
return response
def write_file(file_path, file_content):
fp = open(file_path, 'w')
fp.write(file_content)
fp.close()
def del_local_files():
os.remove(file_path)
os.remove(chunked_part1_path)
os.remove(chunked_part2_path)
def test_merge_virtual_repo(repo):
api.post_dir(repo.id, '/dir1', 'subdir1', USER)
api.post_dir(repo.id, '/dir2', 'subdir2', USER)
v_repo_id = api.share_subdir_to_user(repo.id, '/dir1', USER, USER2, 'rw')
create_test_file()
params = {'ret-json':'1'}
obj_id = '{"parent_dir":"/"}'
create_test_dir(repo,'test')
#test upload file to virtual repo root dir.
token = api.get_fileserver_access_token(v_repo_id, obj_id, 'upload', USER2, False)
upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token
m = MultipartEncoder(
fields={
'parent_dir': '/',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base, params = params,
data = m, headers = {'Content-Type': m.content_type})
assert_upload_response(response, False, False)
time.sleep (1.5)
repo_size = api.get_repo_size (v_repo_id)
assert repo_size == 0
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == 0
#test resumable upload file to virtual repo root dir
parent_dir = '/'
headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1),
str(total_size)),
'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)}
response = request_resumable_upload(chunked_part1_path,headers, upload_url_base,parent_dir, False)
assert_resumable_upload_response(response, v_repo_id,
resumable_file_name, False)
time.sleep (1.5)
v_repo_size = api.get_repo_size (v_repo_id)
assert v_repo_size == 0
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == 0
headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)),
str(total_size - 1),
str(total_size)),
'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)}
response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, False)
assert_resumable_upload_response(response, v_repo_id,
resumable_file_name, True)
time.sleep (2.5)
v_repo_size = api.get_repo_size (v_repo_id)
assert v_repo_size == total_size
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == total_size
#test update file to virtual repo.
write_file(file_path, file_content)
token = api.get_fileserver_access_token(v_repo_id, obj_id, 'update', USER2, False)
update_url_base = 'http://127.0.0.1:8082/update-api/' + token
m = MultipartEncoder(
fields={
'target_file': '/' + file_name,
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(update_url_base,
data = m, headers = {'Content-Type': m.content_type})
assert_update_response(response, False)
time.sleep (1.5)
v_repo_size = api.get_repo_size (v_repo_id)
assert v_repo_size == total_size + file_size
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == total_size + file_size
api.del_file(v_repo_id, '/', file_name, USER2)
time.sleep (1.5)
v_repo_size = api.get_repo_size (v_repo_id)
assert v_repo_size == total_size
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == total_size
api.del_file(v_repo_id, '/', resumable_file_name, USER2)
time.sleep (1.5)
v_repo_size = api.get_repo_size (v_repo_id)
assert v_repo_size == 0
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == 0
api.del_file(repo.id, '/dir1', 'subdir1', USER)
api.del_file(repo.id, '/dir2', 'subdir2', USER)
assert api.unshare_subdir_for_user(repo.id, '/dir1', USER, USER2) == 0
del_local_files()
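The resumable uploads above describe each chunk with a standard Content-Range header, bytes start-end/total, plus a Content-Disposition carrying the final file name; the endpoints also expect the multipart form fields shown in the tests. A Go sketch of just the header arithmetic for one chunk (URL, token, names and sizes are illustrative):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// uploadChunk sends one slice of a resumable upload. offset is the index
// of the chunk's first byte and total the final file size.
func uploadChunk(url, name string, chunk []byte, offset, total int64) error {
	req, err := http.NewRequest("POST", url, bytes.NewReader(chunk))
	if err != nil {
		return err
	}
	end := offset + int64(len(chunk)) - 1
	req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", offset, end, total))
	req.Header.Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", name))
	_, err = http.DefaultClient.Do(req)
	return err
}

func main() {
	// "First line.\r\n" is chunk one of a 27-byte file, as in the tests.
	_ = uploadChunk("http://127.0.0.1:8082/upload-api/<token>", "resumable.txt",
		[]byte("First line.\r\n"), 0, 27)
}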

View File

@@ -1,8 +1,10 @@
import pytest
import requests
import os
import time
from tests.config import USER
from seaserv import seafile_api as api
from requests_toolbelt import MultipartEncoder
file_name = 'file.txt'
file_name_not_replaced = 'file (1).txt'
@@ -75,15 +77,19 @@ def request_resumable_upload(filepath, headers,upload_url_base,parent_dir,is_aja
write_file(chunked_part1_path, chunked_part1_content)
write_file(chunked_part2_path, chunked_part2_content)
files = {'file': open(filepath, 'rb'),
'parent_dir':parent_dir}
m = MultipartEncoder(
fields={
'parent_dir': parent_dir,
'file': (resumable_file_name, open(filepath, 'rb'), 'application/octet-stream')
})
params = {'ret-json':'1'}
headers["Content-type"] = m.content_type
if is_ajax:
response = requests.post(upload_url_base, headers = headers,
files = files)
data = m)
else:
response = requests.post(upload_url_base, headers = headers,
files = files, params = params)
data = m, params = params)
return response
def write_file(file_path, file_content):
@@ -110,58 +116,106 @@ def test_ajax(repo):
#test upload file to test dir.
token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False)
upload_url_base = 'http://127.0.0.1:8082/upload-aj/'+ token
files = {'file': open(file_path, 'rb'),
'parent_dir':'/test'}
response = requests.post(upload_url_base, files = files)
m = MultipartEncoder(
fields={
'parent_dir': '/test',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base,
data = m, headers = {'Content-Type': m.content_type})
assert response.status_code == 403
#test upload file to root dir.
token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False)
upload_url_base = 'http://127.0.0.1:8082/upload-aj/'+ token
files = {'file': open(file_path, 'rb'),
'parent_dir':'/'}
response = requests.post(upload_url_base, files = files)
m = MultipartEncoder(
fields={
'parent_dir': '/',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base,
data = m, headers = {'Content-Type': m.content_type})
assert_upload_response(response, False, False)
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == 0
#test upload file to test dir when file already exists.
files = {'file': open(file_path, 'rb'),
'parent_dir':'/test'}
response = requests.post(upload_url_base, files = files)
m = MultipartEncoder(
fields={
'parent_dir': '/test',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base,
data = m, headers = {'Content-Type': m.content_type})
assert response.status_code == 403
#test upload file to root dir when file already exists.
files = {'file': open(file_path, 'rb'),
'parent_dir':'/'}
response = requests.post(upload_url_base, files = files)
m = MultipartEncoder(
fields={
'parent_dir': '/',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base,
data = m, headers = {'Content-Type': m.content_type})
assert_upload_response(response, False, True)
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == 0
#test upload file to subdir whose parent is test dir.
files = {'file': open(file_path, 'rb'),
'parent_dir':'/test',
'relative_path':'subdir'}
response = requests.post(upload_url_base, files = files)
m = MultipartEncoder(
fields={
'parent_dir': '/test',
'relative_path':'subdir',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base,
data = m, headers = {'Content-Type': m.content_type})
assert response.status_code == 403
#test upload file to subdir whose parent is root dir.
files = {'file': open(file_path, 'rb'),
'parent_dir':'/',
'relative_path':'subdir'}
response = requests.post(upload_url_base, files = files)
m = MultipartEncoder(
fields={
'parent_dir': '/',
'relative_path':'subdir',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base,
data = m, headers = {'Content-Type': m.content_type})
assert_upload_response(response, False, False)
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == 0
#test upload file to subdir whose parent is test dir when file already exists.
files = {'file': open(file_path, 'rb'),
'parent_dir':'/test',
'relative_path':'subdir'}
response = requests.post(upload_url_base, files = files)
m = MultipartEncoder(
fields={
'parent_dir': '/test',
'relative_path':'subdir',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base,
data = m, headers = {'Content-Type': m.content_type})
assert response.status_code == 403
#test upload file to subdir whose parent is root dir when file already exists.
files = {'file': open(file_path, 'rb'),
'parent_dir':'/',
'relative_path':'subdir'}
response = requests.post(upload_url_base, files = files)
m = MultipartEncoder(
fields={
'parent_dir': '/',
'relative_path':'subdir',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base,
data = m, headers = {'Content-Type': m.content_type})
assert_upload_response(response, False, True)
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == 0
#test resumable upload file to test dir
parent_dir = '/test'
headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1),
@@ -187,25 +241,39 @@ def test_ajax(repo):
assert_resumable_upload_response(response, repo.id,
resumable_file_name, False)
repo_size = api.get_repo_size (repo.id)
assert repo_size == 0
headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)),
str(total_size - 1),
str(total_size)),
'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)}
response = requests.post(upload_url_base, headers = headers,
files = files)
response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, True)
assert_resumable_upload_response(response, repo.id,
resumable_file_name, True)
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == total_size
#test update file.
write_file(file_path, file_content)
token = api.get_fileserver_access_token(repo.id, obj_id, 'update', USER, False)
update_url_base = 'http://127.0.0.1:8082/update-aj/' + token
files = {'file': open(file_path, 'rb'),
'target_file':'/' + file_name}
response = requests.post(update_url_base, files = files)
m = MultipartEncoder(
fields={
'target_file': '/' + file_name,
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(update_url_base,
data = m, headers = {'Content-Type': m.content_type})
assert_update_response(response, True)
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == total_size + file_size
time.sleep(1)
del_repo_files(repo.id)
del_local_files()
@@ -217,101 +285,172 @@ def test_api(repo):
#test upload file to test dir instead of root dir.
token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False)
upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token
files = {'file':open(file_path, 'rb'),
'parent_dir':'/test'}
m = MultipartEncoder(
fields={
'parent_dir': '/test',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base, params = params,
files = files)
data = m, headers = {'Content-Type': m.content_type})
assert response.status_code == 403
#test upload file to root dir.
params = {'ret-json':'1'}
token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False)
upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token
files = {'file':open(file_path, 'rb'),
'parent_dir':'/'}
m = MultipartEncoder(
fields={
'parent_dir': '/',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base, params = params,
files = files)
data = m, headers = {'Content-Type': m.content_type})
assert_upload_response(response, False, False)
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == 0
#test upload file to test dir instead of root dir when file already exists and replace is set.
files = {'file':open(file_path, 'rb'),
'parent_dir':'/test',
'replace':'1'}
params = {'ret-json':'1'}
m = MultipartEncoder(
fields={
'parent_dir': '/test',
'replace': '1',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base, params = params,
files = files)
data = m, headers = {'Content-Type': m.content_type})
assert response.status_code == 403
#test upload file to root dir when file already exists and replace is set.
files = {'file':open(file_path, 'rb'),
'parent_dir':'/',
'replace':'1'}
params = {'ret-json':'1'}
m = MultipartEncoder(
fields={
'parent_dir': '/',
'replace': '1',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base, params = params,
files = files)
data = m, headers = {'Content-Type': m.content_type})
assert_upload_response(response, True, True)
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == 0
#test upload file to test dir instead of root dir when file already exists and replace is unset.
files = {'file':open(file_path, 'rb'),
'parent_dir':'/test'}
params = {'ret-json':'1'}
m = MultipartEncoder(
fields={
'parent_dir': '/test',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base, params = params,
files = files)
data = m, headers = {'Content-Type': m.content_type})
assert response.status_code == 403
#test upload file to root dir when file already exists and replace is unset.
files = {'file':open(file_path, 'rb'),
'parent_dir':'/'}
params = {'ret-json':'1'}
m = MultipartEncoder(
fields={
'parent_dir': '/',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base, params = params,
files = files)
data = m, headers = {'Content-Type': m.content_type})
assert_upload_response(response, False, True)
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == 0
#test upload the file to subdir whose parent is test.
files = {'file':open(file_path, 'rb'),
'parent_dir':'/test',
'relative_path':'subdir'}
params = {'ret-json':'1'}
m = MultipartEncoder(
fields={
'parent_dir': '/test',
'relative_path': 'subdir',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base, params = params,
files = files)
data = m, headers = {'Content-Type': m.content_type})
assert response.status_code == 403
#test upload the file to subdir.
files = {'file':open(file_path, 'rb'),
'parent_dir':'/',
'relative_path':'subdir'}
params = {'ret-json':'1'}
m = MultipartEncoder(
fields={
'parent_dir': '/',
'relative_path': 'subdir',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base, params = params,
files = files)
data = m, headers = {'Content-Type': m.content_type})
assert_upload_response(response, False, False)
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == 0
#test upload the file to subdir whose parent is test when file already exists and replace is set.
files = {'file':open(file_path, 'rb'),
'parent_dir':'/test',
'relative_path':'subdir',
'replace':'1'}
params = {'ret-json':'1'}
m = MultipartEncoder(
fields={
'parent_dir': '/test',
'relative_path': 'subdir',
'replace': '1',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base, params = params,
files = files)
data = m, headers = {'Content-Type': m.content_type})
assert response.status_code == 403
#test upload the file to subdir when file already exists and replace is set.
files = {'file':open(file_path, 'rb'),
'parent_dir':'/',
'relative_path':'subdir',
'replace':'1'}
params = {'ret-json':'1'}
m = MultipartEncoder(
fields={
'parent_dir': '/',
'relative_path': 'subdir',
'replace': '1',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base, params = params,
files = files)
data = m, headers = {'Content-Type': m.content_type})
assert_upload_response(response, True, True)
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == 0
#test upload the file to subdir whose parent is test dir when file already exists and replace is unset.
files = {'file':open(file_path, 'rb'),
'parent_dir':'/test',
'relative_path':'subdir'}
params = {'ret-json':'1'}
m = MultipartEncoder(
fields={
'parent_dir': '/test',
'relative_path': 'subdir',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base, params = params,
files = files)
data = m, headers = {'Content-Type': m.content_type})
assert response.status_code == 403
#test upload the file to subdir when file already exists and replace is unset.
files = {'file':open(file_path, 'rb'),
'parent_dir':'/',
'relative_path':'subdir'}
params = {'ret-json':'1'}
m = MultipartEncoder(
fields={
'parent_dir': '/',
'relative_path': 'subdir',
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(upload_url_base, params = params,
files = files)
data = m, headers = {'Content-Type': m.content_type})
assert_upload_response(response, False, True)
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == 0
#test resumable upload file to test
parent_dir = '/test'
headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1),
@@ -321,6 +460,10 @@ def test_api(repo):
assert_resumable_upload_response(response, repo.id,
resumable_test_file_name, False)
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == 0
headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)),
str(total_size - 1),
str(total_size)),
@@ -337,6 +480,9 @@ def test_api(repo):
assert_resumable_upload_response(response, repo.id,
resumable_file_name, False)
repo_size = api.get_repo_size (repo.id)
assert repo_size == 0
headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)),
str(total_size - 1),
str(total_size)),
@@ -345,14 +491,27 @@ def test_api(repo):
assert_resumable_upload_response(response, repo.id,
resumable_file_name, True)
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == total_size
#test update file.
write_file(file_path, file_content)
token = api.get_fileserver_access_token(repo.id, obj_id, 'update', USER, False)
update_url_base = 'http://127.0.0.1:8082/update-api/' + token
files = {'file':open(file_path, 'rb'),
'target_file':'/' + file_name}
response = requests.post(update_url_base, files = files)
m = MultipartEncoder(
fields={
'target_file': '/' + file_name,
'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
})
response = requests.post(update_url_base,
data = m, headers = {'Content-Type': m.content_type})
assert_update_response(response, False)
time.sleep (1.5)
repo_size = api.get_repo_size (repo.id)
assert repo_size == total_size + file_size
time.sleep(1)
del_repo_files(repo.id)
del_local_files()

View File

@@ -43,15 +43,8 @@ def test_zip_download():
obj_id_json_str = json.dumps(obj_id)
token = api.get_fileserver_access_token(t_repo_id, obj_id_json_str,
'download-dir', USER)
while True:
time.sleep(0.5)
progress_json_str = api.query_zip_progress(token)
progress = json.loads(progress_json_str)
if progress['zipped'] != progress['total']:
continue
assert progress['zipped'] == 2 and progress['total'] == 2
break
time.sleep(1)
download_url = base_url + 'zip/' + token
response = requests.get(download_url)
assert response.status_code == 200
@@ -88,15 +81,7 @@ def test_zip_download():
obj_id_json_str = json.dumps(obj_id)
token = api.get_fileserver_access_token(t_repo_id, obj_id_json_str,
'download-dir', USER)
while True:
time.sleep(0.5)
progress_json_str = api.query_zip_progress(token)
progress = json.loads(progress_json_str)
if progress['zipped'] != progress['total']:
continue
assert progress['zipped'] == 0 and progress['total'] == 0
break
time.sleep(1)
download_url = base_url + 'zip/' + token
response = requests.get(download_url)
assert response.status_code == 200
@@ -121,15 +106,8 @@ def test_zip_download():
obj_id_json_str = json.dumps(obj_id)
token = api.get_fileserver_access_token(t_repo_id, obj_id_json_str,
'download-multi', USER)
while True:
time.sleep(0.5)
progress_json_str = api.query_zip_progress(token)
progress = json.loads(progress_json_str)
if progress['zipped'] != progress['total']:
continue
assert progress['zipped'] == 2 and progress['total'] == 2
break
time.sleep(1)
download_url = base_url + 'zip/' + token
response = requests.get(download_url)
assert response.status_code == 200