diff --git a/fileserver/diff/diff.go b/fileserver/diff/diff.go
index 7746cad..77aa027 100644
--- a/fileserver/diff/diff.go
+++ b/fileserver/diff/diff.go
@@ -3,6 +3,7 @@ package diff
 import (
 	"context"
 	"fmt"
+	"io"
 	"path/filepath"
 	"strings"

@@ -25,6 +26,7 @@ type DiffOptions struct {
 	RepoID string
 	Ctx    context.Context
 	Data   interface{}
+	Reader io.ReadCloser
 }

 type diffData struct {
@@ -33,6 +35,10 @@ type diffData struct {
 }

 func DiffTrees(roots []string, opt *DiffOptions) error {
+	reader := fsmgr.GetOneZlibReader()
+	defer fsmgr.ReturnOneZlibReader(reader)
+	opt.Reader = reader
+
 	n := len(roots)
 	if n != 2 && n != 3 {
 		err := fmt.Errorf("the number of commit trees is illegal")
@@ -40,7 +46,7 @@ func DiffTrees(roots []string, opt *DiffOptions) error {
 	}
 	trees := make([]*fsmgr.SeafDir, n)
 	for i := 0; i < n; i++ {
-		root, err := fsmgr.GetSeafdir(opt.RepoID, roots[i])
+		root, err := fsmgr.GetSeafdirWithZlibReader(opt.RepoID, roots[i], opt.Reader)
 		if err != nil {
 			err := fmt.Errorf("Failed to find dir %s:%s", opt.RepoID, roots[i])
 			return err
@@ -165,7 +171,7 @@ func diffDirectories(baseDir string, dents []*fsmgr.SeafDirent, opt *DiffOptions
 	var dirName string
 	for i := 0; i < n; i++ {
 		if dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {
-			dir, err := fsmgr.GetSeafdir(opt.RepoID, dents[i].ID)
+			dir, err := fsmgr.GetSeafdirWithZlibReader(opt.RepoID, dents[i].ID, opt.Reader)
 			if err != nil {
 				err := fmt.Errorf("Failed to find dir %s:%s", opt.RepoID, dents[i].ID)
 				return err
diff --git a/fileserver/fileop.go b/fileserver/fileop.go
index 7de2492..11bd2bb 100644
--- a/fileserver/fileop.go
+++ b/fileserver/fileop.go
@@ -76,7 +76,7 @@ func initUpload() {
 		os.MkdirAll(objDir, os.ModePerm)
 	}

-//contentType = "application/octet-stream"
+// contentType = "application/octet-stream"
 func parseContentType(fileName string) string {
 	var contentType string

@@ -1922,6 +1922,10 @@ func postMultiFilesRecursive(repo *repomgr.Repo, dirID, toPath, user string, den
 			return "", err
 		}
 		ret = newdir.DirID
+	} else {
+		// ret is an empty string if the parent dir cannot be found; return an error in that case.
+ err := fmt.Errorf("failed to find parent dir for %s", toPath) + return "", err } return ret, nil @@ -2570,6 +2574,9 @@ func putFileRecursive(repo *repomgr.Repo, dirID, toPath string, newDent *fsmgr.S return "", err } ret = newdir.DirID + } else { + err := fmt.Errorf("failed to find parent dir for %s", toPath) + return "", err } return ret, nil diff --git a/fileserver/fileserver.go b/fileserver/fileserver.go index 2c0f0a9..a8db1ad 100644 --- a/fileserver/fileserver.go +++ b/fileserver/fileserver.go @@ -73,7 +73,9 @@ type fileServerOptions struct { profilePassword string enableProfiling bool // Go log level - logLevel string + logLevel string + fsCacheLimit int64 + fsIdListRequestTimeout int64 } var options fileServerOptions @@ -379,6 +381,18 @@ func parseFileServerSection(section *ini.Section) { if key, err := section.GetKey("go_log_level"); err == nil { options.logLevel = key.String() } + if key, err := section.GetKey("fs_cache_limit"); err == nil { + fsCacheLimit, err := key.Int64() + if err == nil { + options.fsCacheLimit = fsCacheLimit * 1024 * 1024 + } + } + if key, err := section.GetKey("fs_id_list_request_timeout"); err == nil { + fsIdListRequestTimeout, err := key.Int64() + if err == nil { + options.fsIdListRequestTimeout = fsIdListRequestTimeout + } + } } func initDefaultOptions() { @@ -390,6 +404,8 @@ func initDefaultOptions() { options.webTokenExpireTime = 7200 options.clusterSharedTempFileMode = 0600 options.defaultQuota = InfiniteQuota + options.fsCacheLimit = 2 << 30 + options.fsIdListRequestTimeout = -1 } func writePidFile(pid_file_path string) error { @@ -490,7 +506,7 @@ func main() { repomgr.Init(seafileDB) - fsmgr.Init(centralDir, dataDir) + fsmgr.Init(centralDir, dataDir, options.fsCacheLimit) blockmgr.Init(centralDir, dataDir) diff --git a/fileserver/fsmgr/fsmgr.go b/fileserver/fsmgr/fsmgr.go index 960c0f5..607f41c 100644 --- a/fileserver/fsmgr/fsmgr.go +++ b/fileserver/fsmgr/fsmgr.go @@ -6,17 +6,24 @@ import ( "compress/zlib" "crypto/sha1" "encoding/hex" - "encoding/json" "fmt" "io" "path/filepath" "strings" + "sync" "syscall" + "time" + "unsafe" "github.com/haiwen/seafile-server/fileserver/objstore" "github.com/haiwen/seafile-server/fileserver/utils" + jsoniter "github.com/json-iterator/go" + + "github.com/dgraph-io/ristretto" ) +var json = jsoniter.ConfigCompatibleWithStandardLibrary + // Seafile is a file object type Seafile struct { data []byte @@ -149,7 +156,7 @@ func (dent *SeafDirent) toJSON() ([]byte, error) { return buf.Bytes(), nil } -//SeafDir is a dir object +// SeafDir is a dir object type SeafDir struct { data []byte Version int `json:"version"` @@ -218,9 +225,115 @@ const ( EmptySha1 = "0000000000000000000000000000000000000000" ) +// Since zlib library allocates a large amount of memory every time a new reader is created, when the number of calls is too large, +// the GC will be executed frequently, resulting in high CPU usage. +var zlibReaders []io.ReadCloser +var zlibLock sync.Mutex + +// Add fs cache, on the one hand to avoid repeated creation and destruction of repeatedly accessed objects, +// on the other hand it will also slow down the speed at which objects are released. +var fsCache *ristretto.Cache + // Init initializes fs manager and creates underlying object store. 
-func Init(seafileConfPath string, seafileDataDir string) {
+func Init(seafileConfPath string, seafileDataDir string, fsCacheLimit int64) {
 	store = objstore.New(seafileConfPath, seafileDataDir, "fs")
+	fsCache, _ = ristretto.NewCache(&ristretto.Config{
+		NumCounters: 1e7,          // number of keys to track frequency of (10M).
+		MaxCost:     fsCacheLimit, // maximum cost of cache.
+		BufferItems: 64,           // number of keys per Get buffer.
+		Cost:        calCost,
+	})
+}
+
+func calCost(value interface{}) int64 {
+	return sizeOf(value)
+}
+
+const (
+	sizeOfString     = int64(unsafe.Sizeof(string("")))
+	sizeOfPointer    = int64(unsafe.Sizeof(uintptr(0)))
+	sizeOfSeafile    = int64(unsafe.Sizeof(Seafile{}))
+	sizeOfSeafDir    = int64(unsafe.Sizeof(SeafDir{}))
+	sizeOfSeafDirent = int64(unsafe.Sizeof(SeafDirent{}))
+)
+
+func sizeOf(a interface{}) int64 {
+	var size int64
+	switch x := a.(type) {
+	case string:
+		return sizeOfString + int64(len(x))
+	case []string:
+		for _, s := range x {
+			size += sizeOf(s)
+		}
+		return size
+	case *Seafile:
+		size = sizeOfPointer
+		size += sizeOfSeafile
+		size += int64(len(x.FileID))
+		size += sizeOf(x.BlkIDs)
+		return size
+	case *SeafDir:
+		size = sizeOfPointer
+		size += sizeOfSeafDir
+		size += int64(len(x.DirID))
+		for _, dent := range x.Entries {
+			size += sizeOf(dent)
+		}
+		return size
+	case *SeafDirent:
+		size = sizeOfPointer
+		size += sizeOfSeafDirent
+		size += int64(len(x.ID))
+		size += int64(len(x.Name))
+		size += int64(len(x.Modifier))
+		return size
+	}
+	return 0
+}
+
+func initZlibReader() (io.ReadCloser, error) {
+	var buf bytes.Buffer
+
+	// zlib.NewReader needs a valid zlib stream up front, so write an empty
+	// stream into a buffer and use it to initialize the reader.
+	w := zlib.NewWriter(&buf)
+	w.Close()
+
+	r, err := zlib.NewReader(&buf)
+	if err != nil {
+		return nil, err
+	}
+
+	return r, nil
+}
+
+// GetOneZlibReader gets a zlib reader from zlibReaders.
+func GetOneZlibReader() io.ReadCloser {
+	zlibLock.Lock()
+	defer zlibLock.Unlock()
+	var reader io.ReadCloser
+	if len(zlibReaders) == 0 {
+		reader, err := initZlibReader()
+		if err != nil {
+			return nil
+		}
+		return reader
+	}
+	reader = zlibReaders[0]
+	zlibReaders = zlibReaders[1:]
+
+	return reader
+}
+
+// ReturnOneZlibReader puts a zlib reader back into zlibReaders for reuse.
+func ReturnOneZlibReader(reader io.ReadCloser) {
+	if reader == nil {
+		return
+	}
+	zlibLock.Lock()
+	defer zlibLock.Unlock()
+	zlibReaders = append(zlibReaders, reader)
 }

 // NewDirent initializes a SeafDirent object
@@ -285,21 +398,37 @@ func NewSeafile(version int, fileSize int64, blkIDs []string) (*Seafile, error)
 	return seafile, nil
 }

-func uncompress(p []byte) ([]byte, error) {
+func uncompress(p []byte, reader io.ReadCloser) ([]byte, error) {
 	b := bytes.NewReader(p)
 	var out bytes.Buffer
-	r, err := zlib.NewReader(b)
-	if err != nil {
-		return nil, err
-	}
-	_, err = io.Copy(&out, r)
-	if err != nil {
+	if reader == nil {
+		r, err := zlib.NewReader(b)
+		if err != nil {
+			return nil, err
+		}
+
+		_, err = io.Copy(&out, r)
+		if err != nil {
+			r.Close()
+			return nil, err
+		}
+		r.Close()
+		return out.Bytes(), nil
+	}
+
+	// Reuse the pooled zlib reader.
+	resetter, _ := reader.(zlib.Resetter)
+	err := resetter.Reset(b, nil)
+	if err != nil {
 		return nil, err
 	}
-	r.Close()

+	_, err = io.Copy(&out, reader)
+	if err != nil {
+		return nil, err
+	}
 	return out.Bytes(), nil
 }

@@ -320,8 +449,8 @@ func compress(p []byte) ([]byte, error) {
 }

 // FromData reads from p and converts JSON-encoded data to Seafile.
-func (seafile *Seafile) FromData(p []byte) error {
-	b, err := uncompress(p)
+func (seafile *Seafile) FromData(p []byte, reader io.ReadCloser) error {
+	b, err := uncompress(p, reader)
 	if err != nil {
 		return err
 	}
@@ -379,8 +508,8 @@ func (seafdir *SeafDir) ToData(w io.Writer) error {
 }

 // FromData reads from p and converts JSON-encoded data to SeafDir.
-func (seafdir *SeafDir) FromData(p []byte) error {
-	b, err := uncompress(p)
+func (seafdir *SeafDir) FromData(p []byte, reader io.ReadCloser) error {
+	b, err := uncompress(p, reader)
 	if err != nil {
 		return err
 	}
@@ -427,6 +556,16 @@ func WriteRaw(repoID string, objID string, r io.Reader) error {

 // GetSeafile gets seafile from storage backend.
 func GetSeafile(repoID string, fileID string) (*Seafile, error) {
+	return getSeafile(repoID, fileID, nil)
+}
+
+// GetSeafileWithZlibReader gets seafile from storage backend with a zlib reader.
+func GetSeafileWithZlibReader(repoID string, fileID string, reader io.ReadCloser) (*Seafile, error) {
+	return getSeafile(repoID, fileID, reader)
+}
+
+func getSeafile(repoID string, fileID string, reader io.ReadCloser) (*Seafile, error) {
+	var buf bytes.Buffer
 	seafile := new(Seafile)
 	if fileID == EmptySha1 {
@@ -442,7 +581,7 @@ func GetSeafile(repoID string, fileID string) (*Seafile, error) {
 		return nil, errors
 	}

-	err = seafile.FromData(buf.Bytes())
+	err = seafile.FromData(buf.Bytes(), reader)
 	if err != nil {
 		errors := fmt.Errorf("failed to parse seafile object %s/%s : %v", repoID, fileID, err)
 		return nil, errors
@@ -487,8 +626,24 @@ func SaveSeafile(repoID string, seafile *Seafile) error {

 // GetSeafdir gets seafdir from storage backend.
 func GetSeafdir(repoID string, dirID string) (*SeafDir, error) {
+	return getSeafdir(repoID, dirID, nil, false)
+}
+
+// GetSeafdirWithZlibReader gets seafdir from storage backend with a zlib reader.
+func GetSeafdirWithZlibReader(repoID string, dirID string, reader io.ReadCloser) (*SeafDir, error) {
+	return getSeafdir(repoID, dirID, reader, true)
+}
+
+func getSeafdir(repoID string, dirID string, reader io.ReadCloser, useCache bool) (*SeafDir, error) {
+	var seafdir *SeafDir
+	if useCache {
+		seafdir = getSeafdirFromCache(repoID, dirID)
+		if seafdir != nil {
+			return seafdir, nil
+		}
+	}
 	var buf bytes.Buffer
-	seafdir := new(SeafDir)
+	seafdir = new(SeafDir)
 	if dirID == EmptySha1 {
 		seafdir.DirID = EmptySha1
 		return seafdir, nil
@@ -502,7 +657,7 @@ func GetSeafdir(repoID string, dirID string) (*SeafDir, error) {
 		return nil, errors
 	}

-	err = seafdir.FromData(buf.Bytes())
+	err = seafdir.FromData(buf.Bytes(), reader)
 	if err != nil {
 		errors := fmt.Errorf("failed to parse seafdir object %s/%s : %v", repoID, dirID, err)
 		return nil, errors
@@ -513,9 +668,34 @@ func GetSeafdir(repoID string, dirID string) (*SeafDir, error) {
 		return nil, errors
 	}

+	if useCache {
+		setSeafdirToCache(repoID, seafdir)
+	}
+
 	return seafdir, nil
 }

+func getSeafdirFromCache(repoID string, dirID string) *SeafDir {
+	key := repoID + dirID
+	v, ok := fsCache.Get(key)
+	if !ok {
+		return nil
+	}
+	seafdir, ok := v.(*SeafDir)
+	if ok {
+		return seafdir
+	}
+
+	return nil
+}
+
+func setSeafdirToCache(repoID string, seafdir *SeafDir) error {
+	key := repoID + seafdir.DirID
+	fsCache.SetWithTTL(key, seafdir, 0, time.Duration(1*time.Hour))
+
+	return nil
+}
+
 // SaveSeafdir saves seafdir to storage backend.
 func SaveSeafdir(repoID string, seafdir *SeafDir) error {
 	dirID := seafdir.DirID
diff --git a/fileserver/go.mod b/fileserver/go.mod
index da2d302..4c18a46 100644
--- a/fileserver/go.mod
+++ b/fileserver/go.mod
@@ -1,14 +1,26 @@
 module github.com/haiwen/seafile-server/fileserver

-go 1.14
+go 1.17

 require (
+	github.com/dgraph-io/ristretto v0.1.1
 	github.com/go-sql-driver/mysql v1.5.0
 	github.com/google/uuid v1.1.1
 	github.com/gorilla/mux v1.7.4
+	github.com/json-iterator/go v1.1.12
 	github.com/mattn/go-sqlite3 v1.14.0
-	github.com/sirupsen/logrus v1.8.1 // indirect
-	github.com/smartystreets/goconvey v1.6.4 // indirect
-	golang.org/x/text v0.3.7 // indirect
+	github.com/sirupsen/logrus v1.8.1
+	golang.org/x/text v0.3.7
 	gopkg.in/ini.v1 v1.55.0
 )
+
+require (
+	github.com/cespare/xxhash/v2 v2.1.1 // indirect
+	github.com/dustin/go-humanize v1.0.0 // indirect
+	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
+	github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/smartystreets/goconvey v1.6.4 // indirect
+	golang.org/x/sys v0.0.0-20221010170243-090e33056c14 // indirect
+)
diff --git a/fileserver/go.sum b/fileserver/go.sum
index 1583472..8230c42 100644
--- a/fileserver/go.sum
+++ b/fileserver/go.sum
@@ -1,26 +1,52 @@
 github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
 github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=
+github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
 github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
 github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/mattn/go-sqlite3 v1.14.0 h1:mLyGNKR8+Vv9CAU7PphKa2hkEqxxhn8i32J6FPj1/QA=
 github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
 github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.3 h1:QdmJJYlDQhMDFrFP8IvVnx66D8mCbaQM4TsxKf7BXzo=
 github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -28,12 +54,16 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20221010170243-090e33056c14 h1:k5II8e6QD8mITdi+okbbmR/cIyEbeXLBhy5Ha4nevyc=
+golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/ini.v1 v1.55.0 h1:E8yzL5unfpW3M6fz/eB7Cb5MQAYSZ7GKo4Qth+N2sgQ=
 gopkg.in/ini.v1 v1.55.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/fileserver/merge.go b/fileserver/merge.go
index cf1f37e..9cda454 100644
--- a/fileserver/merge.go
+++ b/fileserver/merge.go
@@ -2,6 +2,7 @@ package main

 import (
 	"fmt"
+	"io"
 	"path/filepath"
 	"sort"
 	"strings"
@@ -16,6 +17,7 @@ type mergeOptions struct {
 	remoteHead string
 	mergedRoot string
 	conflict   bool
+	reader     io.ReadCloser
 }

 func mergeTrees(storeID string, roots []string, opt *mergeOptions) error {
@@ -26,7 +28,7 @@ func mergeTrees(storeID string, roots []string, opt *mergeOptions) error {

 	var trees []*fsmgr.SeafDir
 	for i := 0; i < 3; i++ {
-		dir, err := fsmgr.GetSeafdir(storeID, roots[i])
+		dir, err := fsmgr.GetSeafdirWithZlibReader(storeID, roots[i], opt.reader)
 		if err != nil {
 			err := fmt.Errorf("failed to get dir: %v", err)
 			return err
@@ -288,7 +290,7 @@ func mergeDirectories(storeID string, dents []*fsmgr.SeafDirent, baseDir string,

 	for i := 0; i < n; i++ {
 		if dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {
-			dir, err := fsmgr.GetSeafdir(storeID, dents[i].ID)
+			dir, err := fsmgr.GetSeafdirWithZlibReader(storeID, dents[i].ID, opt.reader)
 			if err != nil {
 				err := fmt.Errorf("failed to get seafdir %s/%s", storeID, dents[i].ID)
 				return nil, err
diff --git a/fileserver/merge_test.go b/fileserver/merge_test.go
index 49a41d1..d4ad433 100644
--- a/fileserver/merge_test.go
+++ b/fileserver/merge_test.go
@@ -213,7 +213,7 @@ func mergeTestDelFile() error {

 func TestMergeTrees(t *testing.T) {
 	commitmgr.Init(mergeTestSeafileConfPath, mergeTestSeafileDataDir)
-	fsmgr.Init(mergeTestSeafileConfPath, mergeTestSeafileDataDir)
+	fsmgr.Init(mergeTestSeafileConfPath, mergeTestSeafileDataDir, options.fsCacheLimit)
 	err := mergeTestCreateTestDir()
 	if err != nil {
 		fmt.Printf("failed to create test dir: %v", err)
diff --git a/fileserver/sync_api.go b/fileserver/sync_api.go
index a3b0bd3..e89f739 100644
--- a/fileserver/sync_api.go
+++ b/fileserver/sync_api.go
@@ -1242,6 +1242,14 @@ func removeSyncAPIExpireCache() {
 	virtualRepoInfoCache.Range(deleteVirtualRepoInfo)
 }

+type collectFsInfo struct {
+	startTime int64
+	isTimeout bool
+	results   []interface{}
+}
+
+var ErrTimeout = fmt.Errorf("get fs id list timeout")
+
 func calculateSendObjectList(ctx context.Context, repo *repomgr.Repo, serverHead string, clientHead string, dirOnly bool) ([]interface{}, error) {
 	masterHead, err := commitmgr.Load(repo.ID, serverHead)
 	if err != nil {
@@ -1259,9 +1267,10 @@
 		remoteHeadRoot = remoteHead.RootID
 	}

-	var results []interface{}
+	info := new(collectFsInfo)
+	info.startTime = time.Now().Unix()
 	if remoteHeadRoot != masterHead.RootID && masterHead.RootID != emptySHA1 {
-		results = append(results, masterHead.RootID)
+		info.results = append(info.results, masterHead.RootID)
 	}

 	var opt *diff.DiffOptions
@@ -1271,21 +1280,24 @@
 			DirCB:  collectDirIDs,
 			Ctx:    ctx,
 			RepoID: repo.StoreID}
-		opt.Data = &results
+		opt.Data = info
 	} else {
 		opt = &diff.DiffOptions{
 			FileCB: collectFileIDsNOp,
 			DirCB:  collectDirIDs,
 			Ctx:    ctx,
 			RepoID: repo.StoreID}
-		opt.Data = &results
+		opt.Data = info
 	}

 	trees := []string{masterHead.RootID, remoteHeadRoot}
 	if err := diff.DiffTrees(trees, opt); err != nil {
+		if info.isTimeout {
+			return nil, ErrTimeout
+		}
 		return nil, err
 	}

-	return results, nil
+	return info.results, nil
 }

 func collectFileIDs(ctx context.Context, baseDir string, files []*fsmgr.SeafDirent, data interface{}) error {
@@ -1297,7 +1309,7 @@ func collectFileIDs(ctx context.Context, baseDir string, files []*fsmgr.SeafDire
 	file1 := files[0]
 	file2 := files[1]

-	results, ok := data.(*[]interface{})
+	info, ok := data.(*collectFsInfo)
 	if !ok {
 		err := fmt.Errorf("failed to assert results")
 		return err
@@ -1306,7 +1318,7 @@
 	if file1 != nil &&
 		(file2 == nil || file1.ID != file2.ID) &&
 		file1.ID != emptySHA1 {
-		*results = append(*results, file1.ID)
+		info.results = append(info.results, file1.ID)
 	}

 	return nil
@@ -1323,18 +1335,26 @@ func collectDirIDs(ctx context.Context, baseDir string, dirs []*fsmgr.SeafDirent
 	default:
 	}

-	dir1 := dirs[0]
-	dir2 := dirs[1]
-	results, ok := data.(*[]interface{})
+	info, ok := data.(*collectFsInfo)
 	if !ok {
-		err := fmt.Errorf("failed to assert results")
+		err := fmt.Errorf("failed to assert fs info")
 		return err
 	}
+	dir1 := dirs[0]
+	dir2 := dirs[1]
 	if dir1 != nil &&
 		(dir2 == nil || dir1.ID != dir2.ID) &&
 		dir1.ID != emptySHA1 {
-		*results = append(*results, dir1.ID)
+		info.results = append(info.results, dir1.ID)
+	}
+
+	if options.fsIdListRequestTimeout > 0 {
+		now := time.Now().Unix()
+		if now-info.startTime > options.fsIdListRequestTimeout {
+			info.isTimeout = true
+			return ErrTimeout
+		}
 	}

 	return nil
diff --git a/server/gc/fsck.c b/server/gc/fsck.c
index 22a1268..8a40cbd 100644
--- a/server/gc/fsck.c
+++ b/server/gc/fsck.c
@@ -82,6 +82,10 @@ check_blocks (const char *file_id, FsckData *fsck_data, gboolean *io_error)

     seafile = seaf_fs_manager_get_seafile (seaf->fs_mgr, store_id, version, file_id);
+    if (!seafile) {
+        seaf_warning ("Failed to get seafile: %s/%s\n", store_id, file_id);
+        return -1;
+    }

     for (i = 0; i < seafile->n_blocks; ++i) {
         block_id = seafile->blk_sha1s[i];
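
Usage sketch for the zlib reader pool introduced in fsmgr above. The function and its repoID/rootID parameters are a hypothetical caller, not part of the patch; only the fsmgr calls are the ones added by this change. A minimal, self-contained version could look like this:

package example

import "github.com/haiwen/seafile-server/fileserver/fsmgr"

// listRootEntries is a hypothetical caller. It borrows one zlib reader for a
// batch of fs lookups and returns it to the pool when done, mirroring what
// DiffTrees does in this patch.
func listRootEntries(repoID, rootID string) ([]*fsmgr.SeafDirent, error) {
	reader := fsmgr.GetOneZlibReader()      // may be nil if a new reader cannot be created
	defer fsmgr.ReturnOneZlibReader(reader) // nil readers are simply dropped on return

	// With a non-nil reader, uncompress resets and reuses it; with nil it
	// falls back to allocating a fresh zlib.Reader for each object.
	dir, err := fsmgr.GetSeafdirWithZlibReader(repoID, rootID, reader)
	if err != nil {
		return nil, err
	}
	return dir.Entries, nil
}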
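The fs cache wiring can also be read in isolation: entries are keyed by repoID + dirID, each entry's cost is an estimate of its in-memory size (via sizeOf), and entries expire after one hour. The following is an illustrative reduction of that pattern, using a small MaxCost and a plain string value instead of *SeafDir so it runs on its own; the key and value literals are placeholders:

package main

import (
	"fmt"
	"time"

	"github.com/dgraph-io/ristretto"
)

func main() {
	// Same shape as the patch's ristretto.Config, scaled down for a demo.
	cache, err := ristretto.NewCache(&ristretto.Config{
		NumCounters: 1e4,
		MaxCost:     1 << 20, // placeholder; the patch uses fs_cache_limit (default 2 GB)
		BufferItems: 64,
		Cost:        func(value interface{}) int64 { return int64(len(value.(string))) },
	})
	if err != nil {
		panic(err)
	}

	key := "repo-id" + "dir-id" // the patch concatenates repoID and dirID the same way
	cache.SetWithTTL(key, "serialized seafdir", 0, time.Hour) // cost 0 defers to the Cost function
	cache.Wait()                                              // Set is buffered; wait so the Get below can see the entry

	if v, ok := cache.Get(key); ok {
		fmt.Println(v)
	}
}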
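On the configuration side, the patch reads two new optional keys from the fileserver section of seafile.conf: fs_cache_limit is interpreted as a size in MB (multiplied by 1024 * 1024 when parsed; default 2 GB) and fs_id_list_request_timeout as a number of seconds (default -1, which disables the timeout check in collectDirIDs). An illustrative snippet, with values chosen only as an example:

[fileserver]
# cap the in-memory fs object cache at 1024 MB (default: 2 GB)
fs_cache_limit = 1024
# abort fs-id-list requests that run longer than 300 seconds (default: no timeout)
fs_id_list_request_timeout = 300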