mirror of https://github.com/haiwen/seafile-server.git

commit d1e57781b5 (parent 5090725ff8)

add block map cache (#452)
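In outline, the diff below does the following. doFileRange previously called blockmgr.Stat for every block of a file on each ranged download. The commit adds blockMapCacheTable, a package-level sync.Map in fileop.go that caches the per-file list of block sizes, but only for files larger than cacheBlockMapThreshold (1 << 23 bytes, i.e. 8 MiB). Each cache entry carries an expiry timestamp set blockMapCacheExpiretime seconds ahead (3600 * 24, i.e. 24 hours), and a new fileopInit() goroutine, started from main(), sweeps expired entries every fileopCleaningIntervalSec seconds (hourly). The sync API's existing cleaning constant and cleanup function are renamed with a syncAPI prefix.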
fileserver/fileop.go

@@ -35,6 +35,14 @@ import (
 	"github.com/haiwen/seafile-server/fileserver/repomgr"
 )

+const (
+	cacheBlockMapThreshold          = 1 << 23
+	blockMapCacheExpiretime   int64 = 3600 * 24
+	fileopCleaningIntervalSec       = 3600
+)
+
+var blockMapCacheTable sync.Map
+
 // Dirents is an alias for slice of SeafDirent.
 type Dirents []*fsmgr.SeafDirent

@@ -49,6 +57,15 @@ func (d Dirents) Len() int {
 	return len(d)
 }

+func fileopInit() {
+	ticker := time.NewTicker(time.Second * fileopCleaningIntervalSec)
+	go func() {
+		for range ticker.C {
+			removeFileopExpireCache()
+		}
+	}()
+}
+
 func initUpload() {
 	objDir := filepath.Join(dataDir, "httptemp", "cluster-shared")
 	os.MkdirAll(objDir, os.ModePerm)
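For reference, fileopInit above uses the standard time.Ticker-in-a-goroutine pattern for periodic work. The sketch below shows the same pattern in isolation; the names startSweeper, interval, and stop are illustrative and not part of the fileserver code, and unlike fileopInit it also wires up a way to stop the loop, which the fileserver does not need because its cleaner runs for the whole process lifetime.

package main

import (
	"fmt"
	"time"
)

// startSweeper calls sweep every interval until stop is closed.
// fileopInit in the diff uses the same ticker-plus-goroutine shape,
// minus the stop channel.
func startSweeper(interval time.Duration, sweep func(), stop <-chan struct{}) {
	ticker := time.NewTicker(interval)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				sweep()
			case <-stop:
				return
			}
		}
	}()
}

func main() {
	stop := make(chan struct{})
	startSweeper(100*time.Millisecond, func() { fmt.Println("sweep") }, stop)
	time.Sleep(350 * time.Millisecond)
	close(stop)
}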
@@ -284,6 +301,11 @@ func doFile(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, fileID
 	return nil
 }

+type blockMap struct {
+	blkSize    []uint64
+	expireTime int64
+}
+
 func doFileRange(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, fileID string,
 	fileName string, operation string, byteRanges string, user string) *appError {

@@ -317,6 +339,13 @@ func doFileRange(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, f
 	rsp.Header().Set("Content-Range", conRange)

 	var blkSize []uint64
+	if file.FileSize > cacheBlockMapThreshold {
+		if v, ok := blockMapCacheTable.Load(file.FileID); ok {
+			if blkMap, ok := v.(*blockMap); ok {
+				blkSize = blkMap.blkSize
+			}
+		}
+		if len(blkSize) == 0 {
 			for _, v := range file.BlkIDs {
 				size, err := blockmgr.Stat(repo.StoreID, v)
 				if err != nil {
@@ -325,6 +354,18 @@ func doFileRange(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, f
 				}
 				blkSize = append(blkSize, uint64(size))
 			}
+			blockMapCacheTable.Store(file.FileID, &blockMap{blkSize, time.Now().Unix() + blockMapCacheExpiretime})
+		}
+	} else {
+		for _, v := range file.BlkIDs {
+			size, err := blockmgr.Stat(repo.StoreID, v)
+			if err != nil {
+				err := fmt.Errorf("failed to stat block %s : %v", v, err)
+				return &appError{err, "", http.StatusInternalServerError}
+			}
+			blkSize = append(blkSize, uint64(size))
+		}
+	}

 	var off uint64
 	var pos uint64
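The two hunks above implement a read-through cache: for files over the size threshold, doFileRange first tries blockMapCacheTable by file ID; on a miss it stats every block as before and stores the resulting size list together with an expiry timestamp, while smaller files keep the old stat-every-block path. A compressed, self-contained sketch of that shape follows; blockSizes, statBlockSize, sizeCache, and the constants are illustrative stand-ins, not the fileserver's actual API.

package main

import (
	"fmt"
	"sync"
	"time"
)

const (
	sizeCacheThreshold = 1 << 23   // only cache files larger than 8 MiB
	sizeCacheTTLSec    = 3600 * 24 // cached entries live for 24 hours
)

type sizeEntry struct {
	sizes      []uint64
	expireTime int64
}

var sizeCache sync.Map // file ID -> *sizeEntry

// blockSizes returns per-block sizes for a file, consulting the cache
// for large files and filling it on a miss. statBlockSize stands in
// for a real per-block stat call such as blockmgr.Stat.
func blockSizes(fileID string, fileSize uint64, blockIDs []string,
	statBlockSize func(blockID string) (uint64, error)) ([]uint64, error) {

	if fileSize > sizeCacheThreshold {
		if v, ok := sizeCache.Load(fileID); ok {
			if e, ok := v.(*sizeEntry); ok {
				return e.sizes, nil
			}
		}
	}

	sizes := make([]uint64, 0, len(blockIDs))
	for _, id := range blockIDs {
		n, err := statBlockSize(id)
		if err != nil {
			return nil, err
		}
		sizes = append(sizes, n)
	}

	if fileSize > sizeCacheThreshold {
		sizeCache.Store(fileID, &sizeEntry{sizes, time.Now().Unix() + sizeCacheTTLSec})
	}
	return sizes, nil
}

func main() {
	fakeStat := func(blockID string) (uint64, error) { return uint64(len(blockID)), nil }
	sizes, _ := blockSizes("file-1", 1<<24, []string{"blk-a", "blk-bb"}, fakeStat)
	fmt.Println(sizes) // [5 6] on the first call; served from the cache afterwards
}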
@@ -3075,3 +3116,16 @@ func indexRawBlocks(repoID string, blockIDs []string, fileHeaders []*multipart.F

 	return nil
 }
+
+func removeFileopExpireCache() {
+	deleteBlockMaps := func(key interface{}, value interface{}) bool {
+		if blkMap, ok := value.(*blockMap); ok {
+			if blkMap.expireTime <= time.Now().Unix() {
+				blockMapCacheTable.Delete(key)
+			}
+		}
+		return true
+	}
+
+	blockMapCacheTable.Range(deleteBlockMaps)
+}
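removeFileopExpireCache above deletes entries from inside a sync.Map.Range callback; sync.Map is safe for concurrent use, so mutating it during Range is permitted (the iteration simply may or may not reflect those changes). A small runnable example of the same expiry sweep, with an illustrative entry type in place of the fileserver's blockMap:

package main

import (
	"fmt"
	"sync"
	"time"
)

type entry struct {
	expireTime int64
}

func main() {
	var cache sync.Map
	now := time.Now().Unix()

	cache.Store("fresh", &entry{expireTime: now + 3600}) // still valid
	cache.Store("stale", &entry{expireTime: now - 1})    // already expired

	// Same shape as removeFileopExpireCache: walk the map and delete
	// entries whose expiry timestamp has passed.
	cache.Range(func(key, value interface{}) bool {
		if e, ok := value.(*entry); ok {
			if e.expireTime <= now {
				cache.Delete(key)
			}
		}
		return true
	})

	cache.Range(func(key, _ interface{}) bool {
		fmt.Println("kept:", key) // only "kept: fresh" remains
		return true
	})
}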
fileserver/fileserver.go

@@ -338,6 +338,8 @@ func main() {

 	rpcClientInit()

+	fileopInit()
+
 	syncAPIInit()

 	sizeSchedulerInit()
fileserver/sync_api.go

@@ -41,7 +41,7 @@ const (
 	tokenExpireTime = 7200
 	permExpireTime = 7200
 	virtualRepoExpireTime = 7200
-	cleaningIntervalSec = 300
+	syncAPICleaningIntervalSec = 300
 )

 var (
@@ -83,10 +83,10 @@ type statusEventData struct {
 }

 func syncAPIInit() {
-	ticker := time.NewTicker(time.Second * cleaningIntervalSec)
+	ticker := time.NewTicker(time.Second * syncAPICleaningIntervalSec)
 	go func() {
 		for range ticker.C {
-			removeExpireCache()
+			removeSyncAPIExpireCache()
 		}
 	}()
 }
@@ -1130,7 +1130,7 @@ func publishRepoEvent(rData *repoEventData) {
 	}
 }

-func removeExpireCache() {
+func removeSyncAPIExpireCache() {
 	deleteTokens := func(key interface{}, value interface{}) bool {
 		if info, ok := value.(*tokenInfo); ok {
 			if info.expireTime <= time.Now().Unix() {
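The two sync_api.go hunks above only rename identifiers: cleaningIntervalSec becomes syncAPICleaningIntervalSec and removeExpireCache becomes removeSyncAPIExpireCache, with no behavioural change. Presumably this keeps the sync API's token cleaner clearly distinguished from the new block-map cleaner in fileop.go, since both cleaners now follow the same ticker pattern in the same package.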