mirror of https://github.com/haiwen/seafile-server.git
synced 2025-08-02 07:43:09 +00:00

add block map cache (#452)

parent 5090725ff8
commit d1e57781b5
@@ -35,6 +35,14 @@ import (
 	"github.com/haiwen/seafile-server/fileserver/repomgr"
 )
 
+const (
+	cacheBlockMapThreshold          = 1 << 23
+	blockMapCacheExpiretime   int64 = 3600 * 24
+	fileopCleaningIntervalSec       = 3600
+)
+
+var blockMapCacheTable sync.Map
+
 // Dirents is an alias for slice of SeafDirent.
 type Dirents []*fsmgr.SeafDirent
 
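Taken together, the new constants and the package-level sync.Map form a small TTL cache for block maps: only files larger than 1 << 23 bytes (8 MiB) are cached, an entry lives for 3600 * 24 seconds (24 hours), and a cleanup pass runs every 3600 seconds (hourly). A minimal, self-contained sketch of the same pattern follows; the names (entry, cache, put, get) are illustrative, not the server's identifiers.

package main

import (
	"fmt"
	"sync"
	"time"
)

// entry mirrors the shape of the cached value: per-block sizes plus an
// absolute expiry time in Unix seconds.
type entry struct {
	sizes      []uint64
	expireTime int64
}

var cache sync.Map // keyed by file ID

func put(fileID string, sizes []uint64, ttlSec int64) {
	cache.Store(fileID, &entry{sizes, time.Now().Unix() + ttlSec})
}

func get(fileID string) ([]uint64, bool) {
	v, ok := cache.Load(fileID)
	if !ok {
		return nil, false
	}
	e, ok := v.(*entry) // sync.Map stores interface{}, so a type assertion is needed
	if !ok {
		return nil, false
	}
	return e.sizes, true
}

func main() {
	put("file-id-1", []uint64{1 << 20, 1 << 20, 512}, 3600*24)
	if sizes, ok := get("file-id-1"); ok {
		fmt.Println("cached block sizes:", sizes)
	}
}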
@@ -49,6 +57,15 @@ func (d Dirents) Len() int {
 	return len(d)
 }
 
+func fileopInit() {
+	ticker := time.NewTicker(time.Second * fileopCleaningIntervalSec)
+	go func() {
+		for range ticker.C {
+			removeFileopExpireCache()
+		}
+	}()
+}
+
 func initUpload() {
 	objDir := filepath.Join(dataDir, "httptemp", "cluster-shared")
 	os.MkdirAll(objDir, os.ModePerm)
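fileopInit starts a goroutine that sweeps the cache on every tick; the ticker is never stopped, which is fine for a service that runs for the lifetime of the process. One Go detail worth noting: time.Second * fileopCleaningIntervalSec only type-checks because the interval is an untyped constant, which converts implicitly to time.Duration. A quick standalone illustration (not the server's code):

package main

import (
	"fmt"
	"time"
)

const cleaningIntervalSec = 3600 // untyped constant

func main() {
	d := time.Second * cleaningIntervalSec // OK: the constant converts to time.Duration
	fmt.Println(d)                         // prints 1h0m0s

	// n := 3600
	// _ = time.Second * n // would not compile: mismatched types time.Duration and int
}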
@@ -284,6 +301,11 @@ func doFile(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, fileID
 	return nil
 }
 
+type blockMap struct {
+	blkSize    []uint64
+	expireTime int64
+}
+
 func doFileRange(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, fileID string,
 	fileName string, operation string, byteRanges string, user string) *appError {
 
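blockMap keeps the per-block sizes in file order plus an absolute expiry time in Unix seconds. The size list is what a range handler needs to translate a requested byte offset into a starting block and an offset inside that block (the `var off uint64` context line in the next hunk hints at this bookkeeping). A hedged sketch of that translation, with hypothetical names:

package main

import "fmt"

// locate returns the index of the block that contains byte offset start,
// and the offset of that byte within the block, given per-block sizes in
// file order. Purely illustrative; not the server's implementation.
func locate(blkSize []uint64, start uint64) (blockIndex int, offsetInBlock uint64) {
	var off uint64
	for i, size := range blkSize {
		if start < off+size {
			return i, start - off
		}
		off += size
	}
	return len(blkSize), 0 // start lies past the end of the file
}

func main() {
	sizes := []uint64{4096, 4096, 1024}
	idx, off := locate(sizes, 5000)
	fmt.Println(idx, off) // 1 904
}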
@@ -317,13 +339,32 @@ func doFileRange(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, f
 	rsp.Header().Set("Content-Range", conRange)
 
 	var blkSize []uint64
-	for _, v := range file.BlkIDs {
-		size, err := blockmgr.Stat(repo.StoreID, v)
-		if err != nil {
-			err := fmt.Errorf("failed to stat block %s : %v", v, err)
-			return &appError{err, "", http.StatusInternalServerError}
-		}
-		blkSize = append(blkSize, uint64(size))
-	}
+	if file.FileSize > cacheBlockMapThreshold {
+		if v, ok := blockMapCacheTable.Load(file.FileID); ok {
+			if blkMap, ok := v.(*blockMap); ok {
+				blkSize = blkMap.blkSize
+			}
+		}
+		if len(blkSize) == 0 {
+			for _, v := range file.BlkIDs {
+				size, err := blockmgr.Stat(repo.StoreID, v)
+				if err != nil {
+					err := fmt.Errorf("failed to stat block %s : %v", v, err)
+					return &appError{err, "", http.StatusInternalServerError}
+				}
+				blkSize = append(blkSize, uint64(size))
+			}
+			blockMapCacheTable.Store(file.FileID, &blockMap{blkSize, time.Now().Unix() + blockMapCacheExpiretime})
+		}
+	} else {
+		for _, v := range file.BlkIDs {
+			size, err := blockmgr.Stat(repo.StoreID, v)
+			if err != nil {
+				err := fmt.Errorf("failed to stat block %s : %v", v, err)
+				return &appError{err, "", http.StatusInternalServerError}
+			}
+			blkSize = append(blkSize, uint64(size))
+		}
+	}
 
 	var off uint64
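The new logic is a read-through cache: for files above the 8 MiB threshold, doFileRange first consults blockMapCacheTable and, on a miss, stats every block and stores the result with a 24-hour expiry; smaller files keep the old behavior and stat their blocks on every request. Note that the Load path does not check expireTime itself; expired entries are only removed by the hourly sweep, which is harmless as long as the block list for a given file ID never changes. Both branches repeat the same stat loop; in isolation it amounts to the following (a hypothetical helper, not part of the commit, assuming the same package and imports as the file above):

// statBlockSizes mirrors the loop duplicated in doFileRange: it stats each
// block of a file and collects the sizes in order. Hypothetical helper,
// shown only to make the shared logic explicit.
func statBlockSizes(storeID string, blkIDs []string) ([]uint64, error) {
	blkSize := make([]uint64, 0, len(blkIDs))
	for _, id := range blkIDs {
		size, err := blockmgr.Stat(storeID, id)
		if err != nil {
			return nil, fmt.Errorf("failed to stat block %s : %v", id, err)
		}
		blkSize = append(blkSize, uint64(size))
	}
	return blkSize, nil
}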
@@ -3075,3 +3116,16 @@ func indexRawBlocks(repoID string, blockIDs []string, fileHeaders []*multipart.F
 
 	return nil
 }
+
+func removeFileopExpireCache() {
+	deleteBlockMaps := func(key interface{}, value interface{}) bool {
+		if blkMap, ok := value.(*blockMap); ok {
+			if blkMap.expireTime <= time.Now().Unix() {
+				blockMapCacheTable.Delete(key)
+			}
+		}
+		return true
+	}
+
+	blockMapCacheTable.Range(deleteBlockMaps)
+}
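removeFileopExpireCache walks the map with sync.Map.Range; the callback returns true so iteration continues over every key, and deleting the current key from inside Range is permitted for sync.Map. A test-style sketch of the sweep (TestRemoveFileopExpireCache is a hypothetical name; it assumes placement in the same package as the code above):

package main // assumed: alongside the fileserver sources

import (
	"testing"
	"time"
)

func TestRemoveFileopExpireCache(t *testing.T) {
	// One entry that expired a minute ago, one that is still fresh.
	blockMapCacheTable.Store("expired-file-id", &blockMap{[]uint64{4096}, time.Now().Unix() - 60})
	blockMapCacheTable.Store("fresh-file-id", &blockMap{[]uint64{4096}, time.Now().Unix() + blockMapCacheExpiretime})

	removeFileopExpireCache()

	if _, ok := blockMapCacheTable.Load("expired-file-id"); ok {
		t.Error("expired entry should have been evicted")
	}
	if _, ok := blockMapCacheTable.Load("fresh-file-id"); !ok {
		t.Error("fresh entry should survive the sweep")
	}
}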
@@ -338,6 +338,8 @@ func main() {
 
 	rpcClientInit()
 
+	fileopInit()
+
 	syncAPIInit()
 
 	sizeSchedulerInit()
@@ -35,13 +35,13 @@ const (
 )
 
 const (
-	seafileServerChannelEvent = "seaf_server.event"
-	seafileServerChannelStats = "seaf_server.stats"
-	emptySHA1                 = "0000000000000000000000000000000000000000"
-	tokenExpireTime           = 7200
-	permExpireTime            = 7200
-	virtualRepoExpireTime     = 7200
-	cleaningIntervalSec       = 300
+	seafileServerChannelEvent  = "seaf_server.event"
+	seafileServerChannelStats  = "seaf_server.stats"
+	emptySHA1                  = "0000000000000000000000000000000000000000"
+	tokenExpireTime            = 7200
+	permExpireTime             = 7200
+	virtualRepoExpireTime      = 7200
+	syncAPICleaningIntervalSec = 300
 )
 
 var (
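Note: only cleaningIntervalSec is actually renamed here (to syncAPICleaningIntervalSec, matching the new fileopCleaningIntervalSec naming); the other six constants appear in the hunk only because the longer identifier makes gofmt realign the whole const block. The values themselves are unchanged.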
@@ -83,10 +83,10 @@ type statusEventData struct {
 }
 
 func syncAPIInit() {
-	ticker := time.NewTicker(time.Second * cleaningIntervalSec)
+	ticker := time.NewTicker(time.Second * syncAPICleaningIntervalSec)
 	go func() {
 		for range ticker.C {
-			removeExpireCache()
+			removeSyncAPIExpireCache()
 		}
 	}()
 }
@@ -1130,7 +1130,7 @@ func publishRepoEvent(rData *repoEventData) {
 	}
 }
 
-func removeExpireCache() {
+func removeSyncAPIExpireCache() {
 	deleteTokens := func(key interface{}, value interface{}) bool {
 		if info, ok := value.(*tokenInfo); ok {
 			if info.expireTime <= time.Now().Unix() {
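Note: this hunk only renames removeExpireCache to removeSyncAPIExpireCache (the call site in syncAPIInit above is updated to match), distinguishing the sync-API cache sweep from the new removeFileopExpireCache block-map sweep; the visible lines show no other change to the function.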