Mirror of https://github.com/haiwen/seafile-server.git
Send repo update event to notification server (#602)
* Send repo update event to notification server
* Add option to manage configuration
* Init default options at start

---------

Co-authored-by: 杨赫然 <heran.yang@seafile.com>
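The new option package reads the notification settings from the [notification] section of seafile.conf. A minimal sketch of such a section, with purely illustrative placeholder values (the host, port, and key below are examples, not taken from this commit):

    [notification]
    enabled = true
    host = notification.example.com
    port = 8083
    jwt_private_key = <shared-secret>

When enabled, onBranchUpdated posts a JSON event of the form {"content": {"type": "repo-update", "repo_id": "...", "commit_id": "..."}} to http://<host>:<port>/events, authenticated with a Seafile-Repo-Token JWT signed with jwt_private_key (see notifRepoUpdate and genJWTToken in the diff below).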
@@ -34,6 +34,7 @@ import (
    "github.com/haiwen/seafile-server/fileserver/commitmgr"
    "github.com/haiwen/seafile-server/fileserver/diff"
    "github.com/haiwen/seafile-server/fileserver/fsmgr"
+   "github.com/haiwen/seafile-server/fileserver/option"
    "github.com/haiwen/seafile-server/fileserver/repomgr"
    log "github.com/sirupsen/logrus"
    "golang.org/x/text/unicode/norm"

@@ -1829,6 +1830,10 @@ func onBranchUpdated(repoID string, commitID string, updateRepoInfo bool) error
        }
    }

+   if option.EnableNotification {
+       notifRepoUpdate(repoID, commitID)
+   }
+
    isVirtual, err := repomgr.IsVirtualRepo(repoID)
    if err != nil {
        return err

@@ -1840,6 +1845,71 @@ func onBranchUpdated(repoID string, commitID string, updateRepoInfo bool) error
    return nil
}

+type notifEvent struct {
+   Content *repoUpdateEvent `json:"content"`
+}
+type repoUpdateEvent struct {
+   Type     string `json:"type"`
+   RepoID   string `json:"repo_id"`
+   CommitID string `json:"commit_id"`
+}
+
+func notifRepoUpdate(repoID string, commitID string) error {
+   content := new(repoUpdateEvent)
+   content.Type = "repo-update"
+   content.RepoID = repoID
+   content.CommitID = commitID
+   event := new(notifEvent)
+   event.Content = content
+   msg, err := json.Marshal(event)
+   if err != nil {
+       log.Printf("failed to encode repo update event: %v", err)
+       return err
+   }
+
+   url := fmt.Sprintf("http://%s/events", option.NotificationURL)
+   token, err := genJWTToken(repoID, "")
+   if err != nil {
+       log.Printf("failed to generate jwt token: %v", err)
+       return err
+   }
+   header := map[string][]string{
+       "Seafile-Repo-Token": {token},
+       "Content-Type":       {"application/json"},
+   }
+   _, _, err = httpCommon("POST", url, header, bytes.NewReader(msg))
+   if err != nil {
+       log.Printf("failed to send repo update event: %v", err)
+       return err
+   }
+
+   return nil
+}
+
+func httpCommon(method, url string, header map[string][]string, reader io.Reader) (int, []byte, error) {
+   req, err := http.NewRequest(method, url, reader)
+   if err != nil {
+       return -1, nil, err
+   }
+   req.Header = header
+
+   rsp, err := http.DefaultClient.Do(req)
+   if err != nil {
+       return -1, nil, err
+   }
+   defer rsp.Body.Close()
+
+   if rsp.StatusCode != http.StatusOK {
+       return rsp.StatusCode, nil, fmt.Errorf("bad response %d for %s", rsp.StatusCode, url)
+   }
+   body, err := io.ReadAll(rsp.Body)
+   if err != nil {
+       return rsp.StatusCode, nil, err
+   }
+
+   return rsp.StatusCode, body, nil
+}
+
func doPostMultiFiles(repo *repomgr.Repo, rootID, parentDir string, dents []*fsmgr.SeafDirent, user string, replace bool, names *[]string) (string, error) {
    if parentDir[0] == '/' {
        parentDir = parentDir[1:]

@@ -2043,18 +2113,18 @@ func indexBlocks(ctx context.Context, repoID string, version int, filePath strin

    chunkJobs := make(chan chunkingData, 10)
    results := make(chan chunkingResult, 10)
-   go createChunkPool(ctx, int(options.maxIndexingThreads), chunkJobs, results)
+   go createChunkPool(ctx, int(option.MaxIndexingThreads), chunkJobs, results)

    var blkSize int64
    var offset int64

-   jobNum := (uint64(size) + options.fixedBlockSize - 1) / options.fixedBlockSize
+   jobNum := (uint64(size) + option.FixedBlockSize - 1) / option.FixedBlockSize
    blkIDs := make([]string, jobNum)

    left := size
    for {
-       if uint64(left) >= options.fixedBlockSize {
-           blkSize = int64(options.fixedBlockSize)
+       if uint64(left) >= option.FixedBlockSize {
+           blkSize = int64(option.FixedBlockSize)
        } else {
            blkSize = left
        }

@@ -2167,7 +2237,7 @@ func chunkingWorker(ctx context.Context, wg *sync.WaitGroup, chunkJobs chan chun

        job := job
        blkID, err := chunkFile(job)
-       idx := job.offset / int64(options.fixedBlockSize)
+       idx := job.offset / int64(option.FixedBlockSize)
        result := chunkingResult{idx, blkID, err}
        res <- result
    }

@@ -2179,7 +2249,7 @@ func chunkFile(job chunkingData) (string, error) {
    offset := job.offset
    filePath := job.filePath
    handler := job.handler
-   blkSize := options.fixedBlockSize
+   blkSize := option.FixedBlockSize
    cryptKey := job.cryptKey
    var file multipart.File
    if handler != nil {

@@ -2275,7 +2345,7 @@ func checkTmpFileList(fsm *recvData) *appError {
        }
    }

-   if options.maxUploadSize > 0 && uint64(totalSize) > options.maxUploadSize {
+   if option.MaxUploadSize > 0 && uint64(totalSize) > option.MaxUploadSize {
        msg := "File size is too large.\n"
        return &appError{nil, msg, seafHTTPResTooLarge}
    }
@@ -11,7 +11,6 @@ import (
    "os/signal"
    "path/filepath"
    "runtime/debug"
-   "strconv"
    "strings"
    "syscall"

@@ -20,6 +19,7 @@ import (
    "github.com/haiwen/seafile-server/fileserver/blockmgr"
    "github.com/haiwen/seafile-server/fileserver/commitmgr"
    "github.com/haiwen/seafile-server/fileserver/fsmgr"
+   "github.com/haiwen/seafile-server/fileserver/option"
    "github.com/haiwen/seafile-server/fileserver/repomgr"
    "github.com/haiwen/seafile-server/fileserver/searpc"
    "github.com/haiwen/seafile-server/fileserver/share"

@@ -38,49 +38,11 @@ var pidFilePath string
var logFp *os.File

var dbType string
-var groupTableName string
-var cloudMode bool
-var privateKey string
var seafileDB, ccnetDB *sql.DB

// when SQLite is used, user and group db are separated.
var userDB, groupDB *sql.DB

-// Storage unit.
-const (
-   KB = 1000
-   MB = 1000000
-   GB = 1000000000
-   TB = 1000000000000
-)
-
-type fileServerOptions struct {
-   host               string
-   port               uint32
-   maxUploadSize      uint64
-   maxDownloadDirSize uint64
-   // Block size for indexing uploaded files
-   fixedBlockSize uint64
-   // Maximum number of goroutines to index uploaded files
-   maxIndexingThreads uint32
-   webTokenExpireTime uint32
-   // File mode for temp files
-   clusterSharedTempFileMode uint32
-   windowsEncoding           string
-   // Timeout for fs-id-list requests.
-   fsIDListRequestTimeout uint32
-   defaultQuota           int64
-   // Profile password
-   profilePassword string
-   enableProfiling bool
-   // Go log level
-   logLevel               string
-   fsCacheLimit           int64
-   fsIdListRequestTimeout int64
-}
-
-var options fileServerOptions
-
func init() {
    flag.StringVar(&centralDir, "F", "", "central config directory")
    flag.StringVar(&dataDir, "d", "", "seafile data directory")

@@ -244,177 +206,6 @@ func loadSeafileDB() {
    dbType = dbEngine
}

-func parseQuota(quotaStr string) int64 {
-   var quota int64
-   var multiplier int64 = GB
-   if end := strings.Index(quotaStr, "kb"); end > 0 {
-       multiplier = KB
-       quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)
-       if err != nil {
-           return InfiniteQuota
-       }
-       quota = quotaInt * multiplier
-   } else if end := strings.Index(quotaStr, "mb"); end > 0 {
-       multiplier = MB
-       quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)
-       if err != nil {
-           return InfiniteQuota
-       }
-       quota = quotaInt * multiplier
-   } else if end := strings.Index(quotaStr, "gb"); end > 0 {
-       multiplier = GB
-       quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)
-       if err != nil {
-           return InfiniteQuota
-       }
-       quota = quotaInt * multiplier
-   } else if end := strings.Index(quotaStr, "tb"); end > 0 {
-       multiplier = TB
-       quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)
-       if err != nil {
-           return InfiniteQuota
-       }
-       quota = quotaInt * multiplier
-   } else {
-       quotaInt, err := strconv.ParseInt(quotaStr, 10, 0)
-       if err != nil {
-           return InfiniteQuota
-       }
-       quota = quotaInt * multiplier
-   }
-
-   return quota
-}
-
-func loadFileServerOptions() {
-   seafileConfPath := filepath.Join(centralDir, "seafile.conf")
-
-   config, err := ini.Load(seafileConfPath)
-   if err != nil {
-       log.Fatalf("Failed to load seafile.conf: %v", err)
-   }
-   cloudMode = false
-   if section, err := config.GetSection("general"); err == nil {
-       if key, err := section.GetKey("cloud_mode"); err == nil {
-           cloudMode, _ = key.Bool()
-       }
-   }
-
-   if section, err := config.GetSection("notification"); err == nil {
-       if key, err := section.GetKey("private_key"); err == nil {
-           privateKey = key.String()
-       }
-   }
-
-   initDefaultOptions()
-
-   if section, err := config.GetSection("httpserver"); err == nil {
-       parseFileServerSection(section)
-   }
-   if section, err := config.GetSection("fileserver"); err == nil {
-       parseFileServerSection(section)
-   }
-
-   if section, err := config.GetSection("quota"); err == nil {
-       if key, err := section.GetKey("default"); err == nil {
-           quotaStr := key.String()
-           options.defaultQuota = parseQuota(quotaStr)
-       }
-   }
-
-   ccnetConfPath := filepath.Join(centralDir, "ccnet.conf")
-   config, err = ini.Load(ccnetConfPath)
-   if err != nil {
-       log.Fatalf("Failed to load ccnet.conf: %v", err)
-   }
-   groupTableName = "Group"
-   if section, err := config.GetSection("GROUP"); err == nil {
-       if key, err := section.GetKey("TABLE_NAME"); err == nil {
-           groupTableName = key.String()
-       }
-   }
-}
-
-func parseFileServerSection(section *ini.Section) {
-   if key, err := section.GetKey("host"); err == nil {
-       options.host = key.String()
-   }
-   if key, err := section.GetKey("port"); err == nil {
-       port, err := key.Uint()
-       if err == nil {
-           options.port = uint32(port)
-       }
-   }
-   if key, err := section.GetKey("max_upload_size"); err == nil {
-       size, err := key.Uint()
-       if err == nil {
-           options.maxUploadSize = uint64(size) * (1 << 20)
-       }
-   }
-   if key, err := section.GetKey("max_indexing_threads"); err == nil {
-       threads, err := key.Uint()
-       if err == nil {
-           options.maxIndexingThreads = uint32(threads)
-       }
-   }
-   if key, err := section.GetKey("fixed_block_size"); err == nil {
-       blkSize, err := key.Uint64()
-       if err == nil {
-           options.fixedBlockSize = blkSize * (1 << 20)
-       }
-   }
-   if key, err := section.GetKey("web_token_expire_time"); err == nil {
-       expire, err := key.Uint()
-       if err == nil {
-           options.webTokenExpireTime = uint32(expire)
-       }
-   }
-   if key, err := section.GetKey("cluster_shared_temp_file_mode"); err == nil {
-       fileMode, err := key.Uint()
-       if err == nil {
-           options.clusterSharedTempFileMode = uint32(fileMode)
-       }
-   }
-   if key, err := section.GetKey("enable_profiling"); err == nil {
-       options.enableProfiling, _ = key.Bool()
-   }
-   if options.enableProfiling {
-       if key, err := section.GetKey("profile_password"); err == nil {
-           options.profilePassword = key.String()
-       } else {
-           log.Fatal("password of profiling must be specified.")
-       }
-   }
-   if key, err := section.GetKey("go_log_level"); err == nil {
-       options.logLevel = key.String()
-   }
-   if key, err := section.GetKey("fs_cache_limit"); err == nil {
-       fsCacheLimit, err := key.Int64()
-       if err == nil {
-           options.fsCacheLimit = fsCacheLimit * 1024 * 1024
-       }
-   }
-   if key, err := section.GetKey("fs_id_list_request_timeout"); err == nil {
-       fsIdListRequestTimeout, err := key.Int64()
-       if err == nil {
-           options.fsIdListRequestTimeout = fsIdListRequestTimeout
-       }
-   }
-}
-
-func initDefaultOptions() {
-   options.host = "0.0.0.0"
-   options.port = 8082
-   options.maxDownloadDirSize = 100 * (1 << 20)
-   options.fixedBlockSize = 1 << 23
-   options.maxIndexingThreads = 1
-   options.webTokenExpireTime = 7200
-   options.clusterSharedTempFileMode = 0600
-   options.defaultQuota = InfiniteQuota
-   options.fsCacheLimit = 2 << 30
-   options.fsIdListRequestTimeout = -1
-}
-
func writePidFile(pid_file_path string) error {
    file, err := os.OpenFile(pid_file_path, os.O_CREATE|os.O_WRONLY, 0664)
    if err != nil {

@@ -470,7 +261,7 @@ func main() {
        log.Fatalf("Failed to convert seafile data dir to absolute path: %v.", err)
    }
    loadSeafileDB()
-   loadFileServerOptions()
+   option.LoadFileServerOptions(centralDir)

    if logFile == "" {
        absLogFile = filepath.Join(absDataDir, "fileserver.log")

@@ -494,7 +285,7 @@ func main() {
    }
    // When logFile is "-", use default output (StdOut)

-   level, err := log.ParseLevel(options.logLevel)
+   level, err := log.ParseLevel(option.LogLevel)
    if err != nil {
        log.Info("use the default log level: info")
        log.SetLevel(log.InfoLevel)

@@ -513,13 +304,13 @@ func main() {

    repomgr.Init(seafileDB)

-   fsmgr.Init(centralDir, dataDir, options.fsCacheLimit)
+   fsmgr.Init(centralDir, dataDir, option.FsCacheLimit)

    blockmgr.Init(centralDir, dataDir)

    commitmgr.Init(centralDir, dataDir)

-   share.Init(ccnetDB, seafileDB, groupTableName, cloudMode)
+   share.Init(ccnetDB, seafileDB, option.GroupTableName, option.CloudMode)

    rpcClientInit()

@@ -540,7 +331,7 @@ func main() {

    log.Print("Seafile file server started.")

-   addr := fmt.Sprintf("%s:%d", options.host, options.port)
+   addr := fmt.Sprintf("%s:%d", option.Host, option.Port)
    err = http.ListenAndServe(addr, router)
    if err != nil {
        log.Printf("File server exiting: %v", err)

@@ -686,7 +477,7 @@ type profileHandler struct {
func (p *profileHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    queries := r.URL.Query()
    password := queries.Get("password")
-   if !options.enableProfiling || password != options.profilePassword {
+   if !option.EnableProfiling || password != option.ProfilePassword {
        http.Error(w, "", http.StatusUnauthorized)
        return
    }
@@ -8,6 +8,7 @@ import (

    "github.com/haiwen/seafile-server/fileserver/commitmgr"
    "github.com/haiwen/seafile-server/fileserver/fsmgr"
+   "github.com/haiwen/seafile-server/fileserver/option"
)

const (

@@ -213,7 +214,7 @@ func mergeTestDelFile() error {

func TestMergeTrees(t *testing.T) {
    commitmgr.Init(mergeTestSeafileConfPath, mergeTestSeafileDataDir)
-   fsmgr.Init(mergeTestSeafileConfPath, mergeTestSeafileDataDir, options.fsCacheLimit)
+   fsmgr.Init(mergeTestSeafileConfPath, mergeTestSeafileDataDir, option.FsCacheLimit)
    err := mergeTestCreateTestDir()
    if err != nil {
        fmt.Printf("failed to create test dir: %v", err)
fileserver/option/option.go (new file, 258 lines)
@@ -0,0 +1,258 @@
package option

import (
    "fmt"
    "log"
    "path/filepath"
    "strconv"
    "strings"

    "gopkg.in/ini.v1"
)

// InfiniteQuota indicates that the quota is unlimited.
const InfiniteQuota = -2

// Storage unit.
const (
    KB = 1000
    MB = 1000000
    GB = 1000000000
    TB = 1000000000000
)

var (
    // fileserver options
    Host                   string
    Port                   uint32
    MaxUploadSize          uint64
    MaxDownloadDirSize     uint64
    FsIdListRequestTimeout int64
    // Block size for indexing uploaded files
    FixedBlockSize uint64
    // Maximum number of goroutines to index uploaded files
    MaxIndexingThreads uint32
    WebTokenExpireTime uint32
    // File mode for temp files
    ClusterSharedTempFileMode uint32
    WindowsEncoding           string
    SkipBlockHash             bool
    FsCacheLimit              int64

    // general options
    CloudMode bool

    // notification server
    EnableNotification bool
    NotificationURL    string
    // notification options
    PrivateKey string

    // GROUP options
    GroupTableName string

    // quota options
    DefaultQuota int64

    // Profile password
    ProfilePassword string
    EnableProfiling bool

    // Go log level
    LogLevel string
)

func initDefaultOptions() {
    Host = "0.0.0.0"
    Port = 8082
    MaxDownloadDirSize = 100 * (1 << 20)
    FixedBlockSize = 1 << 23
    MaxIndexingThreads = 1
    WebTokenExpireTime = 7200
    ClusterSharedTempFileMode = 0600
    DefaultQuota = InfiniteQuota
    FsCacheLimit = 2 << 30
    FsIdListRequestTimeout = -1
}

func LoadFileServerOptions(centralDir string) {
    initDefaultOptions()

    seafileConfPath := filepath.Join(centralDir, "seafile.conf")

    config, err := ini.Load(seafileConfPath)
    if err != nil {
        log.Fatalf("Failed to load seafile.conf: %v", err)
    }
    CloudMode = false
    if section, err := config.GetSection("general"); err == nil {
        if key, err := section.GetKey("cloud_mode"); err == nil {
            CloudMode, _ = key.Bool()
        }
    }

    if section, err := config.GetSection("notification"); err == nil {
        if key, err := section.GetKey("enabled"); err == nil {
            EnableNotification, _ = key.Bool()
        }
    }

    if EnableNotification {
        var notifServer string
        var notifPort uint32
        if section, err := config.GetSection("notification"); err == nil {
            if key, err := section.GetKey("jwt_private_key"); err == nil {
                PrivateKey = key.String()
            }
        }
        if section, err := config.GetSection("notification"); err == nil {
            if key, err := section.GetKey("host"); err == nil {
                notifServer = key.String()
            }
        }
        if section, err := config.GetSection("notification"); err == nil {
            if key, err := section.GetKey("port"); err == nil {
                port, err := key.Uint()
                if err == nil {
                    notifPort = uint32(port)
                }
            }
        }
        NotificationURL = fmt.Sprintf("%s:%d", notifServer, notifPort)
    }

    if section, err := config.GetSection("httpserver"); err == nil {
        parseFileServerSection(section)
    }
    if section, err := config.GetSection("fileserver"); err == nil {
        parseFileServerSection(section)
    }

    if section, err := config.GetSection("quota"); err == nil {
        if key, err := section.GetKey("default"); err == nil {
            quotaStr := key.String()
            DefaultQuota = parseQuota(quotaStr)
        }
    }

    ccnetConfPath := filepath.Join(centralDir, "ccnet.conf")
    config, err = ini.Load(ccnetConfPath)
    if err != nil {
        log.Fatalf("Failed to load ccnet.conf: %v", err)
    }
    GroupTableName = "Group"
    if section, err := config.GetSection("GROUP"); err == nil {
        if key, err := section.GetKey("TABLE_NAME"); err == nil {
            GroupTableName = key.String()
        }
    }
}

func parseFileServerSection(section *ini.Section) {
    if key, err := section.GetKey("host"); err == nil {
        Host = key.String()
    }
    if key, err := section.GetKey("port"); err == nil {
        port, err := key.Uint()
        if err == nil {
            Port = uint32(port)
        }
    }
    if key, err := section.GetKey("max_upload_size"); err == nil {
        size, err := key.Uint()
        if err == nil {
            MaxUploadSize = uint64(size) * (1 << 20)
        }
    }
    if key, err := section.GetKey("max_indexing_threads"); err == nil {
        threads, err := key.Uint()
        if err == nil {
            MaxIndexingThreads = uint32(threads)
        }
    }
    if key, err := section.GetKey("fixed_block_size"); err == nil {
        blkSize, err := key.Uint64()
        if err == nil {
            FixedBlockSize = blkSize * (1 << 20)
        }
    }
    if key, err := section.GetKey("web_token_expire_time"); err == nil {
        expire, err := key.Uint()
        if err == nil {
            WebTokenExpireTime = uint32(expire)
        }
    }
    if key, err := section.GetKey("cluster_shared_temp_file_mode"); err == nil {
        fileMode, err := key.Uint()
        if err == nil {
            ClusterSharedTempFileMode = uint32(fileMode)
        }
    }
    if key, err := section.GetKey("enable_profiling"); err == nil {
        EnableProfiling, _ = key.Bool()
    }
    if EnableProfiling {
        if key, err := section.GetKey("profile_password"); err == nil {
            ProfilePassword = key.String()
        } else {
            log.Fatal("password of profiling must be specified.")
        }
    }
    if key, err := section.GetKey("go_log_level"); err == nil {
        LogLevel = key.String()
    }
    if key, err := section.GetKey("fs_cache_limit"); err == nil {
        fsCacheLimit, err := key.Int64()
        if err == nil {
            FsCacheLimit = fsCacheLimit * 1024 * 1024
        }
    }
    if key, err := section.GetKey("fs_id_list_request_timeout"); err == nil {
        fsIdListRequestTimeout, err := key.Int64()
        if err == nil {
            FsIdListRequestTimeout = fsIdListRequestTimeout
        }
    }
}

func parseQuota(quotaStr string) int64 {
    var quota int64
    var multiplier int64 = GB
    if end := strings.Index(quotaStr, "kb"); end > 0 {
        multiplier = KB
        quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)
        if err != nil {
            return InfiniteQuota
        }
        quota = quotaInt * multiplier
    } else if end := strings.Index(quotaStr, "mb"); end > 0 {
        multiplier = MB
        quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)
        if err != nil {
            return InfiniteQuota
        }
        quota = quotaInt * multiplier
    } else if end := strings.Index(quotaStr, "gb"); end > 0 {
        multiplier = GB
        quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)
        if err != nil {
            return InfiniteQuota
        }
        quota = quotaInt * multiplier
    } else if end := strings.Index(quotaStr, "tb"); end > 0 {
        multiplier = TB
        quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)
        if err != nil {
            return InfiniteQuota
        }
        quota = quotaInt * multiplier
    } else {
        quotaInt, err := strconv.ParseInt(quotaStr, 10, 0)
        if err != nil {
            return InfiniteQuota
        }
        quota = quotaInt * multiplier
    }

    return quota
}
@@ -4,6 +4,7 @@ import (
    "database/sql"
    "fmt"

+   "github.com/haiwen/seafile-server/fileserver/option"
    "github.com/haiwen/seafile-server/fileserver/repomgr"
)

@@ -70,7 +71,7 @@ func getUserQuota(user string) (int64, error) {
    }

    if quota <= 0 {
-       quota = options.defaultQuota
+       quota = option.DefaultQuota
    }

    return quota, nil
@@ -24,6 +24,7 @@ import (
    "github.com/haiwen/seafile-server/fileserver/commitmgr"
    "github.com/haiwen/seafile-server/fileserver/diff"
    "github.com/haiwen/seafile-server/fileserver/fsmgr"
+   "github.com/haiwen/seafile-server/fileserver/option"
    "github.com/haiwen/seafile-server/fileserver/repomgr"
    "github.com/haiwen/seafile-server/fileserver/share"
    "github.com/haiwen/seafile-server/fileserver/utils"

@@ -726,8 +727,9 @@ func getJWTTokenCB(rsp http.ResponseWriter, r *http.Request) *appError {
    vars := mux.Vars(r)
    repoID := vars["repoid"]

-   if privateKey == "" {
-       return nil
+   if !option.EnableNotification {
+       err := fmt.Errorf("notification server is not enabled")
+       return &appError{err, "", http.StatusInternalServerError}
    }

    user, appErr := validateToken(r, repoID, false)

@@ -735,16 +737,8 @@ func getJWTTokenCB(rsp http.ResponseWriter, r *http.Request) *appError {
        return appErr
    }

-   claims := MyClaims{
-       time.Now().Add(time.Hour * 72).Unix(),
-       repoID,
-       user,
-   }
-
-   token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), &claims)
-   tokenString, err := token.SignedString([]byte(privateKey))
+   tokenString, err := genJWTToken(repoID, user)
    if err != nil {
        err := fmt.Errorf("failed to gen jwt token for repo %s", repoID)
        return &appError{err, "", http.StatusInternalServerError}
    }
-

@@ -755,6 +749,23 @@ func getJWTTokenCB(rsp http.ResponseWriter, r *http.Request) *appError {
    return nil
}

+func genJWTToken(repoID, user string) (string, error) {
+   claims := MyClaims{
+       time.Now().Add(time.Hour * 72).Unix(),
+       repoID,
+       user,
+   }
+
+   token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), &claims)
+   tokenString, err := token.SignedString([]byte(option.PrivateKey))
+   if err != nil {
+       err := fmt.Errorf("failed to gen jwt token for repo %s", repoID)
+       return "", err
+   }
+
+   return tokenString, nil
+}
+
func isValidUUID(u string) bool {
    _, err := uuid.Parse(u)
    return err == nil

@@ -1399,9 +1410,9 @@ func collectDirIDs(ctx context.Context, baseDir string, dirs []*fsmgr.SeafDirent
        info.results = append(info.results, dir1.ID)
    }

-   if options.fsIdListRequestTimeout > 0 {
+   if option.FsIdListRequestTimeout > 0 {
        now := time.Now().Unix()
-       if now-info.startTime > options.fsIdListRequestTimeout {
+       if now-info.startTime > option.FsIdListRequestTimeout {
            info.isTimeout = true
            return ErrTimeout
        }
@@ -13,6 +13,7 @@ import (
    "github.com/haiwen/seafile-server/fileserver/commitmgr"
    "github.com/haiwen/seafile-server/fileserver/diff"
    "github.com/haiwen/seafile-server/fileserver/fsmgr"
+   "github.com/haiwen/seafile-server/fileserver/option"
    "github.com/haiwen/seafile-server/fileserver/repomgr"
    "github.com/haiwen/seafile-server/fileserver/workerpool"
    log "github.com/sirupsen/logrus"

@@ -254,7 +255,7 @@ func handleMissingVirtualRepo(repo *repomgr.Repo, head *commitmgr.Commit, vInfo
    if err != nil || oldDirID == "" {

        if err == fsmgr.ErrPathNoExist {
-           repomgr.DelVirtualRepo(vInfo.RepoID, cloudMode)
+           repomgr.DelVirtualRepo(vInfo.RepoID, option.CloudMode)
        }
        err := fmt.Errorf("failed to find %s under commit %s in repo %s", parPath, parent.CommitID, repo.StoreID)
        return "", err

@@ -296,7 +297,7 @@ func handleMissingVirtualRepo(repo *repomgr.Repo, head *commitmgr.Commit, vInfo
    }

    if !isRenamed {
-       repomgr.DelVirtualRepo(vInfo.RepoID, cloudMode)
+       repomgr.DelVirtualRepo(vInfo.RepoID, option.CloudMode)
    }

    return returnPath, nil
@@ -1,6 +0,0 @@
-[general]
-host = 0.0.0.0
-port = 8083
-log_level = info
-private_key = "my primary key"
-notification_token = zzzz