Mirror of https://github.com/haiwen/seafile-server.git (synced 2025-05-11 01:27:27 +00:00)

Commit 11d4741224: Merge branch '11.0'
@@ -14,9 +14,9 @@ endif

MAKE_SERVER = server tools $(MAKE_CONTROLLER) $(MAKE_FUSE)

SUBDIRS = include lib common python $(MAKE_SERVER) doc
SUBDIRS = include lib common python $(MAKE_SERVER) doc scripts

DIST_SUBDIRS = include lib common python server tools controller fuse doc
DIST_SUBDIRS = include lib common python server tools controller fuse doc scripts

INTLTOOL = \
	intltool-extract.in \
@@ -356,6 +356,8 @@ notify_repo_update (const char *repo_id, const char *commit_id)

static void
on_branch_updated (SeafBranchManager *mgr, SeafBranch *branch)
{
    if (seaf->is_repair)
        return;

    seaf_repo_manager_update_repo_info (seaf->repo_mgr, branch->repo_id, branch->commit_id);

    notify_repo_update(branch->repo_id, branch->commit_id);
@@ -13,6 +13,31 @@ merge_trees_recursive (const char *store_id, int version,
                       const char *basedir,
                       MergeOptions *opt);

static const char *
get_nickname_by_modifier (GHashTable *email_to_nickname, const char *modifier)
{
    const char *nickname = NULL;

    if (!modifier) {
        return NULL;
    }

    nickname = g_hash_table_lookup (email_to_nickname, modifier);
    if (nickname) {
        return nickname;
    }

    char *sql = "SELECT nickname from profile_profile WHERE user = ?";
    nickname = seaf_db_statement_get_string(seaf->seahub_db, sql, 1, "string", modifier);

    if (!nickname) {
        nickname = modifier;
    }
    g_hash_table_insert (email_to_nickname, g_strdup(modifier), g_strdup(nickname));

    return nickname;
}

static char *
merge_conflict_filename (const char *store_id, int version,
                         MergeOptions *opt,
@@ -20,6 +45,7 @@ merge_conflict_filename (const char *store_id, int version,
                         const char *filename)
{
    char *path = NULL, *modifier = NULL, *conflict_name = NULL;
    const char *nickname = NULL;
    gint64 mtime;
    SeafCommit *commit;

@@ -46,7 +72,11 @@ merge_conflict_filename (const char *store_id, int version,
        seaf_commit_unref (commit);
    }

    conflict_name = gen_conflict_path (filename, modifier, mtime);
    nickname = modifier;
    if (seaf->seahub_db)
        nickname = get_nickname_by_modifier (opt->email_to_nickname, modifier);

    conflict_name = gen_conflict_path (filename, nickname, mtime);

out:
    g_free (path);
@@ -61,6 +91,7 @@ merge_conflict_dirname (const char *store_id, int version,
                        const char *dirname)
{
    char *modifier = NULL, *conflict_name = NULL;
    const char *nickname = NULL;
    SeafCommit *commit;

    commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
@@ -74,22 +105,23 @@ merge_conflict_dirname (const char *store_id, int version,
    modifier = g_strdup(commit->creator_name);
    seaf_commit_unref (commit);

    conflict_name = gen_conflict_path (dirname, modifier, (gint64)time(NULL));
    nickname = modifier;
    if (seaf->seahub_db)
        nickname = get_nickname_by_modifier (opt->email_to_nickname, modifier);

    conflict_name = gen_conflict_path (dirname, nickname, (gint64)time(NULL));

out:
    g_free (modifier);
    return conflict_name;
}

static int
merge_entries (const char *store_id, int version,
               int n, SeafDirent *dents[],
               const char *basedir,
               GList **dents_out,
               MergeOptions *opt)
int twoway_merge(const char *store_id, int version, const char *basedir,
                 SeafDirent *dents[], GList **dents_out, struct MergeOptions *opt)
{
    SeafDirent *files[3];
    SeafDirent *files[2];
    int i;
    int n = opt->n_ways;

    memset (files, 0, sizeof(files[0])*n);
    for (i = 0; i < n; ++i) {
@@ -97,15 +129,66 @@ merge_entries (const char *store_id, int version,
        files[i] = dents[i];
    }

    /* If we're running 2-way merge, or the caller requires not to
     * actually merge contents, just call the callback function.
     */
    if (n == 2 || !opt->do_merge)
        return opt->callback (basedir, files, opt);
    SeafDirent *head, *remote;
    char *conflict_name;

    /* Otherwise, we're doing a real 3-way merge of the trees.
     * It means merge files and handle any conflicts.
     */
    head = files[0];
    remote = files[1];

    if (head && remote) {
        if (strcmp (head->id, remote->id) == 0) {
            // file match
            seaf_debug ("%s%s: files match\n", basedir, head->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));
        } else {
            // file content conflict
            seaf_debug ("%s%s: files conflict\n", basedir, head->name);
            conflict_name = merge_conflict_filename(store_id, version,
                                                    opt,
                                                    basedir,
                                                    head->name);
            if (!conflict_name)
                return -1;

            g_free (remote->name);
            remote->name = conflict_name;
            remote->name_len = strlen (remote->name);

            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));

            opt->conflict = TRUE;
        }
    } else if (!head && remote) {
        // file not in head, but in remote
        seaf_debug ("%s%s: added in remote\n", basedir, remote->name);
        *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));
    } else if (head && !remote) {
        // file in head, but not in remote
        seaf_debug ("%s%s: added in head\n", basedir, head->name);
        *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));
    }

    return 0;
}

static int
threeway_merge (const char *store_id, int version,
                SeafDirent *dents[],
                const char *basedir,
                GList **dents_out,
                MergeOptions *opt)
{
    SeafDirent *files[3];
    int i;
    gint64 curr_time;
    int n = opt->n_ways;

    memset (files, 0, sizeof(files[0])*n);
    for (i = 0; i < n; ++i) {
        if (dents[i] && S_ISREG(dents[i]->mode))
            files[i] = dents[i];
    }

    SeafDirent *base, *head, *remote;
    char *conflict_name;
@@ -323,6 +406,25 @@ merge_entries (const char *store_id, int version,
    return 0;
}

static int
merge_entries (const char *store_id, int version,
               int n, SeafDirent *dents[],
               const char *basedir,
               GList **dents_out,
               MergeOptions *opt)
{
    /* If we're running 2-way merge, it means merge files base on head and remote.
     */
    if (n == 2)
        return twoway_merge (store_id, version, basedir, dents, dents_out, opt);

    /* Otherwise, we're doing a real 3-way merge of the trees.
     * It means merge files and handle any conflicts.
     */

    return threeway_merge (store_id, version, dents, basedir, dents_out, opt);
}

static int
merge_directories (const char *store_id, int version,
                   int n, SeafDirent *dents[],
@@ -345,7 +447,7 @@ merge_directories (const char *store_id, int version,

    seaf_debug ("dir_mask = %d\n", dir_mask);

    if (n == 3 && opt->do_merge) {
    if (n == 3) {
        switch (dir_mask) {
        case 0:
            g_return_val_if_reached (-1);
@@ -407,6 +509,33 @@ merge_directories (const char *store_id, int version,
        default:
            g_return_val_if_reached (-1);
        }
    } else if (n == 2) {
        switch (dir_mask) {
        case 0:
            g_return_val_if_reached (-1);
        case 1:
            /*head is dir, remote is not dir*/
            seaf_debug ("%s%s: only head is dir\n", basedir, dents[0]->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[0]));
            return 0;
        case 2:
            /*head is not dir, remote is dir*/
            seaf_debug ("%s%s: only remote is dir\n", basedir, dents[1]->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1]));
            return 0;
        case 3:
            if (strcmp (dents[0]->id, dents[1]->id) == 0) {
                seaf_debug ("%s%s: dir is the same in head and remote\n",
                            basedir, dents[0]->name);
                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1]));
                return 0;
            }
            seaf_debug ("%s%s: dir is changed in head and remote, merge recursively\n",
                        basedir, dents[0]->name);
            break;
        default:
            g_return_val_if_reached (-1);
        }
    }

    memset (sub_dirs, 0, sizeof(sub_dirs[0])*n);
@@ -433,7 +562,7 @@ merge_directories (const char *store_id, int version,

    g_free (new_basedir);

    if (n == 3 && opt->do_merge) {
    if (n == 3) {
        if (dir_mask == 3 || dir_mask == 6 || dir_mask == 7) {
            merged_dent = seaf_dirent_dup (dents[1]);
            memcpy (merged_dent->id, opt->merged_tree_root, 40);
@@ -443,6 +572,12 @@ merge_directories (const char *store_id, int version,
            memcpy (merged_dent->id, opt->merged_tree_root, 40);
            *dents_out = g_list_prepend (*dents_out, merged_dent);
        }
    } else if (n == 2) {
        if (dir_mask == 3) {
            merged_dent = seaf_dirent_dup (dents[1]);
            memcpy (merged_dent->id, opt->merged_tree_root, 40);
            *dents_out = g_list_prepend (*dents_out, merged_dent);
        }
    }

free_sub_dirs:
@@ -539,7 +674,7 @@ merge_trees_recursive (const char *store_id, int version,
        }
    }

    if (n == 3 && opt->do_merge) {
    if (n == 3) {
        merged_dents = g_list_sort (merged_dents, compare_dirents);
        merged_tree = seaf_dir_new (NULL, merged_dents,
                                    dir_version_from_repo_version(version));
@@ -556,6 +691,23 @@ merge_trees_recursive (const char *store_id, int version,
                seaf_warning ("Failed to save merged tree %s:%s.\n", store_id, basedir);
            }
        }
    } else if (n == 2) {
        merged_dents = g_list_sort (merged_dents, compare_dirents);
        merged_tree = seaf_dir_new (NULL, merged_dents,
                                    dir_version_from_repo_version(version));

        memcpy (opt->merged_tree_root, merged_tree->dir_id, 40);

        if ((trees[0] && strcmp (trees[0]->dir_id, merged_tree->dir_id) == 0) ||
            (trees[1] && strcmp (trees[1]->dir_id, merged_tree->dir_id) == 0)) {
            seaf_dir_free (merged_tree);
        } else {
            ret = seaf_dir_save (seaf->fs_mgr, store_id, version, merged_tree);
            seaf_dir_free (merged_tree);
            if (ret < 0) {
                seaf_warning ("Failed to save merged tree %s:%s.\n", store_id, basedir);
            }
        }
    }

    return ret;
@@ -570,6 +722,11 @@ seaf_merge_trees (const char *store_id, int version,

    g_return_val_if_fail (n == 2 || n == 3, -1);

    opt->email_to_nickname = g_hash_table_new_full(g_str_hash,
                                                   g_str_equal,
                                                   g_free,
                                                   g_free);

    trees = g_new0 (SeafDir *, n);
    for (i = 0; i < n; ++i) {
        root = seaf_fs_manager_get_seafdir (seaf->fs_mgr, store_id, version, roots[i]);
@@ -587,5 +744,7 @@ seaf_merge_trees (const char *store_id, int version,
        seaf_dir_free (trees[i]);
    g_free (trees);

    g_hash_table_destroy (opt->email_to_nickname);

    return ret;
}
@@ -25,6 +25,8 @@ typedef struct MergeOptions {
    char merged_tree_root[41]; /* merge result */
    int visit_dirs;
    gboolean conflict;

    GHashTable *email_to_nickname;
} MergeOptions;

int
@@ -379,3 +379,129 @@ load_ccnet_database_config (SeafileSession *session)
    g_free (engine);
    return ret;
}

static char *
parse_seahub_db_config ()
{
    char buf[1024];
    GError *error = NULL;
    int retcode = 0;
    char *child_stdout = NULL;
    char *child_stderr = NULL;

    char *binary_path = g_find_program_in_path ("parse_seahub_db.py");

    snprintf (buf,
              sizeof(buf),
              "python3 %s",
              binary_path);
    g_spawn_command_line_sync (buf,
                               &child_stdout,
                               &child_stderr,
                               &retcode,
                               &error);

    if (error != NULL) {
        seaf_warning ("Failed to run python parse_seahub_db.py: %s\n", error->message);
        g_free (binary_path);
        g_free (child_stdout);
        g_free (child_stderr);
        g_clear_error (&error);
        return NULL;
    }
    g_spawn_check_exit_status (retcode, &error);
    if (error != NULL) {
        seaf_warning ("Failed to run python parse_seahub_db.py: %s\n", error->message);
        g_free (binary_path);
        g_free (child_stdout);
        g_free (child_stderr);
        g_clear_error (&error);
        return NULL;
    }

    g_free (binary_path);
    g_free (child_stderr);
    return child_stdout;
}

int
load_seahub_database_config (SeafileSession *session)
{
    int ret = 0;
    json_t *object = NULL;
    json_error_t err;
    const char *engine = NULL, *name = NULL, *user = NULL, *password = NULL, *host = NULL, *charset = NULL;
    int port;
    char *json_str = NULL;

    json_str = parse_seahub_db_config ();
    if (!json_str){
        seaf_warning ("Failed to parse seahub database config.\n");
        ret = -1;
        goto out;
    }

    object = json_loadb (json_str, strlen(json_str), 0, &err);
    if (!object) {
        seaf_warning ("Failed to load seahub db json: %s: %s\n", json_str, err.text);
        ret = -1;
        goto out;
    }

    engine = json_object_get_string_member (object, "ENGINE");
    name = json_object_get_string_member (object, "NAME");
    user = json_object_get_string_member (object, "USER");
    password = json_object_get_string_member (object, "PASSWORD");
    host = json_object_get_string_member (object, "HOST");
    charset = json_object_get_string_member (object, "CHARSET");
    port = json_object_get_int_member (object, "PORT");
    if (port <= 0) {
        port = MYSQL_DEFAULT_PORT;
    }

    if (!engine || strstr (engine, "sqlite") != NULL) {
        goto out;
    }
#ifdef HAVE_MYSQL
    else if (strstr (engine, "mysql") != NULL) {
        seaf_message("Use database Mysql\n");
        if (!host) {
            seaf_warning ("Seahub DB host not set in config.\n");
            ret = -1;
            goto out;
        }
        if (!user) {
            seaf_warning ("Seahub DB user not set in config.\n");
            ret = -1;
            goto out;
        }
        if (!password) {
            seaf_warning ("Seahub DB password not set in config.\n");
            ret = -1;
            goto out;
        }
        if (!name) {
            seaf_warning ("Seahub DB name not set in config.\n");
            ret = -1;
            goto out;
        }

        session->seahub_db = seaf_db_new_mysql (host, port, user, password, name, NULL, FALSE, FALSE, NULL, charset, DEFAULT_MAX_CONNECTIONS);
        if (!session->seahub_db) {
            seaf_warning ("Failed to open seahub database.\n");
            ret = -1;
            goto out;
        }
    }
#endif
    else {
        seaf_warning ("Unknown database type: %s.\n", engine);
        ret = -1;
    }

out:
    if (object)
        json_decref (object);
    g_free (json_str);
    return ret;
}
@@ -16,4 +16,7 @@ load_database_config (struct _SeafileSession *session);
int
load_ccnet_database_config (struct _SeafileSession *session);

int
load_seahub_database_config (SeafileSession *session);

#endif
@@ -321,6 +321,7 @@ AC_CONFIG_FILES(
    controller/Makefile
    tools/Makefile
    doc/Makefile
    scripts/Makefile
)

AC_OUTPUT
@@ -149,6 +149,7 @@ func accessCB(rsp http.ResponseWriter, r *http.Request) *appError {
	}

	now := time.Now()
	rsp.Header().Set("ETag", objID)
	rsp.Header().Set("Last-Modified", now.Format("Mon, 2 Jan 2006 15:04:05 GMT"))
	rsp.Header().Set("Cache-Control", "max-age=3600")
@@ -5,15 +5,18 @@ import (
	"crypto/tls"
	"crypto/x509"
	"database/sql"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"os/exec"
	"os/signal"
	"path/filepath"
	"runtime/debug"
	"strconv"
	"strings"
	"syscall"

@@ -41,7 +44,7 @@ var pidFilePath string
var logFp *os.File

var dbType string
var seafileDB, ccnetDB *sql.DB
var seafileDB, ccnetDB, seahubDB *sql.DB

// when SQLite is used, user and group db are separated.
var userDB, groupDB *sql.DB
@@ -269,6 +272,69 @@ func loadSeafileDB() {
	dbType = dbEngine
}

func loadSeahubDB() {
	scriptPath, err := exec.LookPath("parse_seahub_db.py")
	if err != nil {
		log.Warnf("Failed to find script of parse_seahub_db.py: %v", err)
		return
	}
	cmd := exec.Command("python3", scriptPath)
	dbData, err := cmd.CombinedOutput()
	if err != nil {
		log.Warnf("Failed to run python parse_seahub_db.py: %v", err)
		return
	}

	dbConfig := make(map[string]string)

	err = json.Unmarshal(dbData, &dbConfig)
	if err != nil {
		log.Warnf("Failed to decode seahub database json file: %v", err)
		return
	}

	dbEngine := dbConfig["ENGINE"]
	dbName := dbConfig["NAME"]
	user := dbConfig["USER"]
	password := dbConfig["PASSWORD"]
	host := dbConfig["HOST"]
	portStr := dbConfig["PORT"]

	if strings.Index(dbEngine, "mysql") >= 0 {
		port, err := strconv.ParseInt(portStr, 10, 64)
		if err != nil || port <= 0 {
			port = 3306
		}
		if dbName == "" {
			log.Warnf("Seahub DB name not set in config")
			return
		}
		if user == "" {
			log.Warnf("Seahub DB user not set in config")
			return
		}
		if password == "" {
			log.Warnf("Seahub DB password not set in config")
			return
		}
		if host == "" {
			log.Warnf("Seahub DB host not set in config")
			return
		}

		dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=%t", user, password, host, port, dbName, false)

		seahubDB, err = sql.Open("mysql", dsn)
		if err != nil {
			log.Warnf("Failed to open database: %v", err)
		}
	} else if strings.Index(dbEngine, "sqlite") >= 0 {
		return
	} else {
		log.Warnf("Unsupported database %s.", dbEngine)
	}
}

func writePidFile(pid_file_path string) error {
	file, err := os.OpenFile(pid_file_path, os.O_CREATE|os.O_WRONLY, 0664)
	if err != nil {
@@ -367,6 +433,8 @@ func main() {
		fp.Close()
	}

	loadSeahubDB()

	repomgr.Init(seafileDB)

	fsmgr.Init(centralDir, dataDir, option.FsCacheLimit)
@@ -12,10 +12,11 @@ import (
)

type mergeOptions struct {
	remoteRepoID string
	remoteHead   string
	mergedRoot   string
	conflict     bool
	remoteRepoID    string
	remoteHead      string
	mergedRoot      string
	conflict        bool
	emailToNickname map[string]string
}

func mergeTrees(storeID string, roots []string, opt *mergeOptions) error {
@@ -24,6 +25,8 @@ func mergeTrees(storeID string, roots []string, opt *mergeOptions) error {
		return err
	}

	opt.emailToNickname = make(map[string]string)

	var trees []*fsmgr.SeafDir
	for i := 0; i < 3; i++ {
		dir, err := fsmgr.GetSeafdir(storeID, roots[i])
@@ -335,7 +338,9 @@ func mergeConflictFileName(storeID string, opt *mergeOptions, baseDir, fileName
		mtime = time.Now().Unix()
	}

	conflictName := genConflictPath(fileName, modifier, mtime)
	nickname := getNickNameByModifier(opt.emailToNickname, modifier)

	conflictName := genConflictPath(fileName, nickname, mtime)

	return conflictName, nil
}
@@ -366,6 +371,29 @@ func genConflictPath(originPath, modifier string, mtime int64) string {
	return conflictPath
}

func getNickNameByModifier(emailToNickname map[string]string, modifier string) string {
	if modifier == "" {
		return ""
	}
	nickname, ok := emailToNickname[modifier]
	if ok {
		return nickname
	}
	if seahubDB != nil {
		sqlStr := "SELECT nickname from profile_profile WHERE user = ?"
		row := seahubDB.QueryRow(sqlStr, modifier)
		row.Scan(&nickname)
	}

	if nickname == "" {
		nickname = modifier
	}

	emailToNickname[modifier] = nickname

	return nickname
}

func getFileModifierMtime(repoID, storeID, head, filePath string) (string, int64, error) {
	commit, err := commitmgr.Load(repoID, head)
	if err != nil {
@@ -30,24 +30,27 @@ var mergeTestTree4CommitID string
var mergeTestTree5CommitID string

/*
test directory structure:
tree1
|--bbb
|-- testfile(size:1)
test directory structure:
tree1
|--bbb

tree2
|--bbb
|-- testfile(size:10)
|-- testfile(size:1)

tree3
|--bbb
tree2
|--bbb

tree4
|--bbb
|-- testfile(size:100)
|-- testfile(size:10)

tree5
|--
tree3
|--bbb

tree4
|--bbb

|-- testfile(size:100)

tree5
|--
*/
func mergeTestCreateTestDir() error {
	modeDir := uint32(syscall.S_IFDIR | 0644)
@@ -26,6 +26,7 @@ struct _SeafileSession {
    GKeyFile *ccnet_config;
    SeafDB *db;
    SeafDB *ccnet_db;
    SeafDB *seahub_db;

    SeafBlockManager *block_mgr;
    SeafFSManager *fs_mgr;
@@ -295,15 +295,14 @@ class SeafileAPI(object):
                                                filename, username, head_id)

    '''
    If you want to delete multiple files in a batch, @filename should be in
    the following format: 'filename1\tfilename2\tfilename3'
    If you want to delete multiple files in a batch, @filename should be json array
    '''
    def del_file(self, repo_id, parent_dir, filename, username):
        return seafserv_threaded_rpc.del_file(repo_id, parent_dir, filename, username)

    '''
    If you want to move or copy multiple files in a batch, @src_filename and @dst_filename
    should be in the following format: 'filename1\tfilename2\tfilename3',make sure the number of files
    should be json array, make sure the number of files
    in @src_filename and @dst_filename parameters match
    '''
    def copy_file(self, src_repo, src_dir, src_filename, dst_repo,
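For context, here is a minimal sketch of the new calling convention described in the docstrings above. It is illustrative only: the repo ID, path and file names are placeholders, and seafile_api is assumed to be the usual SeafileAPI instance exposed by seaserv.

import json
from seaserv import seafile_api

repo_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"  # placeholder repo ID

# Batch delete: the filename argument is now a JSON array string
# instead of the old tab-separated 'a.txt\tb.txt' form.
filenames = json.dumps(["a.txt", "b.txt"])
seafile_api.del_file(repo_id, "/docs", filenames, "user@example.com")

# copy_file/move_file take @src_filename and @dst_filename in the same
# JSON-array form, and both arrays must contain the same number of entries.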
scripts/Makefile.am (new file, 3 lines)
@@ -0,0 +1,3 @@
bin_SCRIPTS = parse_seahub_db.py

EXTRA_DIST = parse_seahub_db.py

scripts/parse_seahub_db.py (new executable file, 6 lines)
@@ -0,0 +1,6 @@
import json
import seahub_settings

db_infos = seahub_settings.DATABASES['default']

print(json.dumps(db_infos))
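The C and Go loaders elsewhere in this commit read the keys ENGINE, NAME, USER, PASSWORD, HOST and PORT (plus CHARSET when present) from this script's stdout. Purely as an illustration, with made-up values and assuming a MySQL-backed Seahub, the relevant seahub_settings.py fragment and the resulting output would look roughly like this:

# hypothetical seahub_settings.py fragment
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'seahub_db',
        'USER': 'seafile',
        'PASSWORD': 'secret',
        'HOST': '127.0.0.1',
        'PORT': '3306',
    }
}

# parse_seahub_db.py then prints the 'default' dict as a single JSON object, e.g.
# {"ENGINE": "django.db.backends.mysql", "NAME": "seahub_db", "USER": "seafile",
#  "PASSWORD": "secret", "HOST": "127.0.0.1", "PORT": "3306"}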
@@ -1110,6 +1110,16 @@ start_download_zip_file (evhtp_request_t *req, const char *token,
    return 0;
}

static void
set_etag (evhtp_request_t *req,
          const char *file_id)
{
    evhtp_kv_t *kv;

    kv = evhtp_kv_new ("ETag", file_id, 1, 1);
    evhtp_kvs_add_kv (req->headers_out, kv);
}

static gboolean
can_use_cached_content (evhtp_request_t *req)
{
@@ -1300,6 +1310,8 @@ access_cb(evhtp_request_t *req, void *arg)
        goto on_error;
    }

    set_etag (req, data);

    if (can_use_cached_content (req)) {
        goto success;
    }
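These hunks, together with the Go handler earlier that sets ETag, Last-Modified and Cache-Control, let clients revalidate cached file downloads. A hedged sketch of how a client could use this, assuming the usual fileserver URL layout with an access token; the host, token and file name are placeholders, and the exact response to a matching ETag depends on what can_use_cached_content() checks:

import requests

url = "https://seafile.example.com/seafhttp/files/<access-token>/report.pdf"  # placeholder

r1 = requests.get(url)
etag = r1.headers.get("ETag")  # the object/file ID set by set_etag()

# Revalidate later; if the content is unchanged the server can short-circuit
# instead of streaming the file again.
headers = {"If-None-Match": etag} if etag else {}
r2 = requests.get(url, headers=headers)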
@@ -24,6 +24,7 @@ struct _SeafileSession {
    GKeyFile *ccnet_config;
    SeafDB *db;
    SeafDB *ccnet_db;
    SeafDB *seahub_db;

    SeafBlockManager *block_mgr;
    SeafFSManager *fs_mgr;
@@ -932,4 +932,7 @@ seaf_repo_manager_set_repo_status(SeafRepoManager *mgr,
int
seaf_repo_manager_get_repo_status(SeafRepoManager *mgr,
                                  const char *repo_id);

int
seaf_repo_manager_repair_virtual_repo (char *repo_id);
#endif
server/repo-op.c (214 lines changed)
@@ -2811,14 +2811,14 @@ seaf_repo_manager_copy_multiple_files (SeafRepoManager *mgr,
    dst_names = json_to_file_list (dst_filenames);
    if (!src_names || !dst_names) {
        ret = -1;
        seaf_warning ("[copy files] Bad args: Load filenames to json failed.\n");
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Load filenames to json failed");
        goto out;
    }
    file_num = g_list_length (src_names);
    int dst_file_num = g_list_length (dst_names);
    if (dst_file_num != file_num) {
        ret = -1;
        seaf_warning ("[copy files] Bad args.\n");
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "The number of files in the parameters does not match");
        goto out;
    }

@@ -2848,7 +2848,7 @@ seaf_repo_manager_copy_multiple_files (SeafRepoManager *mgr,
        name = ptr->data;
        if (strcmp(name, "") == 0) {
            ret = -1;
            seaf_warning ("[copy files] Bad args: Empty src_filenames.\n");
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Empty src_filenames");
            goto out;
        }
        src_dents[i] = g_hash_table_lookup(dirent_hash, name);
@@ -2867,7 +2867,7 @@ seaf_repo_manager_copy_multiple_files (SeafRepoManager *mgr,
        name = ptr->data;
        if (strcmp(name, "") == 0) {
            ret = -1;
            seaf_warning ("[copy files] Bad args: Empty dst_filenames.\n");
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Empty dst_filenames");
            goto out;
        }
        /* duplicate src dirents with new names */
@@ -2970,6 +2970,7 @@ out:

static int
move_file_same_repo (const char *repo_id,
                     const char *src_filenames,
                     const char *src_path, SeafDirent *src_dents[],
                     const char *dst_path, SeafDirent *dst_dents[],
                     int file_num,
@@ -2982,26 +2983,17 @@ move_file_same_repo (const char *repo_id,
    char *root_id_after_put = NULL, *root_id = NULL;
    char buf[SEAF_PATH_MAX];
    int ret = 0, i = 0;
    GString *filenames_str = NULL;

    GET_REPO_OR_FAIL(repo, repo_id);
    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);

    filenames_str = g_string_new ("");
    root_id_after_put = head_commit->root_id;

    GList *dent_list = NULL;
    GList *name_list = NULL;
    g_string_append_printf (filenames_str, "[");
    for (i = 0; i < file_num; i++) {
        dent_list = g_list_append (dent_list, dst_dents[i]);
        if (i > 0) {
            g_string_append_printf (filenames_str, ", \"%s\"", src_dents[i]->name);
        } else {
            g_string_append_printf (filenames_str, "\"%s\"", src_dents[i]->name);
        }
    }
    g_string_append_printf (filenames_str, "]");
    if (*dst_path == '/')
        dst_path = dst_path + 1;

@@ -3015,7 +3007,7 @@ move_file_same_repo (const char *repo_id,
        ret = -1;
        goto out;
    }
    root_id = do_del_file (repo, root_id_after_put, src_path, filenames_str->str,
    root_id = do_del_file (repo, root_id_after_put, src_path, src_filenames,
                           NULL, NULL, NULL);

    if (!root_id) {
@@ -3043,8 +3035,6 @@ out:
    seaf_repo_unref (repo);
    if (head_commit)
        seaf_commit_unref (head_commit);
    if (filenames_str)
        g_string_free (filenames_str, TRUE);

    g_free (root_id_after_put);
    g_free (root_id);
@@ -3260,189 +3250,6 @@ out:
    return ret;
}

SeafileCopyResult *
seaf_repo_manager_move_file (SeafRepoManager *mgr,
                             const char *src_repo_id,
                             const char *src_path,
                             const char *src_filename,
                             const char *dst_repo_id,
                             const char *dst_path,
                             const char *dst_filename,
                             int replace,
                             const char *user,
                             int need_progress,
                             int synchronous,
                             GError **error)
{
    SeafRepo *src_repo = NULL, *dst_repo = NULL;
    SeafDirent *src_dent = NULL, *dst_dent = NULL;
    char *src_canon_path = NULL, *dst_canon_path = NULL;
    SeafCommit *dst_head_commit = NULL;
    int ret = 0;
    gboolean background = FALSE;
    char *task_id = NULL;
    SeafileCopyResult *res = NULL;

    GET_REPO_OR_FAIL(src_repo, src_repo_id);

    if (strcmp(src_repo_id, dst_repo_id) != 0) {
        GET_REPO_OR_FAIL(dst_repo, dst_repo_id);

        if (src_repo->encrypted || dst_repo->encrypted) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                         "Can't copy files between encrypted repo(s)");
            ret = -1;
            goto out;
        }

    } else {
        seaf_repo_ref (src_repo);
        dst_repo = src_repo;
    }

    src_canon_path = get_canonical_path (src_path);
    dst_canon_path = get_canonical_path (dst_path);
    /* first check whether a file with file_name already exists in destination dir */
    GET_COMMIT_OR_FAIL(dst_head_commit,
                       dst_repo->id, dst_repo->version,
                       dst_repo->head->commit_id);
    /*FAIL_IF_FILE_EXISTS(dst_repo->store_id, dst_repo->version,
      dst_head_commit->root_id, dst_canon_path, dst_filename, NULL);*/

    /* same repo */
    if (src_repo == dst_repo ) {
        /* get src dirent */
        src_dent = get_dirent_by_path (src_repo, NULL,
                                       src_canon_path, src_filename, error);
        if (!src_dent) {
            seaf_warning("[move file] file %s/%s doesn't exist.\n", src_canon_path, src_filename);
            ret = -1;
            goto out;
        }
        gint64 file_size = (src_dent->version > 0) ? src_dent->size : -1;

        /* duplicate src dirent with new name */
        dst_dent = seaf_dirent_new (dir_version_from_repo_version (dst_repo->version),
                                    src_dent->id, src_dent->mode, dst_filename,
                                    src_dent->mtime, user, file_size);

        /* move file within the same repo */
        if (move_file_same_repo (src_repo_id,
                                 src_canon_path, &src_dent,
                                 dst_canon_path, &dst_dent,
                                 1, replace, user, error) < 0) {
            ret = -1;
            goto out;
        }

        seaf_repo_manager_merge_virtual_repo (mgr, src_repo_id, NULL);

        update_repo_size (dst_repo_id);
    } else {
        /* move between different repos */
        /* virtual repo */
        if (is_virtual_repo_and_origin (src_repo, dst_repo)) {
            /* get src dirent */
            src_dent = get_dirent_by_path (src_repo, NULL,
                                           src_canon_path, src_filename, error);
            if (!src_dent) {
                ret = -1;
                goto out;
            }
            gint64 file_size = (src_dent->version > 0) ? src_dent->size : -1;

            /* duplicate src dirent with new name */
            dst_dent = seaf_dirent_new (dir_version_from_repo_version(dst_repo->version),
                                        src_dent->id, src_dent->mode, dst_filename,
                                        src_dent->mtime, user, file_size);

            /* add this dirent to dst repo */
            if (put_dirent_and_commit (dst_repo,
                                       dst_canon_path,
                                       &dst_dent,
                                       1,
                                       replace,
                                       user,
                                       error) < 0) {
                if (!error)
                    g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                                 "failed to put dirent");
                ret = -1;
                goto out;
            }

            seaf_repo_manager_merge_virtual_repo (mgr, dst_repo_id, NULL);

            if (seaf_repo_manager_del_file (mgr, src_repo_id, src_path,
                                            src_filename, user, error) < 0) {
                ret = -1;
                goto out;
            }

            seaf_repo_manager_merge_virtual_repo (mgr, src_repo_id, NULL);

            update_repo_size (dst_repo_id);
        } else if (!synchronous) {
            background = TRUE;

            task_id = seaf_copy_manager_add_task (seaf->copy_mgr,
                                                  src_repo_id,
                                                  src_canon_path,
                                                  src_filename,
                                                  dst_repo_id,
                                                  dst_canon_path,
                                                  dst_filename,
                                                  replace,
                                                  user,
                                                  cross_repo_move,
                                                  need_progress);
            if (need_progress && !task_id) {
                seaf_warning ("Failed to start copy task.\n");
                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                             "failed to start copy task");
                ret = -1;
                goto out;
            }
        } else {
            /* Synchronous for cross-repo move */
            if (cross_repo_move (src_repo_id,
                                 src_canon_path,
                                 src_filename,
                                 dst_repo_id,
                                 dst_canon_path,
                                 dst_filename,
                                 replace,
                                 user,
                                 NULL) < 0) {
                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                             "Failed to move");
                ret = -1;
                goto out;
            }
        }
    }

out:
    if (src_repo) seaf_repo_unref (src_repo);
    if (dst_repo) seaf_repo_unref (dst_repo);

    if (dst_head_commit) seaf_commit_unref(dst_head_commit);

    if (src_canon_path) g_free (src_canon_path);
    if (dst_canon_path) g_free (dst_canon_path);

    seaf_dirent_free(src_dent);
    seaf_dirent_free(dst_dent);

    if (ret == 0) {
        res = seafile_copy_result_new ();
        g_object_set (res, "background", background, "task_id", task_id, NULL);
        g_free (task_id);
    }

    return res;
}

static gboolean
check_move (SeafRepo *src_repo, SeafRepo *dst_repo,
            const char *src_path, const char *dst_path,
@@ -3531,7 +3338,7 @@ seaf_repo_manager_move_multiple_files (SeafRepoManager *mgr,
    dst_names = json_to_file_list (dst_filenames);
    if (!src_names || !dst_names) {
        ret = -1;
        seaf_warning ("[move files] Bad args: Load filenames to json failed.\n");
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Load filenames to json failed");
        goto out;
    }

@@ -3539,7 +3346,7 @@ seaf_repo_manager_move_multiple_files (SeafRepoManager *mgr,
    int dst_file_num = g_list_length (dst_names);
    if (dst_file_num != file_num) {
        ret = -1;
        seaf_warning ("[move files] Bad args.\n");
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "The number of files in the parameters does not match");
        goto out;
    }

@@ -3568,7 +3375,7 @@ seaf_repo_manager_move_multiple_files (SeafRepoManager *mgr,
        name = ptr->data;
        if (strcmp(name, "") == 0) {
            ret = -1;
            seaf_warning ("[move files] Bad args: Empty src_filenames.\n");
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Empty src_filenames");
            goto out;
        }
        src_dents[i] = g_hash_table_lookup(dirent_hash, name);
@@ -3587,7 +3394,7 @@ seaf_repo_manager_move_multiple_files (SeafRepoManager *mgr,
        name = ptr->data;
        if (strcmp(name, "") == 0) {
            ret = -1;
            seaf_warning ("[move files] Bad args: Empty dst_filenames.\n");
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Empty dst_filenames");
            goto out;
        }
        /* duplicate src dirents with new names */
@@ -3599,6 +3406,7 @@ seaf_repo_manager_move_multiple_files (SeafRepoManager *mgr,
    /* move file within the same repo */
    if (src_repo == dst_repo) {
        if (move_file_same_repo (src_repo_id,
                                 src_filenames,
                                 src_canon_path, src_dents,
                                 dst_canon_path, dst_dents,
                                 file_num, replace, user, error) < 0) {
@@ -32,7 +32,7 @@ SeafileSession *seaf;

char *pidfile = NULL;

static const char *short_options = "hvc:d:l:fP:D:F:p:t";
static const char *short_options = "hvc:d:l:fP:D:F:p:tr:";
static struct option long_options[] = {
    { "help", no_argument, NULL, 'h', },
    { "version", no_argument, NULL, 'v', },
@@ -45,6 +45,7 @@ static struct option long_options[] = {
    { "pidfile", required_argument, NULL, 'P' },
    { "rpc-pipe-path", required_argument, NULL, 'p' },
    { "test-config", no_argument, NULL, 't' },
    { "repair-repo", required_argument, NULL, 'r' },
    { NULL, 0, NULL, 0, },
};

@@ -1211,6 +1212,7 @@ main (int argc, char **argv)
    const char *debug_str = NULL;
    int daemon_mode = 1;
    gboolean test_config = FALSE;
    char *repo_id = NULL;

#ifdef WIN32
    argv = get_argv_utf8 (&argc);
@@ -1253,6 +1255,9 @@ main (int argc, char **argv)
        case 't':
            test_config = TRUE;
            break;
        case 'r':
            repo_id = g_strdup (optarg);
            break;
        default:
            usage ();
            exit (1);
@@ -1315,6 +1320,16 @@ main (int argc, char **argv)

    event_init ();

    if (repo_id) {
        seaf = seafile_repair_session_new (central_config_dir, seafile_dir, ccnet_dir);
        if (!seaf) {
            seaf_warning ("Failed to create repair seafile session.\n");
            exit (1);
        }
        seaf_repo_manager_repair_virtual_repo (repo_id);
        exit (0);
    }

    seaf = seafile_session_new (central_config_dir, seafile_dir, ccnet_dir);
    if (!seaf) {
        seaf_warning ("Failed to create seafile session.\n");
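Taken together, these hunks add a repair mode to seaf-server: an invocation along the lines of "seaf-server -F /opt/seafile/conf -d /opt/seafile/seafile-data -r <virtual-repo-id>" (the paths here are placeholders) presumably builds the lightweight repair session defined below, re-merges the given virtual repo into its origin via seaf_repo_manager_repair_virtual_repo(), and exits instead of entering normal serving mode.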
@@ -218,6 +218,8 @@ seafile_session_new(const char *central_config_dir,
        goto onerror;
    }

    load_seahub_database_config (session);

    session->cfg_mgr = seaf_cfg_manager_new (session);
    if (!session->cfg_mgr)
        goto onerror;
@@ -313,6 +315,113 @@ onerror:
    return NULL;
}

SeafileSession *
seafile_repair_session_new(const char *central_config_dir,
                           const char *seafile_dir,
                           const char *ccnet_dir)
{
    char *abs_central_config_dir = NULL;
    char *abs_seafile_dir;
    char *abs_ccnet_dir = NULL;
    char *tmp_file_dir;
    char *config_file_path;
    char *config_file_ccnet;
    GKeyFile *config;
    GKeyFile *ccnet_config;
    SeafileSession *session = NULL;
    gboolean notif_enabled = FALSE;
    int notif_port = 8083;
    gboolean cluster_mode;
    gboolean use_block_cache;
    int block_cache_size_limit;
    char **block_cache_file_types;
    gint64 repo_file_number_limit = -1;

    abs_ccnet_dir = ccnet_expand_path (ccnet_dir);
    abs_seafile_dir = ccnet_expand_path (seafile_dir);
    tmp_file_dir = g_build_filename (abs_seafile_dir, "tmpfiles", NULL);
    if (central_config_dir) {
        abs_central_config_dir = ccnet_expand_path (central_config_dir);
    }

    config_file_path = g_build_filename(
        abs_central_config_dir ? abs_central_config_dir : abs_seafile_dir,
        "seafile.conf", NULL);

    config_file_ccnet = g_build_filename(
        abs_central_config_dir ? abs_central_config_dir : abs_ccnet_dir,
        "ccnet.conf", NULL);

    GError *error = NULL;
    config = g_key_file_new ();
    if (!g_key_file_load_from_file (config, config_file_path,
                                    G_KEY_FILE_NONE, &error)) {
        seaf_warning ("Failed to load config file.\n");
        g_key_file_free (config);
        g_free (config_file_path);
        goto onerror;
    }
    ccnet_config = g_key_file_new ();
    g_key_file_set_list_separator (ccnet_config, ',');
    if (!g_key_file_load_from_file (ccnet_config, config_file_ccnet,
                                    G_KEY_FILE_KEEP_COMMENTS, NULL))
    {
        seaf_warning ("Can't load ccnet config file %s.\n", config_file_ccnet);
        g_key_file_free (ccnet_config);
        g_free (config_file_ccnet);
        goto onerror;
    }
    g_free (config_file_path);
    g_free (config_file_ccnet);

    session = g_new0(SeafileSession, 1);
    session->seaf_dir = abs_seafile_dir;
    session->ccnet_dir = abs_ccnet_dir;
    session->tmp_file_dir = tmp_file_dir;
    session->config = config;
    session->ccnet_config = ccnet_config;
    session->is_repair = TRUE;

    if (load_database_config (session) < 0) {
        seaf_warning ("Failed to load database config.\n");
        goto onerror;
    }

    if (load_ccnet_database_config (session) < 0) {
        seaf_warning ("Failed to load ccnet database config.\n");
        goto onerror;
    }

    session->fs_mgr = seaf_fs_manager_new (session, abs_seafile_dir);
    if (!session->fs_mgr)
        goto onerror;
    session->block_mgr = seaf_block_manager_new (session, abs_seafile_dir);
    if (!session->block_mgr)
        goto onerror;
    session->commit_mgr = seaf_commit_manager_new (session);
    if (!session->commit_mgr)
        goto onerror;
    session->repo_mgr = seaf_repo_manager_new (session);
    if (!session->repo_mgr)
        goto onerror;
    session->branch_mgr = seaf_branch_manager_new (session);
    if (!session->branch_mgr)
        goto onerror;

    session->job_mgr = ccnet_job_manager_new (DEFAULT_THREAD_POOL_SIZE);

    session->size_sched = size_scheduler_new (session);

    return session;

onerror:
    free (abs_seafile_dir);
    free (abs_ccnet_dir);
    g_free (tmp_file_dir);
    g_free (session);
    return NULL;
}

int
seafile_session_init (SeafileSession *session)
{
@@ -47,6 +47,7 @@ struct _SeafileSession {
    GKeyFile *ccnet_config;
    SeafDB *db;
    CcnetDB *ccnet_db;
    SeafDB *seahub_db;

    SeafBlockManager *block_mgr;
    SeafFSManager *fs_mgr;
@@ -90,6 +91,8 @@ struct _SeafileSession {
    // For notification server
    NotifManager *notif_mgr;
    char *private_key;

    gboolean is_repair;
};

extern SeafileSession *seaf;
@@ -98,6 +101,12 @@ SeafileSession *
seafile_session_new(const char *central_config_dir,
                    const char *seafile_dir,
                    const char *ccnet_dir);

SeafileSession *
seafile_repair_session_new(const char *central_config_dir,
                           const char *seafile_dir,
                           const char *ccnet_dir);

int
seafile_session_init (SeafileSession *session);
@@ -1147,3 +1147,153 @@ seaf_repo_manager_init_merge_scheduler ()
                                   SCHEDULE_INTERVAL);
    return 0;
}

int
seaf_repo_manager_repair_virtual_repo (char *repo_id)
{
    SeafRepoManager *mgr = seaf->repo_mgr;
    SeafVirtRepo *vinfo = NULL;
    SeafRepo *repo = NULL, *orig_repo = NULL;
    SeafCommit *head = NULL, *orig_head = NULL;
    char *root = NULL, *orig_root = NULL;
    char new_base_commit[41] = {0};
    int ret = 0;
    GError *error = NULL;

    /* repos */
    repo = seaf_repo_manager_get_repo (mgr, repo_id);
    if (!repo) {
        seaf_warning ("Failed to get virt repo %.10s.\n", repo_id);
        ret = -1;
        goto out;
    }

    if (!repo->virtual_info) {
        seaf_warning ("Repo %.10s is not a virtual repo.\n", repo_id);
        ret = -1;
        goto out;
    }

    vinfo = seaf_repo_manager_get_virtual_repo_info (mgr, repo_id);
    if (!vinfo) {
        seaf_warning ("Failed to get virt repo info %.10s.\n", repo_id);
        ret = -1;
        goto out;
    }

    orig_repo = seaf_repo_manager_get_repo (mgr, vinfo->origin_repo_id);
    if (!orig_repo) {
        seaf_warning ("Failed to get orig repo %.10s.\n", vinfo->origin_repo_id);
        ret = -1;
        goto out;
    }

    /* commits */
    head = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                           repo->id, repo->version,
                                           repo->head->commit_id);
    if (!head) {
        seaf_warning ("Failed to get virtual repo commit %s:%.8s.\n",
                      repo->id, repo->head->commit_id);
        ret = -1;
        goto out;
    }

    orig_head = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                                orig_repo->id, orig_repo->version,
                                                orig_repo->head->commit_id);
    if (!orig_head) {
        seaf_warning ("Failed to get origin repo commit %s:%.8s.\n",
                      orig_repo->id, orig_repo->head->commit_id);
        ret = -1;
        goto out;
    }

    orig_root = seaf_fs_manager_get_seafdir_id_by_path (seaf->fs_mgr,
                                                        orig_repo->store_id,
                                                        orig_repo->version,
                                                        orig_head->root_id,
                                                        vinfo->path,
                                                        &error);
    if (error &&
        !g_error_matches(error,
                         SEAFILE_DOMAIN,
                         SEAF_ERR_PATH_NO_EXIST)) {
        seaf_warning ("Failed to get seafdir id by path in origin repo %.10s: %s.\n", orig_repo->store_id, error->message);
        ret = -1;
        goto out;
    }
    if (!orig_root) {
        seaf_message("Path %s not found in origin repo %.8s, delete or rename virtual repo %.8s\n",
                     vinfo->path, vinfo->origin_repo_id, repo_id);

        goto out;
    }

    /* fs roots */
    root = head->root_id;

    MergeOptions opt;
    const char *roots[2];

    memset (&opt, 0, sizeof(opt));
    opt.n_ways = 2;
    memcpy (opt.remote_repo_id, repo_id, 36);
    memcpy (opt.remote_head, head->commit_id, 40);

    roots[0] = orig_root;
    roots[1] = root;

    /* Merge virtual into origin */
    if (seaf_merge_trees (orig_repo->store_id, orig_repo->version,
                          2, roots, &opt) < 0) {
        seaf_warning ("Failed to merge virtual repo %.10s.\n", repo_id);
        ret = -1;
        goto out;
    }

    seaf_debug ("Number of dirs visted in merge: %d.\n", opt.visit_dirs);

    /* Update virtual repo root. */
    ret = seaf_repo_manager_update_dir (mgr,
                                        repo_id,
                                        "/",
                                        opt.merged_tree_root,
                                        orig_head->creator_name,
                                        head->commit_id,
                                        NULL,
                                        NULL);
    if (ret < 0) {
        seaf_warning ("Failed to update root of virtual repo %.10s.\n",
                      repo_id);
        goto out;
    }

    /* Update origin repo path. */
    ret = seaf_repo_manager_update_dir (mgr,
                                        vinfo->origin_repo_id,
                                        vinfo->path,
                                        opt.merged_tree_root,
                                        head->creator_name,
                                        orig_head->commit_id,
                                        new_base_commit,
                                        NULL);
    if (ret < 0) {
        seaf_warning ("Failed to update origin repo %.10s path %s.\n",
                      vinfo->origin_repo_id, vinfo->path);
        goto out;
    }

    set_virtual_repo_base_commit_path (repo->id, new_base_commit, vinfo->path);

out:
    if (error)
        g_clear_error (&error);
    seaf_virtual_repo_info_free (vinfo);
    seaf_repo_unref (repo);
    seaf_repo_unref (orig_repo);
    seaf_commit_unref (head);
    seaf_commit_unref (orig_head);
    g_free (orig_root);
    return ret;
}