1
0
mirror of https://github.com/haiwen/seahub.git synced 2025-04-27 11:01:14 +00:00

new pro scripts

This commit is contained in:
skywalker 2021-12-03 16:53:27 +08:00
parent b6d251e44f
commit 5a8ca8737f
41 changed files with 4047 additions and 36 deletions

1127
scripts/build/build-pro.py Executable file

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
# coding: UTF-8
'''This script builds the seafile server tarball.

69
scripts/build/office.py Normal file
View File

@@ -0,0 +1,69 @@
import os
import sys
import subprocess
import shutil
def get_dependent_libs(executable):
    '''Return paths of the non-system shared libraries that `ldd` reports
    for *executable*.
    '''
    system_prefixes = ['libsearpc', 'libccnet', 'libseafile', 'libpthread.so', 'libc.so', 'libm.so', 'librt.so', 'libdl.so', 'libselinux.so', 'libresolv.so' ]

    def is_system(libname):
        return any(prefix in libname for prefix in system_prefixes)

    libs = []
    for entry in subprocess.getoutput('ldd %s' % executable).splitlines():
        fields = entry.split()
        # Only lines shaped like "libfoo.so => /path/libfoo.so (0x...)".
        if len(fields) != 4:
            continue
        if is_system(fields[0]):
            continue
        libs.append(fields[2])
    return libs
def prepend_env_value(name, value, seperator=':'):
    '''Prepend *value* to the separator-joined list stored in the
    environment variable *name* (creating it when absent).
    '''
    current_value = os.environ.get(name, '')
    if current_value:
        os.environ[name] = value + seperator + current_value
    else:
        os.environ[name] = value
def main():
    # pdf2htmlEX links against private libraries shipped in the bundled
    # seafile/lib tree; extend LD_LIBRARY_PATH so ldd can resolve them.
    prepend_env_value ('LD_LIBRARY_PATH',
                       '/tmp/seafile-pro-server-build/seafile-pro-server-1.6.5/seafile/lib')
    # Destination layout: <destdir>/bin and <destdir>/lib.
    destdir = sys.argv[1]
    dest_libdir = os.path.join(destdir, 'lib')
    dest_bindir = os.path.join(destdir, 'bin')
    for d in (dest_bindir, dest_libdir):
        if not os.path.exists(d):
            os.makedirs(d)
        elif not os.path.isdir(d):
            raise RuntimeError('"%s" is not a directory!' % d)
    bindir = '/tmp/seafile-pro-server-build/seafile-pro-server-1.6.5/seafile/bin'
    httpserver = os.path.join(bindir, 'httpserver')
    pdf2htmlEX = os.path.join(bindir, 'pdf2htmlEX')
    httpserver_libs = get_dependent_libs(httpserver)
    pdf2htmlEX_libs = get_dependent_libs(pdf2htmlEX)
    # Copy only libraries pdf2htmlEX needs and httpserver does not already
    # depend on — httpserver's dependencies are presumed to ship with the
    # server package itself.
    needed_libs = set(pdf2htmlEX_libs) - set(httpserver_libs)
    for lib in needed_libs:
        dest = os.path.join(dest_libdir, os.path.basename(lib))
        if not os.path.exists(dest):
            shutil.copy(lib, dest)
    shutil.copy(pdf2htmlEX, dest_bindir)

if __name__ == '__main__':
    main()

23
scripts/check-db-type.py Normal file
View File

@ -0,0 +1,23 @@
import sys
from configparser import ConfigParser

# Print the database backend ('sqlite', 'mysql', 'pgsql' or 'unknown')
# configured in the given seafile.conf, for consumption by shell scripts.

if len(sys.argv) != 2:
    print('check-db-type.py <seafile-config-file>', file=sys.stderr)
    # Bug fix: previously execution fell through and sys.argv[1] raised
    # an IndexError; exit with a non-zero status instead.
    sys.exit(1)

seafile_conf_file = sys.argv[1]
parser = ConfigParser()
parser.read(seafile_conf_file)

if not parser.has_option('database', 'type'):
    # No [database] type option means the sqlite default is in use.
    print('sqlite')
else:
    db_type = parser.get('database', 'type')
    if db_type == 'sqlite':
        print('sqlite')
    elif db_type == 'mysql':
        print('mysql')
    elif db_type == 'pgsql':
        print('pgsql')
    else:
        print('unknown')

View File

@ -0,0 +1,71 @@
-- Oracle dialect of the seafile user/group/org databases.
-- Oracle has no AUTO_INCREMENT, so each synthetic integer primary key gets
-- a companion CREATE SEQUENCE; callers obtain ids via <seq>.NEXTVAL.

-- User db tables
CREATE TABLE EmailUser (
  id int PRIMARY KEY,
  email varchar2(255),
  passwd varchar2(256),
  is_staff int,
  is_active int,
  ctime number,
  UNIQUE (email)
);
CREATE SEQUENCE EmailUsersIdSeq MINVALUE 1 START WITH 1 INCREMENT BY 1;

-- Users provisioned by LDAP sync. NOTE: the password column is named
-- "password" here but "passwd" in EmailUser.
CREATE TABLE LDAPUsers (
  id int PRIMARY KEY,
  email varchar2(255),
  password varchar2(256),
  is_staff int,
  is_active int,
  extra_attrs varchar2(1024),
  UNIQUE (email)
);
CREATE SEQUENCE LDAPUsersIdSeq MINVALUE 1 START WITH 1 INCREMENT BY 1;

CREATE TABLE UserRole (
  email varchar2(255),
  role varchar2(255),
  UNIQUE (email, role)
);

-- Group db tables
-- GROUP is an Oracle reserved word, hence the quoted identifier.
CREATE TABLE "Group" (
  group_id int PRIMARY KEY,
  group_name varchar2(255),
  creator_name varchar2(255),
  timestamp number,
  type varchar2(32)
);
CREATE SEQUENCE GroupIdSeq MINVALUE 1 START WITH 1 INCREMENT BY 1;

CREATE TABLE GroupUser (
  group_id int,
  user_name varchar2(255),
  is_staff int,
  UNIQUE (group_id, user_name)
);
CREATE INDEX GroupUserNameIndex ON GroupUser (user_name);

CREATE TABLE GroupDNPair (
  group_id int,
  dn varchar2(255)
);

-- Org db tables
CREATE TABLE Organization (
  org_id int PRIMARY KEY,
  org_name varchar2(255),
  url_prefix varchar2(255),
  creator varchar2(255),
  ctime number,
  UNIQUE (url_prefix)
);
CREATE SEQUENCE OrgIdSeq MINVALUE 1 START WITH 1 INCREMENT BY 1;

CREATE TABLE OrgUser (
  org_id int,
  email varchar2(255),
  is_staff int,
  UNIQUE (org_id, email)
);
CREATE INDEX OrgUserEmailIndex ON OrgUser (email);

CREATE TABLE OrgGroup (
  org_id int,
  group_id int,
  UNIQUE (org_id, group_id)
);
CREATE INDEX OrgGroupIdIndex ON OrgGroup (group_id);

View File

@ -0,0 +1,228 @@
-- Oracle dialect of the seafile core (seafile-db) schema.
-- Reserved words ("user", "size") are quoted; sequences replace
-- AUTO_INCREMENT for the shared-repo id columns.
CREATE TABLE Branch (
  name varchar2(10),
  repo_id char(36),
  commit_id char(40),
  PRIMARY KEY (repo_id,name)
);
CREATE TABLE FileLockTimestamp (
  repo_id char(36),
  update_time number,
  PRIMARY KEY (repo_id)
);
CREATE TABLE FileLocks (
  repo_id char(36),
  path varchar2(1024 char),
  user_name varchar2(255),
  lock_time number,
  expire number
);
CREATE INDEX FileLocksIndex ON FileLocks (repo_id);
-- Per-folder permissions, by group and by user.
CREATE TABLE FolderGroupPerm (
  repo_id char(36),
  path varchar2(1024 char),
  permission varchar2(15),
  group_id int
);
CREATE INDEX FolderGroupPermIndex ON FolderGroupPerm (repo_id);
CREATE TABLE FolderPermTimestamp (
  repo_id char(36),
  timestamp number,
  PRIMARY KEY (repo_id)
);
CREATE TABLE FolderUserPerm (
  repo_id char(36),
  path varchar2(1024 char),
  permission varchar2(15),
  "user" varchar2(255)
);
CREATE INDEX FolderUserPermIndex ON FolderUserPerm (repo_id);
-- Garbage collection bookkeeping.
CREATE TABLE GCID (
  repo_id char(36),
  gc_id char(36),
  PRIMARY KEY (repo_id)
);
CREATE TABLE GarbageRepos (
  repo_id char(36),
  PRIMARY KEY (repo_id)
);
CREATE TABLE InnerPubRepo (
  repo_id char(36),
  permission varchar2(15),
  PRIMARY KEY (repo_id)
);
CREATE TABLE LastGCID (
  repo_id char(36),
  client_id varchar2(128),
  gc_id char(36),
  PRIMARY KEY (repo_id,client_id)
);
-- Organization-scoped sharing tables.
CREATE TABLE OrgGroupRepo (
  org_id int,
  repo_id char(36),
  group_id int,
  owner varchar2(255),
  permission varchar2(15),
  PRIMARY KEY (org_id,group_id,repo_id)
);
CREATE INDEX OrgGroupRepoIdIndex ON OrgGroupRepo (repo_id);
CREATE INDEX OrgGroupRepoOwnerIndex ON OrgGroupRepo (owner);
CREATE TABLE OrgInnerPubRepo (
  org_id int,
  repo_id char(36),
  permission varchar2(15),
  PRIMARY KEY (org_id,repo_id)
);
CREATE TABLE OrgQuota (
  org_id int,
  quota number,
  PRIMARY KEY (org_id)
);
CREATE TABLE OrgRepo (
  org_id int,
  repo_id char(36),
  "user" varchar2(255),
  PRIMARY KEY (org_id,repo_id),
  UNIQUE (repo_id)
);
CREATE INDEX OrgRepoOrgIdIndex ON OrgRepo (org_id, "user");
CREATE TABLE OrgSharedRepo (
  id int,
  org_id int,
  repo_id char(36),
  from_email varchar2(255),
  to_email varchar2(255),
  permission varchar2(15),
  PRIMARY KEY (id)
);
CREATE SEQUENCE OrgSharedRepoSeq MINVALUE 1 START WITH 1 INCREMENT BY 1;
CREATE INDEX OrgSharedRepoIdIndex ON OrgSharedRepo (org_id, repo_id);
CREATE INDEX OrgSharedRepoFromEmailIndex ON OrgSharedRepo (from_email);
CREATE INDEX OrgSharedRepoToEmailIndex ON OrgSharedRepo (to_email);
CREATE TABLE OrgUserQuota (
  org_id int,
  "user" varchar2(255),
  quota number,
  PRIMARY KEY (org_id,"user")
);
-- Core repo tables.
CREATE TABLE Repo (
  repo_id char(36),
  PRIMARY KEY (repo_id)
);
CREATE TABLE RepoFileCount (
  repo_id char(36),
  file_count number,
  PRIMARY KEY (repo_id)
);
CREATE TABLE RepoGroup (
  repo_id char(36),
  group_id int,
  user_name varchar2(255),
  permission varchar2(15),
  PRIMARY KEY (group_id,repo_id)
);
CREATE INDEX RepoGroupIdIndex ON RepoGroup (repo_id);
CREATE INDEX RepoGroupUsernameIndex ON RepoGroup (user_name);
CREATE TABLE RepoHead (
  repo_id char(36),
  branch_name varchar2(10),
  PRIMARY KEY (repo_id)
);
CREATE TABLE RepoHistoryLimit (
  repo_id char(36),
  days int,
  PRIMARY KEY (repo_id)
);
CREATE TABLE RepoOwner (
  repo_id char(36),
  owner_id varchar2(255),
  PRIMARY KEY (repo_id)
);
CREATE INDEX RepoOwnerNameIndex ON RepoOwner (owner_id);
CREATE TABLE RepoSize (
  repo_id char(36),
  "size" number,
  head_id char(40),
  PRIMARY KEY (repo_id)
);
CREATE TABLE RepoSyncError (
  token char(40),
  error_time number,
  error_con varchar2(50),
  PRIMARY KEY (token)
);
-- Per-client sync token metadata.
CREATE TABLE RepoTokenPeerInfo (
  token char(40),
  peer_id char(40),
  peer_ip varchar2(40),
  peer_name varchar2(255),
  sync_time number,
  client_ver varchar2(20),
  PRIMARY KEY (token)
);
CREATE TABLE RepoTrash (
  repo_id char(36),
  repo_name varchar2(255),
  head_id char(40),
  owner_id varchar2(255),
  "size" number,
  org_id int,
  del_time number,
  PRIMARY KEY (repo_id)
);
CREATE INDEX RepoTrashOwnerIndex ON RepoTrash (owner_id);
CREATE INDEX RepoTrashOrgIdIndex ON RepoTrash (org_id);
CREATE TABLE RepoUserToken (
  repo_id char(36),
  email varchar2(255),
  token char(40),
  PRIMARY KEY (repo_id,token)
);
CREATE INDEX RepoUserTokenEmailIndex ON RepoUserToken (email);
CREATE TABLE RepoValidSince (
  repo_id char(36),
  timestamp number,
  PRIMARY KEY (repo_id)
);
CREATE TABLE SharedRepo (
  id int,
  repo_id char(36),
  from_email varchar2(255),
  to_email varchar2(255),
  permission varchar2(15)
);
CREATE SEQUENCE SharedRepoSeq MINVALUE 1 START WITH 1 INCREMENT BY 1;
CREATE INDEX SharedRepoIdIndex ON SharedRepo (repo_id);
CREATE INDEX SharedRepoFromEmailIndex ON SharedRepo (from_email);
CREATE INDEX SharedRepoToEmailIndex ON SharedRepo (to_email);
CREATE TABLE SystemInfo (
  info_key varchar2(256) PRIMARY KEY,
  info_value varchar2(1024)
);
-- Quota tables.
CREATE TABLE UserQuota (
  "user" varchar2(255),
  quota number,
  PRIMARY KEY ("user")
);
CREATE TABLE UserShareQuota (
  "user" varchar2(255),
  quota number,
  PRIMARY KEY ("user")
);
CREATE TABLE RoleQuota (
  role varchar2(255),
  quota number,
  PRIMARY KEY (role)
);
CREATE TABLE VirtualRepo (
  repo_id char(36),
  origin_repo char(36),
  path varchar2(1024 char),
  base_commit char(40),
  PRIMARY KEY (repo_id)
);
CREATE INDEX VirtualRepoOriginIndex ON VirtualRepo (origin_repo);
CREATE TABLE WebUploadTempFiles (
  repo_id char(36),
  file_path varchar2(1024 char),
  tmp_file_path varchar2(1024 char)
);

49
scripts/index_op.py Executable file
View File

@ -0,0 +1,49 @@
import logging
import argparse
from seafes.config import seafes_config
from seafes.repo_data import repo_data
from seafes.mq import get_mq
seafes_config.load_index_master_conf()
mq = get_mq(seafes_config.subscribe_mq,
seafes_config.subscribe_server,
seafes_config.subscribe_port,
seafes_config.subscribe_password)
def put_to_redis(repo_id, cmt_id):
    '''Queue an index-recover task for (repo_id, cmt_id) on the mq.'''
    payload = "index_recover\t%s\t%s" % (repo_id, cmt_id)
    mq.lpush('index_task', payload)
def show_all_task():
    '''Log how many index tasks are currently queued on the mq.'''
    pending = mq.llen('index_task')
    logging.info("index task count: %s" % pending)
def restore_all_repo():
    '''Walk every repo in batches and queue an index-recover task for each.

    Stops on the first batch-fetch error (logged), or when a batch comes
    back empty.
    '''
    start, count = 0, 1000
    while True:
        try:
            repo_commits = repo_data.get_repo_id_commit_id(start, count)
        except Exception as e:
            logging.error("Error: %s" % e)
            return
        else:
            if len(repo_commits) == 0:
                break
            for repo_id, commit_id in repo_commits:
                put_to_redis(repo_id, commit_id)
        # Fix: advance by the batch size variable rather than a duplicated
        # magic number, so changing `count` cannot skip or repeat repos.
        start += count
def main():
    '''CLI entry point: --mode restore_all_repo | show_all_task.'''
    parser = argparse.ArgumentParser(description='main program')
    parser.add_argument('--mode')
    parser_args = parser.parse_args()
    # Fix: the original only matched the misspelled mode 'resotre_all_repo'.
    # Accept the correct spelling while keeping the typo for any existing
    # cron jobs / docs that rely on it.
    if parser_args.mode in ('restore_all_repo', 'resotre_all_repo'):
        restore_all_repo()
    elif parser_args.mode == 'show_all_task':
        show_all_task()

if __name__ == '__main__':
    main()

153
scripts/migrate-repo.py Normal file
View File

@ -0,0 +1,153 @@
#!/usr/bin/env python3
import os
import sys
import logging
import configparser
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker
from migrate import ObjMigrateWorker
from seafobj.objstore_factory import objstore_factory
from seaserv import seafile_api as api
from seaserv import REPO_STATUS_READ_ONLY, REPO_STATUS_NORMAL
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
def main(argv):
    '''Entry point.

    Usage: migrate-repo.py [repo_id] <orig_storage_id> <dest_storage_id>
    Three arguments migrate a single repo; two migrate every repo.
    '''
    if len(argv) == 4:
        all_migrate = False
        repo_id = argv[1]
        orig_storage_id = argv[2]
        dest_storage_id = argv[3]
    elif len(argv) == 3:
        all_migrate = True
        orig_storage_id = argv[1]
        dest_storage_id = argv[2]
    else:
        # Fix: any other argument count previously fell through to an
        # UnboundLocalError on `all_migrate`; fail with a usage message.
        print('usage: migrate-repo.py [repo_id] <orig_storage_id> <dest_storage_id>',
              file=sys.stderr)
        sys.exit(1)

    if all_migrate:
        migrate_repos(orig_storage_id, dest_storage_id)
    else:
        migrate_repo(repo_id, orig_storage_id, dest_storage_id)
def parse_seafile_config():
    '''Read the [database] section of seafile.conf.

    Locates the file under $SEAFILE_CENTRAL_CONF_DIR and returns the tuple
    (host, port, user, password, db_name), all as strings.
    '''
    conf_path = os.path.join(os.environ['SEAFILE_CENTRAL_CONF_DIR'], 'seafile.conf')
    config = configparser.ConfigParser()
    config.read(conf_path)
    section = 'database'
    return (config.get(section, 'host'),
            config.get(section, 'port'),
            config.get(section, 'user'),
            config.get(section, 'password'),
            config.get(section, 'db_name'))
def get_repo_ids():
    '''Fetch all repo ids from the seafile database.

    Returns a list of one-column rows, or None when the query fails.
    '''
    host, port, user, passwd, db_name = parse_seafile_config()
    url = 'mysql+pymysql://' + user + ':' + passwd + '@' + host + ':' + port + '/' + db_name
    sql = 'SELECT repo_id FROM Repo'
    session = None
    try:
        engine = create_engine(url, echo=False)
        session = sessionmaker(engine)()
        result_proxy = session.execute(text(sql))
        result = result_proxy.fetchall()
    # Fix: narrowed from a bare except, and the connection URL is no longer
    # printed — it embeds the database password in clear text.
    except Exception as e:
        logging.warning('Failed to query repo ids: %s', e)
        return None
    else:
        return result
    finally:
        # Fix: the session was previously never closed.
        if session is not None:
            session.close()
def migrate_repo(repo_id, orig_storage_id, dest_storage_id):
    # Copy one repo's objects from one storage backend to another.
    # The repo is made read-only for the duration so clients cannot write
    # objects the copy would miss; status is restored on every exit path.
    api.set_repo_status (repo_id, REPO_STATUS_READ_ONLY)
    dtypes = ['commits', 'fs', 'blocks']
    workers = []
    for dtype in dtypes:
        obj_stores = objstore_factory.get_obj_stores(dtype)
        #If these storage ids passed in do not exist in conf, stop migrate this repo.
        if orig_storage_id not in obj_stores or dest_storage_id not in obj_stores:
            logging.warning('Storage id passed in does not exist in configuration.\n')
            api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
            sys.exit()
        orig_store = obj_stores[orig_storage_id]
        dest_store = obj_stores[dest_storage_id]
        try:
            # One worker thread per data type copies objects concurrently.
            worker = ObjMigrateWorker (orig_store, dest_store, dtype, repo_id)
            worker.start()
            workers.append(worker)
        except:
            logging.warning('Failed to migrate repo %s.', repo_id)
    try:
        for w in workers:
            w.join()
    except:
        # e.g. KeyboardInterrupt while waiting: restore status, then abort.
        api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
        sys.exit(1)
    # Workers record failures in exit_code/exception instead of raising.
    for w in workers:
        if w.exit_code == 1:
            logging.warning(w.exception)
            api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
            sys.exit(1)
    # Point the repo at the destination backend only after a full copy.
    if api.update_repo_storage_id(repo_id, dest_storage_id) < 0:
        logging.warning('Failed to update repo [%s] storage_id.\n', repo_id)
        api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
        return
    api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
    logging.info('The process of migrating repo [%s] is over.\n', repo_id)
def migrate_repos(orig_storage_id, dest_storage_id):
    # Same flow as migrate_repo(), applied to every repo id found in the
    # database. Kept as a separate loop because a failed storage-id update
    # aborts the whole run here (return), not just the current repo.
    # NOTE(review): get_repo_ids() returns None on DB error, which would
    # raise TypeError on iteration here — confirm intended behavior.
    repo_ids = get_repo_ids()
    for repo_id in repo_ids:
        try:
            # Rows come back as one-element tuples.
            repo_id = repo_id[0]
        except:
            continue
        api.set_repo_status (repo_id, REPO_STATUS_READ_ONLY)
        dtypes = ['commits', 'fs', 'blocks']
        workers = []
        for dtype in dtypes:
            obj_stores = objstore_factory.get_obj_stores(dtype)
            #If these storage ids passed in do not exist in conf, stop migrate this repo.
            if orig_storage_id not in obj_stores or dest_storage_id not in obj_stores:
                logging.warning('Storage id passed in does not exist in configuration.\n')
                api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
                sys.exit()
            orig_store = obj_stores[orig_storage_id]
            dest_store = obj_stores[dest_storage_id]
            try:
                worker = ObjMigrateWorker (orig_store, dest_store, dtype, repo_id)
                worker.start()
                workers.append(worker)
            except:
                logging.warning('Failed to migrate repo %s.', repo_id)
        try:
            for w in workers:
                w.join()
        except:
            api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
            sys.exit(1)
        for w in workers:
            if w.exit_code == 1:
                logging.warning(w.exception)
                api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
                sys.exit(1)
        if api.update_repo_storage_id(repo_id, dest_storage_id) < 0:
            logging.warning('Failed to update repo [%s] storage_id.\n', repo_id)
            api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
            return
        api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
        logging.info('The process of migrating repo [%s] is over.\n', repo_id)

if __name__ == '__main__':
    main(sys.argv)

80
scripts/migrate-repo.sh Executable file
View File

@ -0,0 +1,80 @@
#!/bin/bash
echo ""
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
default_conf_dir=${TOPDIR}/conf
seafile_rpc_pipe_path=${INSTALLPATH}/runtime
migrate=${INSTALLPATH}/migrate-repo.py
script_name=$0
# Print a short usage summary for this script.
function usage () {
    echo "usage : "
    echo " ./$(basename ${script_name})" \
        "[repo id to migrate]" \
        "<origin storage id>" \
        "<destination storage id>"
    echo ""
}
# Locate a Python 3 interpreter and store it in $PYTHON.
# A pre-set, executable $PYTHON is honored as-is; otherwise try python3,
# then a versioned "pythonX.Y" derived from `python --version`.
# Exits with status 1 when no suitable interpreter is found.
function check_python_executable() {
    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
        return 0
    fi

    if which python3 2>/dev/null 1>&2; then
        PYTHON=python3
    elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
        echo
        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
        echo
        exit 1
    else
        PYTHON="python"$(python --version | cut -b 8-10)
        # Fix: "!which" (no space) is not negation in bash — it tries to run
        # a command literally named "!which". "! which" is required.
        if ! which $PYTHON 2>/dev/null 1>&2; then
            echo
            echo "Can't find a python executable of $PYTHON in PATH"
            echo "Install $PYTHON before continue."
            echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
            echo
            exit 1
        fi
    fi
}
# Export the seafile server environment and run migrate-repo.py, passing
# this function's arguments straight through to the python script.
# Relies on $PYTHON having been set by check_python_executable.
function do_migrate () {
    export CCNET_CONF_DIR=${default_ccnet_conf_dir}
    export SEAFILE_CONF_DIR=${default_seafile_data_dir}
    export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir}
    export SEAFILE_RPC_PIPE_PATH=${seafile_rpc_pipe_path}
    # Bundled site-packages and seahub thirdpart must win over system ones.
    export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
    $PYTHON ${migrate} $@
}
check_python_executable;

# Show usage and stop when any argument is -h/--help.
if [ $# -gt 0 ];
then
    # Fix: quote "$@" and "${param}" so arguments containing whitespace
    # are not word-split (unquoted $@ also broke -h detection for them).
    for param in "$@";
    do
        if [ "${param}" = "-h" -o "${param}" = "--help" ];
        then
            usage;
            exit 1;
        fi
    done
fi

# Accept 3 args (single-repo migrate) or 2 args (migrate everything).
if [ $# -ne 3 ] && [ $# -ne 2 ];
then
    usage;
    exit 1;
fi

do_migrate "$@";

echo "Done."

213
scripts/migrate.py Executable file
View File

@ -0,0 +1,213 @@
#!/usr/bin/env python3
#coding: utf-8
import os
import re
import sys
import logging
import queue
import threading
from threading import Thread
from uuid import UUID
from seafobj.objstore_factory import SeafObjStoreFactory
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
class Worker(Thread):
    '''Thread that drains a task queue, applying *do_work* to each task.

    A None task acts as the shutdown sentinel. Every dequeued item —
    including the sentinel — is acknowledged with task_done().
    '''
    def __init__(self, do_work, task_queue):
        Thread.__init__(self)
        self.do_work = do_work
        self.task_queue = task_queue

    def run(self):
        alive = True
        while alive:
            item = self.task_queue.get()
            try:
                if item is None:
                    alive = False
                else:
                    self.do_work(item)
            except Exception as e:
                # A failing task is logged and skipped; the worker keeps going.
                logging.warning('Failed to execute task: %s' % e)
            finally:
                self.task_queue.task_done()
class ThreadPool(object):
    '''Fixed-size pool of Worker threads fed from one bounded queue.'''

    def __init__(self, do_work, nworker=20):
        self.do_work = do_work
        self.nworker = nworker
        self.task_queue = queue.Queue(maxsize=2000)

    def start(self):
        '''Spawn the worker threads.'''
        for _ in range(self.nworker):
            Worker(self.do_work, self.task_queue).start()

    def put_task(self, task):
        '''Enqueue a task; blocks while the queue is full.'''
        self.task_queue.put(task)

    def join(self):
        '''Wait until every queued task is done, then shut the workers down
        by sending one None sentinel per worker.
        '''
        self.task_queue.join()
        for _ in range(self.nworker):
            self.task_queue.put(None)
class Task(object):
    '''One object to copy: identified by repo id, repo version and object id.'''
    def __init__(self, repo_id, repo_version, obj_id):
        self.repo_id = repo_id
        self.repo_version = repo_version
        self.obj_id = obj_id
class ObjMigrateWorker(Thread):
    # Copies every object of one data type ('commits', 'fs' or 'blocks')
    # from orig_store to dest_store, optionally restricted to one repo.
    # Already-copied objects can be checkpointed to a file named via the
    # OBJECT_LIST_FILE_PATH environment variable, so an interrupted run
    # resumes without re-copying. Failures are recorded in
    # exit_code/exception rather than raised, for inspection after join().
    def __init__(self, orig_store, dest_store, dtype, repo_id = None):
        Thread.__init__(self)
        self.lock = threading.Lock()  # serializes checkpoint-file writes
        self.dtype = dtype
        self.orig_store = orig_store
        self.dest_store = dest_store
        self.repo_id = repo_id
        self.thread_pool = ThreadPool(self.do_work)
        self.write_count = 0
        self.fetch_count = 0
        self.dest_objs = {}  # repo_id -> set of obj_ids already present in dest
        self.object_list_file_path = ''
        self.fd = None
        self.exit_code = 0
        self.exception = None

    def run(self):
        try:
            self._run()
        except Exception as e:
            # Surface the failure through attributes; the creator checks
            # exit_code after join().
            self.exit_code = 1
            self.exception = e

    def _run(self):
        # Derive the checkpoint file name; per-repo migrations get a file
        # of their own so concurrent repos do not clash.
        if 'OBJECT_LIST_FILE_PATH' in os.environ:
            if self.repo_id:
                self.object_list_file_path = '.'.join(['_'.join([os.environ['OBJECT_LIST_FILE_PATH'], self.repo_id]), self.dtype])
            else:
                self.object_list_file_path = '.'.join([os.environ['OBJECT_LIST_FILE_PATH'], self.dtype])

        # Prefer the checkpoint file when it exists and is non-empty;
        # otherwise enumerate the destination store (and seed the file).
        if self.object_list_file_path and \
           os.path.exists(self.object_list_file_path) and \
           os.path.getsize(self.object_list_file_path) > 0:
            logging.info('Start to load [%s] destination object from file' % self.dtype)
            with open(self.object_list_file_path, 'r') as f:
                for line in f:
                    # Each checkpoint line is "repo_id/obj_id".
                    obj = line.rstrip('\n').split('/', 1)
                    if self.invalid_obj(obj):
                        continue
                    self.fetch_count += 1
                    if obj[0] in self.dest_objs:
                        self.dest_objs[obj[0]].add(obj[1])
                    else:
                        self.dest_objs[obj[0]] = set()
                        self.dest_objs[obj[0]].add(obj[1])
        else:
            logging.info('Start to fetch [%s] object from destination' % self.dtype)
            if self.object_list_file_path:
                f = open(self.object_list_file_path, 'a')
            for obj in self.dest_store.list_objs(self.repo_id):
                if self.invalid_obj(obj):
                    continue
                self.fetch_count += 1
                if obj[0] in self.dest_objs:
                    self.dest_objs[obj[0]].add(obj[1])
                else:
                    self.dest_objs[obj[0]] = set()
                    self.dest_objs[obj[0]].add(obj[1])
                if self.object_list_file_path:
                    f.write('/'.join(obj[:2]) + '\n')
                    # Flush periodically so a crash loses little progress.
                    if self.fetch_count % 100 == 0:
                        f.flush()
            if self.object_list_file_path:
                f.close()

        logging.info('[%s] [%d] objects exist in destination' % (self.dtype, self.fetch_count))

        # Reopen for appending newly copied objects from do_work().
        if self.object_list_file_path:
            self.fd = open(self.object_list_file_path, 'a')

        logging.info('Start to migrate [%s] object' % self.dtype)
        self.thread_pool.start()
        self.migrate()
        self.thread_pool.join()

        if self.object_list_file_path:
            self.fd.close()

        logging.info('Complete migrate [%s] object' % self.dtype)

    def do_work(self, task):
        # Copy a single object unless it is already present in dest_objs.
        # Runs on pool threads; exceptions are logged and re-raised so the
        # pool's Worker logs them without killing the whole migration.
        try:
            exists = False
            if task.repo_id in self.dest_objs:
                if task.obj_id in self.dest_objs[task.repo_id]:
                    exists = True
        except Exception as e:
            logging.warning('[%s] Failed to check object %s existence from repo %s: %s' % (self.dtype, task.obj_id, task.repo_id, e))
            raise

        if not exists:
            try:
                data = self.orig_store.read_obj_raw(task.repo_id, task.repo_version, task.obj_id)
            except Exception as e:
                logging.warning('[%s] Failed to read object %s from repo %s: %s' % (self.dtype, task.obj_id, task.repo_id, e))
                raise
            try:
                self.dest_store.write_obj(data, task.repo_id, task.obj_id)
                self.write_count += 1
                if self.write_count % 100 == 0:
                    logging.info('[%s] task: %s objects written to destination.', self.dtype, self.write_count)
                # Record the copy in the checkpoint file under the lock,
                # since many pool threads reach here concurrently.
                if self.object_list_file_path:
                    with self.lock:
                        self.fd.write('/'.join([task.repo_id, task.obj_id]) + '\n')
                        if self.write_count % 100 == 0:
                            self.fd.flush()
            except Exception as e:
                logging.warning('[%s] Failed to write object %s from repo %s: %s' % (self.dtype, task.obj_id, task.repo_id, e))
                raise

    def migrate(self):
        # Enumerate the origin store and feed one Task per object to the pool.
        try:
            obj_list = self.orig_store.list_objs(self.repo_id)
        except Exception as e:
            logging.warning('[%s] Failed to list all objects: %s' % (self.dtype, e))
            raise
        for obj in obj_list:
            if self.invalid_obj(obj):
                continue
            repo_id = obj[0]
            obj_id = obj[1]
            # repo_version is hard-coded to 1 here.
            # NOTE(review): confirm read_obj_raw does not need the real
            # per-repo version for any supported backend.
            task = Task(repo_id, 1, obj_id)
            self.thread_pool.put_task(task)

    def invalid_obj(self, obj):
        # A valid entry is (repo uuid4, 40-char lowercase hex object id, ...).
        if len(obj) < 2:
            return True
        try:
            UUID(obj[0], version = 4)
        except ValueError:
            return True
        # NOTE(review): '\A'/'\Z' in a non-raw string reach re unchanged,
        # but a raw string literal would be the conventional spelling.
        if len(obj[1]) != 40 or not re.match('\A[0-9a-f]+\Z', obj[1]):
            return True
        return False
def main():
    # Migrate all repos between two seafile installations. The origin
    # object-store factory reads the current SEAFILE_CENTRAL_CONF_DIR; the
    # destination factory must be constructed only after the env var has
    # been repointed at DEST_SEAFILE_CENTRAL_CONF_DIR.
    try:
        orig_obj_factory = SeafObjStoreFactory()
        os.environ['SEAFILE_CENTRAL_CONF_DIR'] = os.environ['DEST_SEAFILE_CENTRAL_CONF_DIR']
    except KeyError:
        logging.warning('DEST_SEAFILE_CENTRAL_CONF_DIR environment variable is not set.\n')
        sys.exit()

    dest_obj_factory = SeafObjStoreFactory()

    dtypes = ['commits', 'fs', 'blocks']
    for dtype in dtypes:
        orig_store = orig_obj_factory.get_obj_store(dtype)
        dest_store = dest_obj_factory.get_obj_store(dtype)
        # NOTE(review): workers are started but never joined here; the
        # process simply exits when all non-daemon threads finish.
        ObjMigrateWorker(orig_store, dest_store, dtype).start()

if __name__ == '__main__':
    main()

80
scripts/migrate.sh Executable file
View File

@ -0,0 +1,80 @@
#!/bin/bash
echo ""
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
default_conf_dir=${TOPDIR}/conf
migrate=${INSTALLPATH}/migrate.py
script_name=$0
# Print a short usage summary for this script.
function usage () {
    echo "usage : "
    echo " ./$(basename ${script_name}) destination_config_file_path"
    echo ""
}
# Locate a Python 3 interpreter and store it in $PYTHON.
# A pre-set, executable $PYTHON is honored as-is; otherwise try python3,
# then a versioned "pythonX.Y" derived from `python --version`.
# Exits with status 1 when no suitable interpreter is found.
function check_python_executable() {
    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
        return 0
    fi

    if which python3 2>/dev/null 1>&2; then
        PYTHON=python3
    elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
        echo
        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
        echo
        exit 1
    else
        PYTHON="python"$(python --version | cut -b 8-10)
        # Fix: "!which" (no space) is not negation in bash — it tries to run
        # a command literally named "!which". "! which" is required.
        if ! which $PYTHON 2>/dev/null 1>&2; then
            echo
            echo "Can't find a python executable of $PYTHON in PATH"
            echo "Install $PYTHON before continue."
            echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
            echo
            exit 1
        fi
    fi
}
# Export the seafile server environment and run migrate.py.
# Relies on $dest_seafile_central_conf_dir being set by the caller and on
# $PYTHON having been set by check_python_executable.
function do_migrate () {
    export CCNET_CONF_DIR=${default_ccnet_conf_dir}
    export SEAFILE_CONF_DIR=${default_seafile_data_dir}
    export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir}
    export DEST_SEAFILE_CENTRAL_CONF_DIR=${dest_seafile_central_conf_dir}
    # Bundled site-packages and seahub thirdpart must win over system ones.
    export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
    $PYTHON ${migrate}
}
check_python_executable;

# Show usage and stop when any argument is -h/--help.
if [ $# -gt 0 ];
then
    # Fix: quote "$@" and "${param}" so arguments containing whitespace
    # are not word-split.
    for param in "$@";
    do
        if [ "${param}" = "-h" -o "${param}" = "--help" ];
        then
            usage;
            exit 1;
        fi
    done
fi

# Exactly one argument is required: the destination conf directory.
if [ $# -ne 1 ];
then
    usage;
    exit 1;
fi

dest_seafile_central_conf_dir="$1"

do_migrate;

echo "Done."

817
scripts/pro.py Executable file
View File

@ -0,0 +1,817 @@
#!/usr/bin/env python3
'''
Setup/Start/Stop the extra components of Seafile Professional
The diretory layout:
- haiwen
- seafile-server-1.8.0
- seafile.sh
- seahub.sh
- seafile/
- seahub/
- pro
- pro.py
- python
- sqlalchemy/
- pyes/
- thrift/
- libevent
- python-daemon/
- lockfile/
- seafes/
- seafevents/
- seaf-dav/
- elasticsearch/
- misc
- seahub_extra.sql
- seafile-license.txt
- seahub.db
- seahub_settings.py
- ccnet/
- seafile-data/
- seahub-data/
- pro-data
- search/
- data/
- logs/
- seafevents.conf
- seafdav.conf
- seafevents.db
- index.log
- seafevents.log
'''
import os
import sys
import glob
import subprocess
import io
import getpass
try:
import pymysql
except:
pass
import configparser
########################
## Helper functions
########################
class InvalidAnswer(Exception):
    '''Raised by answer validators when user input is not acceptable.'''
    def __init__(self, msg):
        Exception.__init__(self)
        self.msg = msg

    def __str__(self):
        return self.msg
class Utils(object):
    '''Groups all helper functions here'''
    @staticmethod
    def highlight(content):
        '''Add ANSI color to content to get it highlighted on terminal'''
        return '\x1b[33m%s\x1b[m' % content

    @staticmethod
    def info(msg, newline=True):
        # Print an informational message to stdout without any decoration.
        sys.stdout.write(msg)
        if newline:
            sys.stdout.write('\n')

    @staticmethod
    def error(msg):
        '''Print error and exit'''
        print()
        print('Error: ' + msg)
        sys.exit(1)

    @staticmethod
    def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
        '''Run a program and wait it to finish, and return its exit code. The
        standard output of this program is supressed.
        '''
        with open(os.devnull, 'w') as devnull:
            if suppress_stdout:
                stdout = devnull
            else:
                stdout = sys.stdout
            if suppress_stderr:
                stderr = devnull
            else:
                stderr = sys.stderr
            proc = subprocess.Popen(argv,
                                    cwd=cwd,
                                    stdout=stdout,
                                    stderr=stderr,
                                    env=env)
            return proc.wait()

    @staticmethod
    def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
        '''Like run_argv but specify a command line string instead of argv'''
        # NOTE: shell=True — cmdline is passed to the shell unquoted, so it
        # must come from trusted input only.
        with open(os.devnull, 'w') as devnull:
            if suppress_stdout:
                stdout = devnull
            else:
                stdout = sys.stdout
            if suppress_stderr:
                stderr = devnull
            else:
                stderr = sys.stderr
            proc = subprocess.Popen(cmdline,
                                    cwd=cwd,
                                    stdout=stdout,
                                    stderr=stderr,
                                    env=env,
                                    shell=True)
            return proc.wait()

    @staticmethod
    def prepend_env_value(name, value, env=None, seperator=':'):
        '''prepend a new value to a list'''
        # Operates on os.environ unless an explicit env mapping is given.
        if env is None:
            env = os.environ
        try:
            current_value = env[name]
        except KeyError:
            current_value = ''
        new_value = value
        if current_value:
            new_value += seperator + current_value
        env[name] = new_value

    @staticmethod
    def must_mkdir(path):
        '''Create a directory, exit on failure'''
        try:
            os.mkdir(path)
        except OSError as e:
            Utils.error('failed to create directory %s:%s' % (path, e))

    @staticmethod
    def find_in_path(prog):
        # Search PATH for an entry containing *prog*; return the full path
        # or None when not found.
        if 'win32' in sys.platform:
            sep = ';'
        else:
            sep = ':'
        dirs = os.environ['PATH'].split(sep)
        for d in dirs:
            d = d.strip()
            if d == '':
                continue
            path = os.path.join(d, prog)
            if os.path.exists(path):
                return path
        return None

    @staticmethod
    def read_config(fn=None):
        '''Return a case sensitive ConfigParser by reading the file "fn"'''
        cp = configparser.ConfigParser()
        # optionxform = str keeps option names case sensitive.
        cp.optionxform = str
        if fn:
            cp.read(fn)
        return cp

    @staticmethod
    def write_config(cp, fn):
        '''Write the ConfigParser "cp" out to the file "fn"'''
        with open(fn, 'w') as fp:
            cp.write(fp)

    @staticmethod
    def ask_question(desc,
                     key=None,
                     note=None,
                     default=None,
                     validate=None,
                     yes_or_no=False,
                     password=False):
        '''Ask a question, return the answer.
        @desc description, e.g. "What is the port of ccnet?"

        @key a name to represent the target of the question, e.g. "port for
        ccnet server"

        @note additional information for the question, e.g. "Must be a valid
        port number"

        @default the default value of the question. If the default value is
        not None, when the user enter nothing and press [ENTER], the default
        value would be returned

        @validate a function that takes the user input as the only parameter
        and validate it. It should return a validated value, or throws an
        "InvalidAnswer" exception if the input is not valid.

        @yes_or_no If true, the user must answer "yes" or "no", and a boolean
        value would be returned

        @password If true, the user input would not be echoed to the
        console
        '''
        assert key or yes_or_no
        # Format description
        print()
        if note:
            desc += '\n' + note
        desc += '\n'
        if yes_or_no:
            desc += '[ yes or no ]'
        else:
            if default:
                desc += '[ default "%s" ]' % default
            else:
                desc += '[ %s ]' % key
        desc += ' '
        while True:
            # prompt for user input
            if password:
                answer = getpass.getpass(desc).strip()
            else:
                answer = input(desc).strip()
            # No user input: use default
            if not answer:
                if default:
                    answer = default
                else:
                    continue
            # Have user input: validate answer
            if yes_or_no:
                if answer not in ['yes', 'no']:
                    print(Utils.highlight('\nPlease answer yes or no\n'))
                    continue
                else:
                    return answer == 'yes'
            else:
                if validate:
                    try:
                        return validate(answer)
                    except InvalidAnswer as e:
                        print(Utils.highlight('\n%s\n' % e))
                        continue
                else:
                    return answer

    @staticmethod
    def validate_port(port):
        # Parse and range-check a TCP port number; raises InvalidAnswer
        # (for ask_question's validate hook) on bad input.
        try:
            port = int(port)
        except ValueError:
            raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port))

        if port <= 0 or port > 65535:
            raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port))

        return port

    @staticmethod
    def get_python_executable():
        '''Find a suitable python executable'''
        try_list = [
            'python3',
        ]

        for prog in try_list:
            path = Utils.find_in_path(prog)
            if path is not None:
                return path

        # Fall back to $PYTHON, then a bare "python".
        path = os.environ.get('PYTHON', 'python')

        if not path:
            Utils.error('Can not find python executable')

        return path

    @staticmethod
    def pkill(process):
        '''Kill the program with the given name'''
        argv = [
            'pkill', '-f', process
        ]

        Utils.run_argv(argv)
class EnvManager(object):
    '''System environment and directory layout'''
    def __init__(self):
        # This file lives at <top>/<seafile-server-x.y.z>/pro/pro.py, so two
        # dirname() calls yield the server install dir; its parent is the
        # deployment top dir (see the module docstring's layout).
        self.install_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        self.top_dir = os.path.dirname(self.install_path)
        self.bin_dir = os.path.join(self.install_path, 'seafile', 'bin')
        self.central_config_dir = os.path.join(self.top_dir, 'conf')

        self.pro_data_dir = os.path.join(self.top_dir, 'pro-data')
        self.pro_program_dir = os.path.join(self.install_path, 'pro')
        self.pro_pylibs_dir = os.path.join(self.pro_program_dir, 'python')
        self.pro_misc_dir = os.path.join(self.pro_program_dir, 'misc')

        self.seafes_dir = os.path.join(self.pro_pylibs_dir, 'seafes')
        self.seahub_dir = os.path.join(self.install_path, 'seahub')

        self.ccnet_dir = os.path.join(self.top_dir, 'ccnet')
        self.seafile_dir = os.path.join(self.top_dir, 'seafile-data')
        # NOTE(review): central_config_dir is assigned twice with the same
        # value; the duplicate looks redundant.
        self.central_config_dir = os.path.join(self.top_dir, 'conf')
        self.seafile_rpc_pipe_path = os.path.join(self.install_path, 'runtime');

    def get_seahub_env(self):
        '''Prepare for seahub syncdb'''
        env = dict(os.environ)
        env['CCNET_CONF_DIR'] = self.ccnet_dir
        env['SEAFILE_CONF_DIR'] = self.seafile_dir
        env['SEAFILE_CENTRAL_CONF_DIR'] = self.central_config_dir
        env['SEAFILE_RPC_PIPE_PATH'] = self.seafile_rpc_pipe_path
        env['SEAFES_DIR'] = self.seafes_dir
        env['SEAHUB_DIR'] = self.seahub_dir
        self.setup_python_path(env)
        return env

    def setup_python_path(self, env):
        '''And PYTHONPATH and CCNET_CONF_DIR/SEAFILE_CONF_DIR to env, which is
        needed by seahub
        '''
        extra_python_path = [
            self.pro_pylibs_dir,
            os.path.join(self.top_dir, 'conf'), # LDAP sync has to access seahub_settings.py
            os.path.join(self.install_path, 'seahub', 'thirdpart'),
            os.path.join(self.install_path, 'seahub-extra'),
            os.path.join(self.install_path, 'seahub-extra', 'thirdparts'),
            os.path.join(self.install_path, 'seafile/lib/python3/site-packages'),
            os.path.join(self.install_path, 'seafile/lib64/python3/site-packages'),
        ]
        for path in extra_python_path:
            Utils.prepend_env_value('PYTHONPATH', path, env=env)
########################
## END helper functions
########################
class Elasticsearch(object):
    """Thin wrapper around starting/stopping the bundled Elasticsearch."""

    def __init__(self):
        search_dir = os.path.join(env_mgr.pro_data_dir, 'search')
        self.es_executable = os.path.join(
            env_mgr.pro_program_dir, 'elasticsearch', 'bin', 'elasticsearch')
        self.es_logs_dir = os.path.join(search_dir, 'logs')
        self.es_data_dir = os.path.join(search_dir, 'data')

    def start(self):
        """Start Elasticsearch with logs/data redirected into pro-data/search
        via -D command line properties.
        """
        cmdline = [
            self.es_executable,
            '-Des.path.logs=%s' % self.es_logs_dir,
            '-Des.path.data=%s' % self.es_data_dir,
        ]
        Utils.run_argv(cmdline, suppress_stdout=True, suppress_stderr=True)

    def stop(self):
        """Kill the running Elasticsearch JVM by its bootstrap class name."""
        Utils.pkill('org.elasticsearch.bootstrap.ElasticSearch')
class DBConf(object):
    """Abstract base for the seafevents database configuration strategies."""

    TYPE_SQLITE = 'sqlite'
    TYPE_MYSQL = 'mysql'
    DB_SECTION = 'DATABASE'

    def __init__(self, db_type):
        # Either TYPE_SQLITE or TYPE_MYSQL.
        self.db_type = db_type

    def generate_conf(self, config):
        """Fill the [DATABASE] section of *config*; subclass responsibility."""
        raise NotImplementedError

    def create_extra_tables(self):
        """Create the seahub-extra tables; subclass responsibility."""
        raise NotImplementedError

    def generate_config_text(self):
        """Render the generated [DATABASE] section as ini-format text."""
        config = Utils.read_config()
        self.generate_conf(config)
        with io.StringIO() as buf:
            config.write(buf)
            return buf.getvalue()
class MySQLDBConf(DBConf):
    """MySQL-backed seafevents database configuration."""

    def __init__(self):
        DBConf.__init__(self, self.TYPE_MYSQL)
        self.mysql_host = ''
        self.mysql_port = ''
        self.mysql_user = ''
        self.mysql_password = ''
        self.mysql_db = ''
        self.conn = None  # opened lazily by get_conn()

    def generate_conf(self, config):
        """Write the mysql [DATABASE] section into *config*.

        Resulting section:
            [DATABASE]
            type=mysql
            host=localhost
            username=seafevents
            password=seafevents
            name=seafevents
        """
        section = self.DB_SECTION
        config.add_section(section)
        config.set(section, 'type', 'mysql')
        # host/port are optional; pymysql falls back to its defaults.
        if self.mysql_host:
            config.set(section, 'host', self.mysql_host)
        if self.mysql_port:
            config.set(section, 'port', str(self.mysql_port))
        config.set(section, 'username', self.mysql_user)
        config.set(section, 'password', self.mysql_password)
        config.set(section, 'name', self.mysql_db)

    def create_extra_tables(self):
        """Run every statement of seahub_extra.mysql.sql against the db."""
        self.get_conn()
        sql_file = os.path.join(env_mgr.pro_misc_dir, 'seahub_extra.mysql.sql')
        with open(sql_file, 'r') as fp:
            content = fp.read()
        # The dump is split on ';' — naive, but sufficient for this file.
        for sql in content.split(';'):
            sql = sql.strip()
            if sql:
                print('>>> sql is', sql, len(sql))
                self.exec_sql(sql)

    def exec_sql(self, sql):
        """Execute one statement, aborting setup on any database error."""
        cursor = self.conn.cursor()
        try:
            cursor.execute(sql)
        except pymysql.err.OperationalError as e:
            Utils.error('Failed to create extra tables: %s' % e.args[1])
        except Exception as e:
            Utils.error('Failed to create extra tables: %s' % e)

    def get_conn(self):
        """Open self.conn to the configured mysql database, or abort."""
        print('host is', self.mysql_host)
        print('port is', self.mysql_port)
        kwargs = dict(user=self.mysql_user,
                      passwd=self.mysql_password,
                      db=self.mysql_db)
        if self.mysql_port:
            kwargs['port'] = self.mysql_port
        if self.mysql_host:
            kwargs['host'] = self.mysql_host
        try:
            self.conn = pymysql.connect(**kwargs)
        except pymysql.err.OperationalError as e:
            Utils.error('Failed to connect to mysql database %s: %s' % (self.mysql_db, e.args[1]))
        except Exception as e:
            Utils.error('Failed to connect to mysql database %s: %s' % (self.mysql_db, e))
class SQLiteDBConf(DBConf):
    """Sqlite3-backed seafevents database configuration."""

    def __init__(self):
        DBConf.__init__(self, self.TYPE_SQLITE)
        self.db_path = os.path.join(env_mgr.pro_data_dir, 'seafevents.db')

    def generate_conf(self, config):
        """Write the sqlite3 [DATABASE] section into *config*:
            [DATABASE]
            type=sqlite3
            path=x.db
        """
        section = self.DB_SECTION
        config.add_section(section)
        config.set(section, 'type', 'sqlite3')
        config.set(section, 'path', self.db_path)

    def create_extra_tables(self):
        """Feed seahub_extra.sqlite3.sql into seahub.db via the sqlite3 CLI."""
        seahub_db = os.path.join(env_mgr.top_dir, 'seahub.db')
        sql_file = os.path.join(env_mgr.pro_misc_dir, 'seahub_extra.sqlite3.sql')
        Utils.info('Create extra database tables ... ', newline=False)
        # NOTE(review): paths come from the install layout, not user input,
        # so the shell invocation is acceptable here.
        if os.system('sqlite3 %s < %s' % (seahub_db, sql_file)) != 0:
            Utils.error('\nfailed to create seahub extra database tables')
        Utils.info('Done')
class ProfessionalConfigurator(object):
    '''Main abstract class for the config process '''
    def __init__(self, args, migrate=False):
        # args: parsed argparse namespace from main()
        # migrate: True when upgrading from the community edition
        self.args = args
        self.migrate = migrate
        self.db_type = ''
        self.db_config = None # database config strategy
        self.seafevents_conf = os.path.join(env_mgr.central_config_dir, 'seafevents.conf')

    def check_pre_condition(self):
        # Verify external requirements before configuring; subclass hook.
        raise NotImplementedError

    def config(self):
        # Choose and populate self.db_config; subclass hook.
        raise NotImplementedError

    def generate(self):
        # Write out all generated config files.
        self.generate_seafevents_conf()

    def generate_seafevents_conf(self):
        # Render conf/seafevents.conf from the template below; the
        # [DATABASE] section is produced by the chosen db_config strategy.
        template = '''\
%(db_config_text)s
[AUDIT]
enabled = true
[INDEX FILES]
enabled = true
interval = 10m
highlight = fvh
## If true, indexes the contents of office/pdf files while updating search index
## Note: If you change this option from "false" to "true", then you need to clear the search index and update the index again. See the FAQ for details.
index_office_pdf = true
[SEAHUB EMAIL]
enabled = true
## interval of sending Seahub email. Can be s(seconds), m(minutes), h(hours), d(days)
interval = 30m
# Enable statistics
[STATISTICS]
enabled=true
'''
        db_config_text = self.db_config.generate_config_text()
        if not os.path.exists(env_mgr.pro_data_dir):
            os.makedirs(env_mgr.pro_data_dir)
            # pro-data may hold credentials; keep it owner-only.
            os.chmod(env_mgr.pro_data_dir, 0o700)

        with open(self.seafevents_conf, 'w') as fp:
            fp.write(template % dict(db_config_text=db_config_text))
class MigratingProfessionalConfigurator(ProfessionalConfigurator):
    '''This scripts is used standalone to migrate from community version to
    professional version
    '''
    def __init__(self, args):
        ProfessionalConfigurator.__init__(self, args, migrate=True)

    def check_pre_condition(self):
        # Pro search indexing requires a java runtime on PATH.
        self.check_java()

    def config(self):
        self.detect_db_type()
        # self.create_extra_tables()
        self.update_avatars_link()

    def detect_db_type(self):
        '''Read database info from seahub_settings.py'''
        # seahub_settings.py lives in the central config dir; make it importable.
        sys.path.insert(0, env_mgr.central_config_dir)
        try:
            from seahub_settings import DATABASES # pylint: disable=F0401
        except ImportError:
            print('Failed to import "DATABASES" from seahub_settings.py, assuming sqlite3')
            self.db_config = SQLiteDBConf()
            return

        try:
            default_config = DATABASES['default']
            if default_config['ENGINE'] == 'django.db.backends.mysql':
                db_config = MySQLDBConf()
                db_config.mysql_host = default_config.get('HOST', '')
                db_config.mysql_port = default_config.get('PORT', '')
                db_config.mysql_user = default_config.get('USER', '')
                db_config.mysql_password = default_config.get('PASSWORD', '')
                db_config.mysql_db = default_config['NAME']
                # Django stores PORT as a string; convert for pymysql.
                if db_config.mysql_port:
                    db_config.mysql_port = int(db_config.mysql_port)

                print('Your seafile server is using mysql')
                self.db_config = db_config
            else:
                print('Your seafile server is using sqlite3')
                self.db_config = SQLiteDBConf()
        except KeyError:
            Utils.error('Error in your config %s' % \
                        os.path.join(env_mgr.top_dir, 'seahub_settings.py'))

    def update_avatars_link(self):
        # Re-run the minor upgrade script, which refreshes the avatars link.
        minor_upgrade_script = os.path.join(env_mgr.install_path, 'upgrade', 'minor-upgrade.sh')
        argv = [
            minor_upgrade_script
        ]
        if Utils.run_argv(argv) != 0:
            Utils.error('failed to update avatars folder')

    def check_java(self):
        # Abort with install instructions when no 'java' is found on PATH.
        Utils.info('\nChecking java ... ', newline=False)
        if not Utils.find_in_path('java'):
            msg = '''\nJava is not found. instal it first.\n
On Debian/Ubuntu: apt-get install default-jre
On CentOS/RHEL: yum install jre
'''
            Utils.error(msg)

        Utils.info('Done')

    def create_extra_tables(self):
        '''Create seahub-extra database tables'''
        self.db_config.create_extra_tables()
class SetupProfessionalConfigurator(ProfessionalConfigurator):
    """Configurator used during a fresh install.

    Invoked by setup-seafile.sh / setup-seafile-mysql.sh; the database
    strategy is chosen purely from the command line flags:

        ./pro.py setup                      # sqlite3
        ./pro.py setup --mysql
                       --mysql_host=
                       --mysql_port=
                       --mysql_user=
                       --mysql_password=
                       --mysql_db=
    """

    def __init__(self, args):
        ProfessionalConfigurator.__init__(self, args, migrate=False)

    def config(self):
        """Build self.db_config from the parsed command line arguments."""
        if not self.args.mysql:
            self.db_config = SQLiteDBConf()
            return
        cfg = MySQLDBConf()
        cfg.mysql_host = self.args.mysql_host
        cfg.mysql_port = self.args.mysql_port
        cfg.mysql_user = self.args.mysql_user
        cfg.mysql_password = self.args.mysql_password
        cfg.mysql_db = self.args.mysql_db
        self.db_config = cfg

    def check_pre_condition(self):
        """Fresh installs have no extra requirements to verify."""
        pass
def do_setup(args):
    """Entry point of the 'setup' subcommand: pick a configurator and run it."""
    global pro_config
    configurator_cls = (MigratingProfessionalConfigurator if args.migrate
                        else SetupProfessionalConfigurator)
    pro_config = configurator_cls(args)
    pro_config.check_pre_condition()
    pro_config.config()
    pro_config.generate()
def handle_search_commands(args):
    """Dispatch the 'search' subcommand to the requested index operation."""
    if args.update:
        update_search_index()
        return
    if args.clear:
        delete_search_index()
def get_seafes_env():
    """Return the seahub env extended with the seafevents config location."""
    env = env_mgr.get_seahub_env()
    env['EVENTS_CONFIG_FILE'] = os.path.join(
        env_mgr.central_config_dir, 'seafevents.conf')
    return env
def update_search_index():
    """Run 'python -m seafes.index_local update' to refresh the search index."""
    Utils.info('\nUpdating search index, this may take a while...\n')
    cmdline = [
        Utils.get_python_executable(),
        '-m', 'seafes.index_local',
        '--loglevel', 'debug',
        'update',
    ]
    Utils.run_argv(cmdline, env=get_seafes_env())
def delete_search_index():
    """Interactively confirm, then clear the whole search index."""
    answer = None
    # Empty answer means the default ('y').
    while answer not in ('y', 'n', ''):
        answer = input('Delete seafile search index ([y]/n)? ').strip()
    if answer == 'n':
        return
    cmdline = [
        Utils.get_python_executable(),
        '-m', 'seafes.index_local',
        '--loglevel', 'debug',
        'clear',
    ]
    Utils.info('\nDelete search index, this may take a while...\n')
    Utils.run_argv(cmdline, env=get_seafes_env())
def handle_ldap_sync_commands(args):
    """Run the ldap syncer; '-t' is appended in test mode."""
    cmdline = [
        Utils.get_python_executable(),
        '-m', 'seafevents.ldap_syncer.run_ldap_sync',
    ]
    if args.test:
        cmdline.append('-t')
    Utils.run_argv(cmdline, env=env_mgr.get_seahub_env())
def handle_virus_scan_commands(args):
    """Run one pass of the virus scanner against seafevents.conf."""
    events_conf = os.path.join(env_mgr.central_config_dir, 'seafevents.conf')
    cmdline = [
        Utils.get_python_executable(),
        '-m', 'seafevents.virus_scanner.run_virus_scan',
        '-c', events_conf,
    ]
    Utils.run_argv(cmdline, env=env_mgr.get_seahub_env())
# Global singletons: pro_config is set by do_setup(); env_mgr holds the
# install/config/data paths and is used throughout this script.
pro_config = None
env_mgr = EnvManager()
def main():
    """Parse the command line and dispatch to the matching sub-command."""
    try:
        import argparse
    except ImportError:
        # Very old pythons shipped argparse as a separate egg.
        sys.path.insert(0, glob.glob(os.path.join(env_mgr.pro_pylibs_dir, 'argparse*.egg'))[0])
        import argparse

    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(title='subcommands', description='')

    # setup
    parser_setup = subparsers.add_parser('setup', help='Setup extra components of seafile pro')
    parser_setup.set_defaults(func=do_setup)
    parser_setup.add_argument('--migrate', help='migrate from community version', action='store_true')
    # options for a non-migrate (fresh) setup
    parser_setup.add_argument('--mysql', help='use mysql', action='store_true')
    for option in ('--mysql_host', '--mysql_port', '--mysql_user',
                   '--mysql_password', '--mysql_db'):
        parser_setup.add_argument(option)

    # search
    parser_search = subparsers.add_parser('search', help='search related utility commands')
    parser_search.add_argument('--update', help='update seafile search index', action='store_true')
    parser_search.add_argument('--clear', help='delete seafile search index', action='store_true')
    parser_search.set_defaults(func=handle_search_commands)

    # ldapsync
    parser_ldap_sync = subparsers.add_parser('ldapsync', help='ldap sync commands')
    parser_ldap_sync.add_argument('-t', '--test', help='test ldap sync', action='store_true')
    parser_ldap_sync.set_defaults(func=handle_ldap_sync_commands)

    # virus scan
    parser_virus_scan = subparsers.add_parser('virus_scan', help='virus scan commands')
    parser_virus_scan.set_defaults(func=handle_virus_scan_commands)

    # With no arguments at all, just show the help text.
    if len(sys.argv) == 1:
        print(parser.format_help())
        return

    args = parser.parse_args()
    args.func(args)


if __name__ == '__main__':
    main()

41
scripts/remove-objs.py Normal file
View File

@ -0,0 +1,41 @@
#!/usr/bin/env python3
import sys
import os
import logging
from seafobj.objstore_factory import objstore_factory
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
def main(argv):
    """Remove the commit/fs/block objects listed for one repo from a storage.

    argv: [prog, repo_id, orig_storage_id].  For each data type, the object
    list is read from "$OBJECT_LIST_FILE_PATH_<repo_id>.<dtype>", one
    'repo_id/obj_id' entry per line.
    """
    repo_id = argv[1]
    orig_storage_id = argv[2]

    for dtype in ('commits', 'fs', 'blocks'):
        if 'OBJECT_LIST_FILE_PATH' not in os.environ:
            logging.warning('OBJECT_LIST_FILE_PATH environment does not exist.')
            sys.exit()
        object_list_file_path = '.'.join(
            ['_'.join([os.environ['OBJECT_LIST_FILE_PATH'], repo_id]), dtype])

        obj_stores = objstore_factory.get_obj_stores(dtype)
        #If these storage ids passed in do not exist in conf, stop migrate this repo.
        if orig_storage_id not in obj_stores:
            logging.warning('Storage id passed in does not exist in configuration.\n')
            sys.exit()
        orig_store = obj_stores[orig_storage_id]

        with open(object_list_file_path, 'r') as f:
            for line in f:
                repo, obj_id = line.rstrip('\n').split('/', 1)
                try:
                    orig_store.remove_obj(repo, obj_id)
                except Exception as e:
                    logging.warning('Failed to remove object %s from repo %s:%s' % (obj_id, repo, e))

    logging.info('The process of remove repo [%s] is over.\n', repo_id)


if __name__ == '__main__':
    main(sys.argv)

79
scripts/remove-objs.sh Executable file
View File

@ -0,0 +1,79 @@
#!/bin/bash

echo ""

SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
default_conf_dir=${TOPDIR}/conf
seafile_rpc_pipe_path=${INSTALLPATH}/runtime
remove=${INSTALLPATH}/remove-objs.py

script_name=$0

# Print a short usage summary.
function usage () {
    echo "usage : "
    # BUGFIX: the original ended this echo with a trailing backslash, which
    # made the following 'echo""' part of the same command line.
    echo "  ./$(basename ${script_name})" \
         "<repo id to migrate>" \
         "<storage id>"
    echo ""
}

# Locate a python3 interpreter and export it as $PYTHON.
function check_python_executable() {
    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
        return 0
    fi

    if which python3 2>/dev/null 1>&2; then
        PYTHON=python3
    elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
        echo
        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
        echo
        exit 1
    else
        PYTHON="python"$(python --version | cut -b 8-10)
        # BUGFIX: '!' must be a separate word to negate a command;
        # '!which' is looked up as a command named '!which'.
        if ! which $PYTHON 2>/dev/null 1>&2; then
            echo
            echo "Can't find a python executable of $PYTHON in PATH"
            echo "Install $PYTHON before continue."
            echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
            echo
            exit 1
        fi
    fi
}

# Export the seafile environment and run remove-objs.py with the given args.
function do_remove () {
    export CCNET_CONF_DIR=${default_ccnet_conf_dir}
    export SEAFILE_CONF_DIR=${default_seafile_data_dir}
    export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir}
    export SEAFILE_RPC_PIPE_PATH=${seafile_rpc_pipe_path}
    export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH

    $PYTHON ${remove} $@
}

check_python_executable;

# Show help when any argument is -h/--help.
if [ $# -gt 0 ];
then
    for param in $@;
    do
        if [ ${param} = "-h" -o ${param} = "--help" ];
        then
            usage;
            exit 1;
        fi
    done
fi

# Exactly two positional arguments are required.
if [ $# -ne 2 ];
then
    usage;
    exit 1;
fi

do_remove $@;

echo "Done."

138
scripts/run_index_master.sh Executable file
View File

@ -0,0 +1,138 @@
#!/bin/bash

SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
central_config_dir=${TOPDIR}/conf
pro_pylibs_dir=${INSTALLPATH}/pro/python
pidfile=${INSTALLPATH}/runtime/index_master.pid

script_name=$0

# Print supported sub-commands.
function usage () {
    echo "Usage: "
    echo
    echo "  $(basename ${script_name}) { start | stop | restart | python-env }"
}

if [[ $1 != "start" && $1 != "stop" && $1 != "restart" && $1 != "python-env" ]]; then
    usage;
    exit 1;
fi

# Locate a python3 interpreter and export it as $PYTHON.
function check_python_executable() {
    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
        return 0
    fi

    if which python3 2>/dev/null 1>&2; then
        PYTHON=python3
    elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
        echo
        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
        echo
        exit 1
    else
        PYTHON="python"$(python --version | cut -b 8-10)
        # BUGFIX: '!' must be a separate word to negate a command.
        if ! which $PYTHON 2>/dev/null 1>&2; then
            echo
            echo "Can't find a python executable of $PYTHON in PATH"
            echo "Install $PYTHON before continue."
            echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
            echo
            exit 1
        fi
    fi
}

# Abort unless the seafile data dir created by setup exists.
function validate_seafile_data_dir () {
    if [[ ! -d ${default_seafile_data_dir} ]]; then
        echo "Error: there is no seafile server data directory."
        echo "Have you run setup-seafile.sh before this?"
        echo ""
        exit 1;
    fi
}

# Create ${TOPDIR}/logs if needed and export it as LOG_DIR.
function prepare_log_dir() {
    logdir=${TOPDIR}/logs
    # BUGFIX: the original tested undefined 'logsdir'; test 'logdir'.
    if ! [[ -d ${logdir} ]]; then
        if ! mkdir -p "${logdir}"; then
            echo "ERROR: failed to create logs dir \"${logdir}\""
            exit 1
        fi
    fi
    export LOG_DIR=${logdir}
}

# Export every environment variable the index master needs.
function before_start() {
    check_python_executable;
    prepare_log_dir;
    validate_seafile_data_dir;

    export SEAFILE_CONF_DIR=${default_seafile_data_dir}
    export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir}
    export SEAFES_DIR=$pro_pylibs_dir/seafes
    export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
    export PYTHONPATH=$PYTHONPATH:$pro_pylibs_dir
    export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/
    export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/thirdparts
    export EVENTS_CONFIG_FILE=${SEAFILE_CENTRAL_CONF_DIR}/seafevents.conf
    export INDEX_MASTER_CONFIG_FILE=${SEAFILE_CENTRAL_CONF_DIR}/index-master.conf
}

# Run an arbitrary python command inside the prepared environment.
run_python() {
    before_start;
    $PYTHON ${@:2}
}

start_index_master() {
    before_start;
    nohup $PYTHON -m seafes.index_master --loglevel debug --logfile ${logdir}/index_master.log start & echo $! > $pidfile
    sleep 2
    if ! pgrep -f "seafes.index_master" 2>/dev/null 1>&2; then
        printf "\033[33mError:Index master failed to start.\033[m\n"
        echo "Please try to run \"./run_index_master.sh start\" again"
        exit 1;
    fi

    echo
    echo "Index master is started"
    echo
}

stop_index_master() {
    # Workers pull tasks from the master; they must be stopped first.
    if pgrep -f "seafes.index_worker" 2>/dev/null 1>&2; then
        printf "\033[33mError:Index worker need be stopped first.\033[m\n"
        exit 1;
    fi
    if [[ -f ${pidfile} ]]; then
        pid=$(cat "${pidfile}")
        echo "Stopping index master ..."
        kill ${pid}
        rm -f ${pidfile}
        return 0
    else
        echo "Index master is not running"
    fi
}

case $1 in
    "start" )
        start_index_master;
        ;;
    "stop" )
        stop_index_master;
        ;;
    "restart" )
        stop_index_master
        sleep 2
        start_index_master
        ;;
    "python-env" )
        run_python "$@"
        ;;
esac

122
scripts/run_index_worker.sh Executable file
View File

@ -0,0 +1,122 @@
#!/bin/bash
# BUGFIX: shebang was missing although the file is installed executable.

SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
central_config_dir=${TOPDIR}/conf
pro_pylibs_dir=${INSTALLPATH}/pro/python
pidfile=${INSTALLPATH}/runtime/index_worker.pid

script_name=$0

# Print supported sub-commands.
function usage () {
    echo "Usage: "
    echo
    echo "  $(basename ${script_name}) { start | stop | restart }"
}

if [[ $1 != "start" && $1 != "stop" && $1 != "restart" ]]; then
    usage;
    exit 1;
fi

# Locate a python3 interpreter and export it as $PYTHON.
function check_python_executable() {
    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
        return 0
    fi

    if which python3 2>/dev/null 1>&2; then
        PYTHON=python3
    elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
        echo
        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
        echo
        exit 1
    else
        PYTHON="python"$(python --version | cut -b 8-10)
        # BUGFIX: '!' must be a separate word to negate a command.
        if ! which $PYTHON 2>/dev/null 1>&2; then
            echo
            echo "Can't find a python executable of $PYTHON in PATH"
            echo "Install $PYTHON before continue."
            echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
            echo
            exit 1
        fi
    fi
}

# Abort unless the seafile data dir created by setup exists.
function validate_seafile_data_dir () {
    if [[ ! -d ${default_seafile_data_dir} ]]; then
        echo "Error: there is no seafile server data directory."
        echo "Have you run setup-seafile.sh before this?"
        echo ""
        exit 1;
    fi
}

# Create ${TOPDIR}/logs if needed and export it as LOG_DIR.
function prepare_log_dir() {
    logdir=${TOPDIR}/logs
    # BUGFIX: the original tested undefined 'logsdir'; test 'logdir'.
    if ! [[ -d ${logdir} ]]; then
        if ! mkdir -p "${logdir}"; then
            echo "ERROR: failed to create logs dir \"${logdir}\""
            exit 1
        fi
    fi
    export LOG_DIR=${logdir}
}

# Export every environment variable the index worker needs.
function before_start() {
    check_python_executable;
    prepare_log_dir;
    validate_seafile_data_dir;

    export SEAFILE_CONF_DIR=${default_seafile_data_dir}
    export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir}
    export SEAFES_DIR=$pro_pylibs_dir/seafes
    export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
    export PYTHONPATH=$PYTHONPATH:$pro_pylibs_dir
    export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/
    export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/thirdparts
    export EVENTS_CONFIG_FILE=${SEAFILE_CENTRAL_CONF_DIR}/seafevents.conf
    export INDEX_SLAVE_CONFIG_FILE=${SEAFILE_CENTRAL_CONF_DIR}/index-slave.conf
}

start_index_worker() {
    before_start;
    nohup $PYTHON -m seafes.index_worker --loglevel debug --logfile ${logdir}/index_worker.log start & echo $! > $pidfile
    sleep 2
    if ! pgrep -f "seafes.index_worker" 2>/dev/null 1>&2; then
        printf "\033[33mError:Index worker failed to start.\033[m\n"
        echo "Please try to run \"./run_index_worker.sh start\" again"
        exit 1;
    fi

    echo
    echo "Index worker is started"
    echo
}

stop_index_worker() {
    if [[ -f ${pidfile} ]]; then
        pid=$(cat "${pidfile}")
        echo "Stopping index worker ..."
        kill ${pid}
        rm -f ${pidfile}
        return 0
    else
        echo "Index worker is not running"
    fi
}

case $1 in
    "start" )
        start_index_worker;
        ;;
    "stop" )
        stop_index_worker;
        ;;
    "restart" )
        stop_index_worker
        sleep 2
        start_index_worker
        ;;
esac

50
scripts/seaf-backup-cmd.py Executable file
View File

@ -0,0 +1,50 @@
#!/usr/bin/env python3
#coding: utf-8
import json
import argparse
from seaserv import seafile_api
from pysearpc import SearpcError
def show_backup_status(args):
    """Print a summary of the backup sync status reported by seaf-server."""
    status = json.loads(seafile_api.get_backup_status())

    print('Total number of libraries: %s' % status['n_total'])
    print('Number of synchronized libraries: %s' % status['n_synced'])
    print('Number of libraries waiting for sync: %s' % status['n_waiting'])
    print('Number of libraries syncing: %s' % status['n_syncing'])
    print('Number of libraries failed to sync: %s\n' % status['n_error'])

    print('List of syncing libraries:')
    for repo in status['syncing_repos']:
        print(repo)
    print('')

    print('List of libraries failed to sync:')
    for repo in status['error_repos']:
        print(repo)
def sync_repo(args):
    """Trigger a manual backup sync of one repo; --force requests a resync."""
    repo_id = args.repo_id
    # Repo ids are 36-char uuids; reject anything else up front.
    if len(repo_id) != 36:
        print('Invalid repo id %s.' % repo_id)
        return
    force_flag = 1 if args.force else 0
    try:
        seafile_api.sync_repo_manually(repo_id, force_flag)
    except SearpcError as e:
        print('Failed to sync repo %s: %s.' % (repo_id, e))
        return
    print('Sync repo %s successfully.' % repo_id)
if __name__ == "__main__":
    # Command line: seaf-backup-cmd.py {status | sync [-f] <repo_id>}
    parser = argparse.ArgumentParser()
    subparser = parser.add_subparsers(title='subcommands', description='')

    # 'status': print the overall backup sync status
    status_parser = subparser.add_parser('status', help='get backup status')
    status_parser.set_defaults(func=show_backup_status)

    # 'sync': manually sync one repo to the backup server
    sync_parser = subparser.add_parser('sync', help='sync repo')
    sync_parser.add_argument('-f', '--force', help='force sync repo', action='store_true')
    sync_parser.add_argument('repo_id', help='repo id to sync')
    sync_parser.set_defaults(func=sync_repo)

    args = parser.parse_args()
    args.func(args)

70
scripts/seaf-backup-cmd.sh Executable file
View File

@ -0,0 +1,70 @@
#!/bin/bash

# This is a wrapper shell script for the real seaf-backup command.
# It prepares necessary environment variables and exec the real script.

# Locate a python3 interpreter and export it as $PYTHON.
function check_python_executable() {
    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
        return 0
    fi

    if which python3 2>/dev/null 1>&2; then
        PYTHON=python3
    elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
        echo
        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
        echo
        exit 1
    else
        PYTHON="python"$(python --version | cut -b 8-10)
        # BUGFIX: '!' must be a separate word to negate a command.
        if ! which $PYTHON 2>/dev/null 1>&2; then
            echo
            echo "Can't find a python executable of $PYTHON in PATH"
            echo "Install $PYTHON before continue."
            echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
            echo
            exit 1
        fi
    fi
}

check_python_executable

# seafile cli client requires the argparse module
if ! $PYTHON -c 'import argparse' 2>/dev/null 1>&2; then
    echo
    echo "Python argparse module is required"
    echo "see [https://pypi.python.org/pypi/argparse]"
    echo
    exit 1
fi

SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
central_config_dir=${TOPDIR}/conf
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data

# Abort unless the seafile data dir created by setup exists.
function validate_seafile_data_dir () {
    if [[ ! -d ${default_seafile_data_dir} ]]; then
        echo "Error: there is no seafile server data directory."
        echo "Have you run setup-seafile.sh before this?"
        echo ""
        exit 1;
    fi
}

validate_seafile_data_dir

SEAFILE_PYTHON_PATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart
SEAF_BACKUP_CMD=${INSTALLPATH}/seaf-backup-cmd.py

export SEAFILE_RPC_PIPE_PATH=${INSTALLPATH}/runtime
export PYTHONPATH=${SEAFILE_PYTHON_PATH}:${PYTHONPATH}
export CCNET_CONF_DIR=${default_ccnet_conf_dir}
export SEAFILE_CONF_DIR=${default_seafile_data_dir}
export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir}

$PYTHON ${SEAF_BACKUP_CMD} "$@"

91
scripts/seaf-encrypt.sh Executable file
View File

@ -0,0 +1,91 @@
#!/bin/bash

echo ""

# Paths derived from this script's location: the script lives inside the
# versioned install dir, user data/config live one level above (TOPDIR).
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
default_conf_dir=${TOPDIR}/conf
seaf_encrypt=${INSTALLPATH}/seafile/bin/seaf-encrypt
seaf_encrypt_opts=""

export PATH=${INSTALLPATH}/seafile/bin:$PATH
export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}

script_name=$0

# Print usage and the mandatory options of the seaf-encrypt binary.
function usage () {
    echo "usage : "
    echo -e "$(basename ${script_name}) \n" \
        "-f <seafile enc central config dir, must set>\n" \
        "-e <seafile enc data dir, must set>"
    echo ""
}

# Abort unless the seafile data dir created by setup exists.
function validate_seafile_data_dir () {
    if [[ ! -d ${default_seafile_data_dir} ]]; then
        echo "Error: there is no seafile server data directory."
        echo "Have you run setup-seafile.sh before this?"
        echo ""
        exit 1;
    fi
}

# Refuse to run while the named component is still up.
function check_component_running() {
    name=$1
    cmd=$2
    if pid=$(pgrep -f "$cmd" 2>/dev/null); then
        echo "[$name] is running, pid $pid. You can stop it by: "
        echo
        echo "        kill $pid"
        echo
        echo "Stop it and try again."
        echo
        exit
    fi
}

# The encrypt tool must not run concurrently with the seafile server.
function validate_already_running () {
    if pid=$(pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null); then
        echo "seafile server is still running, stop it by \"seafile.sh stop\""
        echo
        exit 1;
    fi

    check_component_running "seaf-server" "seaf-server -c ${default_ccnet_conf_dir}"
    check_component_running "seafdav" "wsgidav.server.server_cli"
}

# Run seaf-encrypt against the local config/data with any extra options.
function run_seaf_encrypt () {
    validate_seafile_data_dir;
    validate_already_running;

    echo "Starting seaf-encrypt, please wait ..."

    LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_encrypt} \
        -c "${default_conf_dir}" \
        -d "${default_seafile_data_dir}" \
        ${seaf_encrypt_opts}

    echo "seaf-encrypt run done"
    echo
}

# Show help when any argument is -h/--help.
if [ $# -gt 0 ];
then
    for param in $@;
    do
        if [ ${param} = "-h" -o ${param} = "--help" ];
        then
            usage;
            exit 1;
        fi
    done
fi

# All remaining arguments are passed straight to the binary.
seaf_encrypt_opts=$@

run_seaf_encrypt;

echo "Done."

46
scripts/seaf-gen-key.sh Executable file
View File

@ -0,0 +1,46 @@
#!/bin/bash

echo ""

SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")

seaf_genkey=${INSTALLPATH}/seafile/bin/seaf-gen-key
seaf_genkey_opts=""

export PATH=${INSTALLPATH}/seafile/bin:$PATH
export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}

script_name=$0

# Print usage and the supported option of the seaf-gen-key binary.
function usage () {
    echo "usage : "
    echo -e "$(basename ${script_name})\n" \
        "-p <file path to write key iv, default ./seaf-key.txt>"
    echo ""
}

# Invoke the bundled seaf-gen-key binary with the collected options.
function run_seaf_genkey () {
    echo "Starting seaf-gen-key, please wait ..."

    LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_genkey} \
        ${seaf_genkey_opts}

    echo "seaf-gen-key run done"
    echo
}

# Show help when any argument is -h/--help.
for param in $@;
do
    case ${param} in
        -h|--help)
            usage;
            exit 1;
            ;;
    esac
done

# All remaining arguments are passed straight to the binary.
seaf_genkey_opts=$@

run_seaf_genkey;

echo "Done."

95
scripts/seaf-import.sh Executable file
View File

@ -0,0 +1,95 @@
#!/bin/bash

echo ""

# Paths derived from this script's location: the script lives inside the
# versioned install dir, user data/config live one level above (TOPDIR).
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
default_conf_dir=${TOPDIR}/conf
seaf_import=${INSTALLPATH}/seafile/bin/seaf-import
seaf_import_opts=""

export PATH=${INSTALLPATH}/seafile/bin:$PATH
export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}

script_name=$0

# Print usage and the mandatory options of the seaf-import binary.
function usage () {
    echo "usage : "
    echo -e "$(basename ${script_name}) \n" \
        "-p <import dir path, must set>\n" \
        "-n <repo name, must set>\n" \
        "-u <repo owner, must set>"
    echo ""
}

# Abort unless the seafile data dir created by setup exists.
function validate_seafile_data_dir () {
    if [[ ! -d ${default_seafile_data_dir} ]]; then
        echo "Error: there is no seafile server data directory."
        echo "Have you run setup-seafile.sh before this?"
        echo ""
        exit 1;
    fi
}

# Refuse to run while the named component is still up.
function check_component_running() {
    name=$1
    cmd=$2
    if pid=$(pgrep -f "$cmd" 2>/dev/null); then
        echo "[$name] is running, pid $pid. You can stop it by: "
        echo
        echo "        kill $pid"
        echo
        echo "Stop it and try again."
        echo
        exit
    fi
}

# NOTE(review): the running-server check is deliberately disabled via this
# quoted heredoc (the body is never executed), matching the commented-out
# call inside run_seaf_import below.
<<'COMMENT'
function validate_already_running () {
    if pid=$(pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null); then
        echo "seafile server is still running, stop it by \"seafile.sh stop\""
        echo
        exit 1;
    fi

    check_component_running "seaf-server" "seaf-server -c ${default_ccnet_conf_dir}"
    check_component_running "seafdav" "wsgidav.server.server_cli"
}
COMMENT

# Run seaf-import against the local config/data with any extra options.
function run_seaf_import () {
    validate_seafile_data_dir;
    # validate_already_running;

    echo "Starting seaf-import, please wait ..."

    LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_import} \
        -c "${default_conf_dir}" \
        -d "${default_seafile_data_dir}" \
        ${seaf_import_opts}

    echo " run done"
    echo
}

# Show help when any argument is -h/--help.
if [ $# -gt 0 ];
then
    for param in $@;
    do
        if [ ${param} = "-h" -o ${param} = "--help" ];
        then
            usage;
            exit 1;
        fi
    done
fi

# All remaining arguments are passed straight to the binary.
seaf_import_opts=$@

run_seaf_import;

echo "Done."

View File

@ -0,0 +1,167 @@
#!/bin/bash

echo ""

# Paths derived from this script's location: the script lives inside the
# versioned install dir, user data/config live one level above (TOPDIR).
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
logdir=${TOPDIR}/logs
pro_pylibs_dir=${INSTALLPATH}/pro/python
seafevents_conf=${TOPDIR}/conf/seafevents.conf
seafile_background_tasks_log=${logdir}/seafile-background-tasks.log
seahub_dir=${INSTALLPATH}/seahub
central_config_dir=${TOPDIR}/conf

export SEAHUB_DIR=${seahub_dir}
export PATH=${INSTALLPATH}/seafile/bin:$PATH
export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}

script_name=$0

# Print supported sub-commands.
function usage () {
    echo "Usage: "
    echo
    echo "  $(basename "${script_name}") { start <port> | stop | restart <port> }"
    echo
    echo ""
}

# Check args
if [[ $1 != "start" && $1 != "stop" && $1 != "restart" ]]; then
    usage;
    exit 1;
fi
# Locate a python3 interpreter and export it as $PYTHON.
function check_python_executable() {
    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
        return 0
    fi

    if which python3 2>/dev/null 1>&2; then
        PYTHON=python3
    elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
        echo
        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
        echo
        exit 1
    else
        PYTHON="python"$(python --version | cut -b 8-10)
        # BUGFIX: '!' must be a separate word to negate a command;
        # '!which' is looked up as a command named '!which'.
        if ! which $PYTHON 2>/dev/null 1>&2; then
            echo
            echo "Can't find a python executable of $PYTHON in PATH"
            echo "Install $PYTHON before continue."
            echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
            echo
            exit 1
        fi
    fi
}
# Abort unless the seafile data dir created by setup exists; also derives
# the daemon pidfile path so it is set before any start/stop action runs.
function validate_seafile_data_dir () {
    if [[ ! -d ${default_seafile_data_dir} ]]; then
        echo "Error: there is no seafile server data directory."
        echo "Have you run setup-seafile.sh before this?"
        echo ""
        exit 1;
    fi
    pidfile=${TOPDIR}/pids/seafile-background-tasks.pid
}
# Refuse to start a second copy of the background tasks daemon.
function ensure_single_instance () {
    if pgrep -f "seafevents.background_tasks" 2>/dev/null 1>&2; then
        echo "seafile background tasks is already running."
        exit 1;
    fi
}
# Warn (but do not abort) when the main seafile server is not running.
function warning_if_seafile_not_running () {
    if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then
        echo
        echo "Warning: seafile-controller not running. Have you run \"./seafile.sh start\" ?"
        echo
    fi
}
function prepare_log_dir() {
    # Make sure the logs directory exists before the daemon opens its
    # logfile there.
    # BUG FIX: the existence test previously read the undefined variable
    # ${logsdir} (typo for ${logdir}), so it always evaluated false and
    # mkdir -p ran unconditionally.
    if ! [[ -d ${logdir} ]]; then
        if ! mkdir -p "${logdir}"; then
            echo "ERROR: failed to create logs dir \"${logdir}\""
            exit 1
        fi
    fi
}
function before_start() {
    # Run the pre-flight checks, then export the environment the
    # seafevents daemon expects.
    warning_if_seafile_not_running;
    ensure_single_instance;
    prepare_log_dir;

    export CCNET_CONF_DIR=${default_ccnet_conf_dir}
    export SEAFILE_CONF_DIR=${default_seafile_data_dir}
    export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir}
    export SEAFILE_RPC_PIPE_PATH=${INSTALLPATH}/runtime
    # Python search path; component order is significant and preserved.
    # central_config_dir is last so LDAP user sync can import
    # seahub_settings.py.
    export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
    export PYTHONPATH=$PYTHONPATH:$pro_pylibs_dir:${INSTALLPATH}/seahub-extra/:${INSTALLPATH}/seahub-extra/thirdparts:${central_config_dir}
    export SEAFES_DIR=$pro_pylibs_dir/seafes
}
function start_seafile_background_tasks () {
    # Launch the seafevents background-tasks daemon detached from the
    # terminal, then verify it survived its first few seconds.
    before_start;
    echo "Starting seafile background tasks ..."
    # All daemon output is discarded; the daemon logs to its own logfile.
    $PYTHON -m seafevents.background_tasks --config-file "${seafevents_conf}" \
        --loglevel debug --logfile "${seafile_background_tasks_log}" -P "${pidfile}" 2>/dev/null 1>&2 &
    # Ensure started successfully
    sleep 5
    if ! pgrep -f "seafevents.background_tasks" >/dev/null; then
        printf "\033[33mError: failed to start seafile background tasks.\033[m\n"
        echo "Please try to run \"./seafile-background-tasks.sh start\" again"
        exit 1;
    fi
}
function stop_seafile_background_tasks () {
    # Stop the daemon recorded in the pidfile: SIGTERM first, then SIGKILL
    # if the process is still alive one second later.
    if [[ -f ${pidfile} ]]; then
        pid=$(cat "${pidfile}")
        echo "Stopping seafile background tasks ..."
        kill "${pid}"
        sleep 1
        # `ps <pid>` succeeds only while the process still exists.
        if ps "${pid}" 2>/dev/null 1>&2 ; then
            kill -KILL "${pid}"
        fi
        # Also terminate any headless LibreOffice converter processes
        # spawned by the background tasks.
        pkill -f "soffice.*--invisible --nocrashreport"
        rm -f "${pidfile}"
        return 0
    else
        echo "seafile background tasks is not running"
    fi
}
# Resolve a Python 3 interpreter and confirm setup has been run, then
# dispatch on the sub-command validated earlier.
check_python_executable;
validate_seafile_data_dir;

case $1 in
    "start" )
        start_seafile_background_tasks;
        ;;
    "stop" )
        stop_seafile_background_tasks;
        ;;
    "restart" )
        # Brief pause between stop and start so the old process can exit.
        stop_seafile_background_tasks
        sleep 2
        start_seafile_background_tasks
        ;;
esac

echo "Done."
echo ""

View File

@ -1,7 +1,8 @@
import os
daemon = True
workers = 5
workers = 3
threads = 5
# Logging
runtime_dir = os.path.dirname(__file__)

View File

@ -0,0 +1,22 @@
-- Files/folders published to a whole group ("group public files",
-- presumably the pro `pubfile` app -- confirm against seahub code).
CREATE TABLE IF NOT EXISTS `pubfile_grouppublicfile` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `group_id` int(11) NOT NULL,
  `repo_id` varchar(36) NOT NULL,
  `path` varchar(4096) NOT NULL,
  `is_dir` tinyint(1) NOT NULL,
  `added_by` varchar(256) NOT NULL,
  `description` varchar(1024) NOT NULL,
  `download_count` int(11) NOT NULL,
  PRIMARY KEY (`id`),
  -- lookup by owning group
  KEY `pubfile_grouppublicfile_dc00373b` (`group_id`)
) ENGINE=InnoDB;
-- Per-user login history (sysadmin_extra app).
CREATE TABLE IF NOT EXISTS `sysadmin_extra_userloginlog` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `username` varchar(255) NOT NULL,
  `login_date` datetime NOT NULL,
  -- NOTE(review): varchar(20) cannot hold a full IPv6 address (up to 45
  -- chars) -- confirm whether IPv6 client addresses are expected here.
  `login_ip` varchar(20) NOT NULL,
  PRIMARY KEY (`id`),
  KEY `sysadmin_extra_userloginlog_ee0cafa2` (`username`),
  KEY `sysadmin_extra_userloginlog_c8db99ec` (`login_date`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

View File

@ -0,0 +1,21 @@
-- SQLite counterpart of the MySQL pubfile_grouppublicfile table.
CREATE TABLE IF NOT EXISTS "pubfile_grouppublicfile" (
  "id" integer NOT NULL PRIMARY KEY,
  "group_id" integer NOT NULL,
  "repo_id" varchar(36) NOT NULL,
  "path" varchar(4096) NOT NULL,
  "is_dir" bool NOT NULL,
  "added_by" varchar(256) NOT NULL,
  "description" varchar(1024) NOT NULL,
  "download_count" integer NOT NULL
)
;
-- lookup by owning group
CREATE INDEX IF NOT EXISTS "pubfile_grouppublicfile_dc00373b" ON "pubfile_grouppublicfile" ("group_id");
-- SQLite counterpart of the MySQL sysadmin_extra_userloginlog table.
CREATE TABLE IF NOT EXISTS "sysadmin_extra_userloginlog" (
  "id" integer NOT NULL PRIMARY KEY,
  "username" varchar(255) NOT NULL,
  "login_date" datetime NOT NULL,
  "login_ip" varchar(20) NOT NULL
);
CREATE INDEX IF NOT EXISTS "sysadmin_extra_userloginlog_c8db99ec" ON "sysadmin_extra_userloginlog" ("login_date");
CREATE INDEX IF NOT EXISTS "sysadmin_extra_userloginlog_ee0cafa2" ON "sysadmin_extra_userloginlog" ("username");

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
"""Lifted from:
http://stackoverflow.com/questions/18671/quick-easy-way-to-migrate-sqlite3-to-mysql

View File

@ -0,0 +1,41 @@
#coding: UTF-8
import os
import sys
import configparser
def main():
    """Print the non-local storage backend configured in seafile.conf.

    Reads ``seafile.conf`` from the directory named by the
    ``SEAFILE_CONF_DIR`` environment variable (raises ``KeyError`` when
    unset) and prints ``'s3'`` or ``'ceph'`` when any of the blocks / fs /
    commits backends uses that store; prints nothing when all backends are
    the plain filesystem (``'fs'``).
    """
    cfg = configparser.ConfigParser()
    seafile_conf_dir = os.environ['SEAFILE_CONF_DIR']
    seafile_conf = os.path.join(seafile_conf_dir, 'seafile.conf')
    cfg.read(seafile_conf)

    # Map each storage category to its seafile.conf section.
    sections_map = {
        'blocks': 'block_backend',
        'fs': 'fs_object_backend',
        'commits': 'commit_object_backend',
    }

    # A missing section/option means the default local filesystem backend.
    backends = {
        name: cfg.get(section, 'name') if cfg.has_option(section, 'name') else 'fs'
        for name, section in sections_map.items()
    }

    # s3 takes precedence over ceph when both appear, matching the
    # original check order.
    if 's3' in backends.values():
        print('s3')
        return
    if 'ceph' in backends.values():
        print('ceph')
        return
if __name__ == '__main__':
    try:
        main()
    except Exception as e:
        # Report the failure on stderr (with a newline, so it is not glued
        # to the next shell prompt) and exit non-zero so shell callers like
        # `backend=$($PYTHON check_backend.py)` can distinguish "no special
        # backend" (empty stdout, exit 0) from an actual error.
        sys.stderr.write('%s\n' % e)
        sys.stderr.flush()
        sys.exit(1)

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import sqlite3
import os

View File

@ -25,6 +25,7 @@ class EnvManager(object):
self.ccnet_dir = os.environ['CCNET_CONF_DIR']
self.seafile_dir = os.environ['SEAFILE_CONF_DIR']
self.central_config_dir = os.environ.get('SEAFILE_CENTRAL_CONF_DIR')
self.seafevents_db_dir = os.path.join(os.path.dirname(self.install_path), 'pro-data')
env_mgr = EnvManager()
@ -77,6 +78,7 @@ class DBUpdater(object):
self.sql_dir = os.path.join(env_mgr.upgrade_dir, 'sql', version, name)
pro_path = os.path.join(env_mgr.install_path, 'pro')
self.is_pro = os.path.exists(pro_path)
self.version = version
@staticmethod
def get_instance(version):
@ -269,7 +271,7 @@ class SQLiteDBUpdater(DBUpdater):
self.ccnet_db = CcnetSQLiteDB(env_mgr.ccnet_dir)
self.seafile_db = os.path.join(env_mgr.seafile_dir, 'seafile.db')
self.seahub_db = os.path.join(env_mgr.top_dir, 'seahub.db')
self.seafevents_db = os.path.join(env_mgr.top_dir, 'seafevents.db')
self.seafevents_db = os.path.join(env_mgr.seafevents_db_dir, 'seafevents.db')
def update_db(self):
super(SQLiteDBUpdater, self).update_db()
@ -338,7 +340,7 @@ class MySQLDBUpdater(DBUpdater):
try:
conn = pymysql.connect(**kw)
except Exception as e:
if isinstance(e, pymysql.err.OperationalError):
if isinstance(e, pymysql.OperationalError):
msg = str(e.args[1])
else:
msg = str(e)

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import os
import sys

View File

@ -19,6 +19,7 @@ dest_avatar_dir=${TOPDIR}/seahub-data/avatars
seafile_server_symlink=${TOPDIR}/seafile-server-latest
default_conf_dir=${TOPDIR}/conf
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
seahub_data_dir=${TOPDIR}/seahub-data
elasticsearch_config_file=${seafile_server_symlink}/pro/elasticsearch/config/jvm.options
@ -115,23 +116,12 @@ function move_old_elasticsearch_config_to_latest() {
fi
}
function read_seafile_data_dir() {
seafile_ini=${default_ccnet_conf_dir}/seafile.ini
if [[ -f ${seafile_ini} ]]; then
seafile_data_dir=$(cat "${seafile_ini}")
if [[ ! -d ${seafile_data_dir} ]]; then
echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits."
echo "Please check it first, or create this directory yourself."
echo ""
exit 1;
else
if [[ ${seafile_data_dir} != ${TOPDIR}/seafile-data ]]; then
if [[ ! -L ${TOPDIR}/seafile-data ]]; then
ln -s ${seafile_data_dir} ${TOPDIR}/seafile-data
echo "Created the symlink ${TOPDIR}/seafile-data for ${seafile_data_dir}."
fi
fi
fi
function validate_seafile_data_dir() {
if [[ ! -d ${default_seafile_data_dir} ]]; then
echo "Error: there is no seafile server data directory."
echo "Have you run setup-seafile.sh before this?"
echo ""
exit 1;
fi
}
@ -151,7 +141,7 @@ function rename_gunicorn_config() {
fi
}
read_seafile_data_dir;
validate_seafile_data_dir;
rename_gunicorn_config;
migrate_avatars;

View File

@ -11,3 +11,13 @@ CREATE TABLE IF NOT EXISTS `api2_tokenv2` (
PRIMARY KEY (`key`),
UNIQUE KEY `user` (`user`,`platform`,`device_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- Per-user login history (sysadmin_extra app), added by this upgrade.
CREATE TABLE IF NOT EXISTS `sysadmin_extra_userloginlog` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `username` varchar(255) NOT NULL,
  `login_date` datetime NOT NULL,
  `login_ip` varchar(20) NOT NULL,
  PRIMARY KEY (`id`),
  KEY `sysadmin_extra_userloginlog_ee0cafa2` (`username`),
  KEY `sysadmin_extra_userloginlog_c8db99ec` (`login_date`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- BUG FIX: the statement above previously ended without a semicolon; the
-- mysql client does not execute an unterminated final statement when this
-- file is sourced, so the table was silently never created.

View File

@ -10,3 +10,12 @@ CREATE TABLE IF NOT EXISTS "api2_tokenv2" (
"last_login_ip" char(39),
UNIQUE ("user", "platform", "device_id")
);
-- SQLite counterpart of the MySQL sysadmin_extra_userloginlog upgrade.
CREATE TABLE IF NOT EXISTS "sysadmin_extra_userloginlog" (
  "id" integer NOT NULL PRIMARY KEY,
  "username" varchar(255) NOT NULL,
  "login_date" datetime NOT NULL,
  "login_ip" varchar(20) NOT NULL
);
CREATE INDEX IF NOT EXISTS "sysadmin_extra_userloginlog_c8db99ec" ON "sysadmin_extra_userloginlog" ("login_date");
CREATE INDEX IF NOT EXISTS "sysadmin_extra_userloginlog_ee0cafa2" ON "sysadmin_extra_userloginlog" ("username");

View File

@ -81,4 +81,8 @@ CREATE TABLE IF NOT EXISTS `role_permissions_adminrole` (
ALTER TABLE `sysadmin_extra_userloginlog` ADD COLUMN `login_success` tinyint(1) NOT NULL default 1;
ALTER TABLE `profile_profile` ADD COLUMN `list_in_address_book` tinyint(1) NOT NULL default 0;
ALTER TABLE `profile_profile` ADD INDEX `profile_profile_3d5d3631` (`list_in_address_book`);
ALTER TABLE `profile_profile` ADD INDEX `profile_profile_3d5d3631` (`list_in_address_book`);
ALTER TABLE `FileAudit` ADD INDEX `fileaudit_timestamp` (`timestamp`);
ALTER TABLE `Event` ADD INDEX `event_timestamp` (`timestamp`);
ALTER TABLE `UserTrafficStat` ADD INDEX `usertrafficstat_timestamp` (`month`);
ALTER TABLE `FileUpdate` ADD INDEX `fileupdate_timestamp` (`timestamp`);

View File

@ -10,13 +10,17 @@ ALTER TABLE RepoOwner DROP primary key;
-- Replace composite primary keys with a surrogate auto-increment `id`
-- column, demoting the old key to a UNIQUE constraint -- presumably to
-- satisfy tooling that requires a single-column PK (e.g. replication);
-- confirm against the release notes.
ALTER TABLE RepoOwner ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE RepoOwner ADD UNIQUE (repo_id);
ALTER TABLE RepoGroup DROP primary key;
ALTER TABLE RepoGroup ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE RepoGroup ADD UNIQUE (group_id, repo_id);
ALTER TABLE InnerPubRepo DROP primary key;
ALTER TABLE InnerPubRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE InnerPubRepo ADD UNIQUE (repo_id);
ALTER TABLE RepoUserToken DROP primary key;
ALTER TABLE RepoUserToken ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE RepoUserToken ADD UNIQUE (repo_id, token);
ALTER TABLE RepoTokenPeerInfo DROP primary key;
ALTER TABLE RepoTokenPeerInfo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
@ -78,10 +82,55 @@ ALTER TABLE OrgUserQuota DROP primary key;
-- Continuation of the surrogate-PK migration: same pattern (drop old PK,
-- add auto-increment `id`, keep the old key as UNIQUE) for the remaining
-- seafile tables, then org-sharing table/index additions.
ALTER TABLE OrgUserQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE OrgUserQuota ADD UNIQUE (org_id, user);
ALTER TABLE SystemInfo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE Branch DROP primary key;
ALTER TABLE Branch ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE Branch ADD UNIQUE (repo_id, name);
ALTER TABLE SeafileConf ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE FileLocks ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE OrgRepo DROP primary key;
ALTER TABLE OrgRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE OrgRepo ADD UNIQUE (org_id, repo_id);
ALTER TABLE OrgGroupRepo DROP primary key;
ALTER TABLE OrgGroupRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE OrgGroupRepo ADD UNIQUE (org_id, group_id, repo_id);
ALTER TABLE OrgInnerPubRepo DROP primary key;
ALTER TABLE OrgInnerPubRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE OrgInnerPubRepo ADD UNIQUE (org_id, repo_id);
ALTER TABLE RepoSyncError DROP primary key;
ALTER TABLE RepoSyncError ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE RepoSyncError ADD UNIQUE (token);
ALTER TABLE GCID DROP primary key;
ALTER TABLE GCID ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE GCID ADD UNIQUE (repo_id);
ALTER TABLE LastGCID DROP primary key;
ALTER TABLE LastGCID ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE LastGCID ADD UNIQUE (repo_id, client_id);
ALTER TABLE FolderUserPerm ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE FolderGroupPerm ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE FolderPermTimestamp DROP primary key;
ALTER TABLE FolderPermTimestamp ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE FolderPermTimestamp ADD UNIQUE (repo_id);
ALTER TABLE WebUploadTempFiles ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE RepoStorageId ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE RoleQuota DROP primary key;
ALTER TABLE RoleQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
ALTER TABLE RoleQuota ADD UNIQUE (role);
CREATE TABLE IF NOT EXISTS OrgSharedRepo (id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,org_id INT, repo_id CHAR(37) , from_email VARCHAR(255), to_email VARCHAR(255), permission CHAR(15), INDEX (org_id, repo_id), INDEX(from_email), INDEX(to_email)) ENGINE=INNODB;
ALTER TABLE OrgSharedRepo ADD INDEX(repo_id);
ALTER TABLE OrgRepo ADD INDEX(user);

View File

@ -168,3 +168,8 @@ ALTER TABLE notifications_notification ADD INDEX `notifications_notification_386
-- Upgrade additions: unique index on institution admins, mimetype column
-- for post_office attachments, and timestamp/month indexes on the
-- event/audit/traffic tables -- presumably to speed time-range queries;
-- confirm against seafevents query patterns.
ALTER TABLE institutions_institutionadmin ADD INDEX `institutions_institutionadmin_user_7560167c8413ff0e_uniq` (`user`);
ALTER TABLE `post_office_attachment` add column `mimetype` varchar(255) NOT NULL;
ALTER TABLE Event ADD INDEX `ix_event_timestamp` (`timestamp`);
ALTER TABLE FileAudit ADD INDEX `ix_FileAudit_timestamp` (`timestamp`);
ALTER TABLE FileUpdate ADD INDEX `ix_FileUpdate_timestamp` (`timestamp`);
ALTER TABLE UserTrafficStat ADD INDEX `ix_UserTrafficStat_month` (`month`);

View File

@ -132,7 +132,7 @@ function upgrade_seafile_server_latest_symlink() {
seafile_server_symlink=${TOPDIR}/seafile-server-latest
if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then
echo
printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n"
printf "updating \033[32m${seafile_server_symlink}\033[m symbolic link to \033[32m${INSTALLPATH}\033[m ...\n\n"
echo
if ! rm -f "${seafile_server_symlink}"; then
echo "Failed to remove ${seafile_server_symlink}"
@ -148,10 +148,27 @@ function upgrade_seafile_server_latest_symlink() {
fi
}
function show_notice_for_s3_ceph_user() {
    # Shown after the upgrade when the ${backend} global (set elsewhere in
    # this script from check_backend.py's output) is "s3" or "ceph":
    # those deployments must migrate their data to the 3.0 format manually,
    # following the linked wiki page.
    echo "-----------------------------------------------------------------"
    echo "Important: You are using ${backend} storage, please follow the following "
    echo "upgrade notice to migrate your data to 3.0 format"
    echo
    echo " http://seacloud.cc/group/180/wiki/seafile-pro-3.0-upgrade-notice/"
    echo "-----------------------------------------------------------------"
    echo
    echo
}
check_backend_py=${UPGRADE_DIR}/check_backend.py
backend=
function migrate_seafile_data_format() {
backend=$($PYTHON ${check_backend_py})
if [[ "${backend}" == "s3" || "${backend}" == "ceph" ]]; then
return
fi
seaf_migrate=${INSTALLPATH}/seafile/bin/seaf-migrate
echo
echo "now migrating seafile data to 3.0 format"
echo "Now migrating your seafile data to 3.0 format. It may take a while."
echo
if ! LD_LIBRARY_PATH=${SEAFILE_LD_LIBRARY_PATH} ${seaf_migrate} \
-c "${default_ccnet_conf_dir}" -d "${seafile_data_dir}"; then
@ -183,9 +200,12 @@ update_database;
upgrade_seafile_server_latest_symlink;
echo
echo "-----------------------------------------------------------------"
echo "Upgraded your seafile server successfully."
echo "-----------------------------------------------------------------"
echo
if [[ "${backend}" == "s3" || "${backend}" == "ceph" ]]; then
show_notice_for_s3_ceph_user;
else
echo
echo "-----------------------------------------------------------------"
echo "Upgraded your seafile server successfully."
echo "-----------------------------------------------------------------"
echo
fi

View File

@ -226,6 +226,7 @@ chmod 0600 "$seahub_settings_py"
chmod 0700 "$seafile_data_dir"
chmod 0700 "$default_ccnet_conf_dir"
chmod 0700 "$default_conf_dir"
chmod 0700 "$TOPDIR"/pro-data
echo
echo "-----------------------------------------------------------------"

View File

@ -192,6 +192,13 @@ function move_old_customdir_outside() {
cp -rf "${old_customdir}" "${seahub_data_dir}/"
}
function remove_es_index() {
    # Elasticsearch index settings changed in this release; wipe the old
    # on-disk index so it is rebuilt from scratch on the next start.
    local es_data_dir=$TOPDIR/pro-data/search/data
    echo -n "Removing old search index ... "
    # BUG FIX: quote the path so a $TOPDIR containing whitespace cannot
    # make `rm -rf` operate on unintended word-split arguments.
    rm -rf "$es_data_dir" && mkdir -p "$es_data_dir"
    echo "Done"
}
#################
# The main execution flow of the script
################
@ -202,6 +209,9 @@ ensure_server_not_running;
update_database;
# We changed elasticsearch index settings in 4.2.0, need to recreate the index.
remove_es_index;
migrate_avatars;

View File

@ -200,6 +200,17 @@ function regenerate_secret_key() {
fi
}
function remove_es_index() {
    # Elasticsearch index settings changed in this release; wipe the old
    # on-disk index so it is rebuilt from scratch on the next start.
    local es_data_dir=$TOPDIR/pro-data/search/data
    echo -n "Removing old search index ... "
    # BUG FIX: quote the path so a $TOPDIR containing whitespace cannot
    # make `rm -rf` operate on unintended word-split arguments.
    rm -rf "$es_data_dir" && mkdir -p "$es_data_dir"
    echo "Done"
}
function remove_office_files() {
    # Drop cached office-preview output so documents are re-converted by
    # the upgraded converter. The glob stays unquoted on purpose so every
    # entry under html/ is removed.
    rm -rf /tmp/seafile-office-output/html/*
}
#################
# The main execution flow of the script
################
@ -212,6 +223,10 @@ regenerate_secret_key;
update_database;
# We changed elasticsearch index settings in 4.3.0, need to recreate the index.
remove_es_index;
remove_office_files;
migrate_avatars;

View File

@ -9,6 +9,7 @@ default_conf_dir=${TOPDIR}/conf
seafile_server_symlink=${TOPDIR}/seafile-server-latest
seahub_data_dir=${TOPDIR}/seahub-data
seahub_settings_py=${TOPDIR}/seahub_settings.py
pro_data_dir=${TOPDIR}/pro-data
manage_py=${INSTALLPATH}/seahub/manage.py
@ -205,6 +206,7 @@ function copy_confs_to_central_conf_dir() {
$default_ccnet_conf_dir/ccnet.conf
$seafile_data_dir/seafile.conf
$seahub_settings_py
$pro_data_dir/seafevents.conf
)
for conffile in ${confs[*]}; do
if grep -q "This file has been moved" $conffile; then