diff --git a/scripts/build/build-pro.py b/scripts/build/build-pro.py
new file mode 100755
index 0000000000..f95d5d22b9
--- /dev/null
+++ b/scripts/build/build-pro.py
@@ -0,0 +1,1127 @@
+#!/usr/bin/env python3
+# coding: UTF-8
+'''This script builds the Seafile Server Professional tarball.
+
+Some notes:
+
+1. The working directory is always the 'builddir'. 'os.chdir' is only called
+to change to the 'builddir'. We make use of the 'cwd' argument in
+'subprocess.Popen' to run a command in a specific directory.
+
+2. django/djangorestframework/djblets/gunicorn/flup must be easy_install-ed to
+a directory before running this script. That directory is passed in as the
+'--thirdpartdir' argument.
+
+3. These components must be easy_install-ed to the '--prolibsdir' directory:
+    - sqlalchemy
+    - thrift
+    - elasticsearch
+    - elasticsearch-dsl
+    - argparse
+    - python-daemon
+    - lockfile
+'''
+import sys
+
+####################
+### Requires Python 3
+####################
+if sys.version_info[0] != 3:
+    print('Python 3 is required. Quit now.')
+    sys.exit(1)
+
+import os
+import glob
+import subprocess
+import tempfile
+import shutil
+import re
+import subprocess
+import optparse
+import atexit
+
+####################
+### Global variables
+####################
+
+# command line configuration
+conf = {}
+
+# key names in the conf dictionary.
+CONF_VERSION = 'version'
+CONF_SEAFILE_VERSION = 'seafile_version'
+CONF_LIBSEARPC_VERSION = 'libsearpc_version'
+CONF_CCNET_VERSION = 'ccnet_version'
+CONF_SRCDIR = 'srcdir'
+CONF_KEEP = 'keep'
+CONF_BUILDDIR = 'builddir'
+CONF_OUTPUTDIR = 'outputdir'
+CONF_THIRDPARTDIR = 'thirdpartdir'
+CONF_PROLIBSDIR = 'prolibsdir'
+CONF_NO_STRIP = 'nostrip'
+CONF_NO_CEPH = 'no-s3'
+CONF_YES = 'yes'
+CONF_JOBS = 'jobs'
+CONF_MYSQL_CONFIG = 'mysql_config'
+CONF_BRAND = 'brand'
+
+####################
+### Common helper functions
+####################
+def highlight(content, is_error=False):
+    '''Add ANSI color to content to get it highlighted on terminal'''
+    if is_error:
+        return '\x1b[1;31m%s\x1b[m' % content
+    else:
+        return '\x1b[1;32m%s\x1b[m' % content
+
+
+def info(msg):
+    print(highlight('[INFO] ') + msg)
+
+
+def find_in_path(prog):
+    '''Find a file in system path'''
+    dirs = os.environ['PATH'].split(':')
+    for d in dirs:
+        if d == '':
+            continue
+        path = os.path.join(d, prog)
+        if os.path.exists(path):
+            return path
+
+    return None
+
+
+def error(msg=None, usage=None):
+    if msg:
+        print(highlight('[ERROR] ') + msg)
+    if usage:
+        print(usage)
+    sys.exit(1)
+
+
+def run_argv(argv,
+             cwd=None,
+             env=None,
+             suppress_stdout=False,
+             suppress_stderr=False):
+    '''Run a program and wait for it to finish, and return its exit code. The
+    standard output/error of the program is suppressed when the corresponding flag is set.
+ + ''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(argv, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env) + return proc.wait() + + +def run(cmdline, + cwd=None, + env=None, + suppress_stdout=False, + suppress_stderr=False): + '''Like run_argv but specify a command line string instead of argv''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(cmdline, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env, + shell=True) + return proc.wait() + + +def must_mkdir(path): + '''Create a directory, exit on failure''' + if os.path.exists(path): + return + + try: + os.makedirs(path) + except OSError as e: + error('failed to create directory %s:%s' % (path, e)) + + +def must_copy(src, dst): + '''Copy src to dst, exit on failure''' + try: + shutil.copy(src, dst) + except Exception as e: + error('failed to copy %s to %s: %s' % (src, dst, e)) + + +def must_copytree(src, dst): + '''must_copytree(a, b) copies every file/dir under a/ to b/''' + try: + for name in os.listdir(src): + src_path = os.path.join(src, name) + target_path = os.path.join(dst, name) + if os.path.isdir(src_path): + shutil.copytree(src_path, target_path) + else: + shutil.copy(src_path, target_path) + except Exception as e: + error('failed to copy seahub thirdpart libs: %s' % e) + + +class Project(object): + '''Base class for a project''' + # Probject name, i.e. libseaprc/ccnet/seafile/seahub + name = '' + + # A list of shell commands to configure/build the project + build_commands = [] + + def __init__(self): + # the path to pass to --prefix=/ + self.prefix = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', + 'seafile') + self.version = self.get_version() + self.src_tarball = os.path.join(conf[CONF_SRCDIR], '%s-%s.tar.gz' % + (self.name, self.version)) + # project dir, like /seafile-1.2.2/ + self.projdir = os.path.join(conf[CONF_BUILDDIR], '%s-%s' % + (self.name, self.version)) + + def get_version(self): + # libsearpc and ccnet can have different versions from seafile. 
+ raise NotImplementedError + + def uncompress(self): + '''Uncompress the source from the tarball''' + info('Uncompressing %s' % self.name) + + if run('tar xf %s' % self.src_tarball) < 0: + error('failed to uncompress source of %s' % self.name) + + def build(self): + '''Build the source''' + info('Building %s' % self.name) + for cmd in self.build_commands: + if run(cmd, cwd=self.projdir) != 0: + error('error when running command:\n\t%s\n' % cmd) + + +class Libsearpc(Project): + name = 'libsearpc' + + def __init__(self): + Project.__init__(self) + self.build_commands = [ + './configure --prefix=%s' % self.prefix, + 'make -j%s' % conf[CONF_JOBS], 'make install' + ] + + def get_version(self): + return conf[CONF_LIBSEARPC_VERSION] + + +class Ccnet(Project): + name = 'ccnet' + + def __init__(self): + Project.__init__(self) + configure_command = './configure --prefix=%s --enable-ldap' % self.prefix + if conf[CONF_MYSQL_CONFIG]: + configure_command += ' --with-mysql=%s' % conf[CONF_MYSQL_CONFIG] + self.build_commands = [ + configure_command, + 'make -j%s' % conf[CONF_JOBS], + 'make install' + ] + + def get_version(self): + return conf[CONF_CCNET_VERSION] + + +class Seafile(Project): + name = 'seafile' + + def __init__(self): + Project.__init__(self) + + configure_command = './configure --prefix=%s --enable-cluster --enable-s3 --enable-ceph' % self.prefix + if conf[CONF_MYSQL_CONFIG]: + configure_command += ' --with-mysql=%s' % conf[CONF_MYSQL_CONFIG] + self.build_commands = [ + configure_command, + 'make -j%s' % conf[CONF_JOBS], + 'make install' + ] + + def get_version(self): + return conf[CONF_SEAFILE_VERSION] + + +class Seahub(Project): + name = 'seahub' + + def __init__(self): + Project.__init__(self) + # nothing to do for seahub + self.build_commands = [] + + def get_version(self): + return conf[CONF_SEAFILE_VERSION] + + def build(self): + self.write_version_to_settings_py() + + Project.build(self) + + def write_version_to_settings_py(self): + '''Write the version of current seafile server to seahub''' + settings_py = os.path.join(self.projdir, 'seahub', 'settings.py') + + line = '\nSEAFILE_VERSION = "%s"\n' % conf[CONF_VERSION] + with open(settings_py, 'a+') as fp: + fp.write(line) + + +def check_seahub_thirdpart(thirdpartdir): + '''The ${thirdpartdir} must have django/djblets/gunicorn pre-installed. 
So
+    we can copy it to seahub/thirdpart
+
+    '''
+    thirdpart_libs = [
+        'Django',
+        # 'Djblets',
+        'gunicorn',
+        #'flup',
+        'chardet',
+        'python_dateutil',
+        #'django_picklefield',
+        #'django_constance',
+        # 'SQLAlchemy',
+        # 'python_daemon',
+        # 'lockfile',
+        'six',
+    ]
+
+    def check_thirdpart_lib(name):
+        name += '*'
+        if not glob.glob(os.path.join(thirdpartdir, name)):
+            error('%s not found in %s' % (name, thirdpartdir))
+
+    for lib in thirdpart_libs:
+        check_thirdpart_lib(lib)
+
+
+def check_pro_libs(prolibsdir):
+    '''The ${prolibsdir} must have pro libs installed.'''
+    pro_libs = [
+        'argparse',
+        'elasticsearch_dsl',
+        'SQLAlchemy',
+        'thrift',
+    ]
+
+    def check_pro_lib(name):
+        name += '*'
+        if not glob.glob(os.path.join(prolibsdir, name)):
+            error('%s not found in %s' % (name, prolibsdir))
+
+    for lib in pro_libs:
+        check_pro_lib(lib)
+
+
+def check_targz_src(proj, version, srcdir):
+    src_tarball = os.path.join(srcdir, '%s-%s.tar.gz' % (proj, version))
+    if not os.path.exists(src_tarball):
+        error('%s does not exist' % src_tarball)
+
+
+def check_targz_src_no_version(proj, srcdir):
+    src_tarball = os.path.join(srcdir, '%s.tar.gz' % proj)
+    if not os.path.exists(src_tarball):
+        error('%s does not exist' % src_tarball)
+
+
+def check_pdf2htmlEX():
+    pdf2htmlEX_executable = find_in_path('pdf2htmlEX')
+    if pdf2htmlEX_executable is None:
+        error('pdf2htmlEX not found')
+
+
+def validate_args(usage, options):
+    required_args = [
+        CONF_VERSION,
+        CONF_LIBSEARPC_VERSION,
+        CONF_CCNET_VERSION,
+        CONF_SEAFILE_VERSION,
+        CONF_SRCDIR,
+        CONF_THIRDPARTDIR,
+        CONF_PROLIBSDIR,
+    ]
+
+    # first check required args
+    for optname in required_args:
+        if getattr(options, optname, None) == None:
+            error('%s must be specified' % optname, usage=usage)
+
+    def get_option(optname):
+        return getattr(options, optname)
+
+    # [ version ]
+    def check_project_version(version):
+        '''A valid version must be like 1.2.2, 1.3'''
+        if not re.match('^([0-9])+(\.([0-9])+)+$', version):
+            error('%s is not a valid version' % version, usage=usage)
+
+    version = get_option(CONF_VERSION)
+    seafile_version = get_option(CONF_SEAFILE_VERSION)
+    libsearpc_version = get_option(CONF_LIBSEARPC_VERSION)
+    ccnet_version = get_option(CONF_CCNET_VERSION)
+
+    check_project_version(version)
+    check_project_version(libsearpc_version)
+    check_project_version(ccnet_version)
+    check_project_version(seafile_version)
+
+    # [ srcdir ]
+    srcdir = get_option(CONF_SRCDIR)
+    check_targz_src('libsearpc', libsearpc_version, srcdir)
+    check_targz_src('ccnet', ccnet_version, srcdir)
+    check_targz_src('seafile', seafile_version, srcdir)
+    check_targz_src('seahub', seafile_version, srcdir)
+
+    check_targz_src_no_version('seafes', srcdir)
+    check_targz_src_no_version('seafevents', srcdir)
+    check_targz_src_no_version('seahub-extra', srcdir)
+    check_targz_src_no_version('libevent', srcdir)
+    check_targz_src_no_version('elasticsearch', srcdir)
+    check_targz_src_no_version('seafdav', srcdir)
+    check_targz_src_no_version('seafobj', srcdir)
+
+    check_pdf2htmlEX()
+
+    # [ builddir ]
+    builddir = get_option(CONF_BUILDDIR)
+    if not os.path.exists(builddir):
+        error('%s does not exist' % builddir, usage=usage)
+
+    builddir = os.path.join(builddir, 'seafile-pro-server-build')
+
+    # [ thirdpartdir ]
+    thirdpartdir = get_option(CONF_THIRDPARTDIR)
+    check_seahub_thirdpart(thirdpartdir)
+
+    # [ prolibsdir ]
+    prolibsdir = get_option(CONF_PROLIBSDIR)
+    check_pro_libs(prolibsdir)
+
+    # [ outputdir ]
+    outputdir = get_option(CONF_OUTPUTDIR)
+    if outputdir:
+        if not
os.path.exists(outputdir): + error('outputdir %s does not exist' % outputdir, usage=usage) + else: + outputdir = os.getcwd() + + # [ keep ] + keep = get_option(CONF_KEEP) + + # [ no strip] + nostrip = get_option(CONF_NO_STRIP) + + # [ YES ] + yes = get_option(CONF_YES) + + # [ JOBS ] + jobs = get_option(CONF_JOBS) + + # [no ceph] + no_ceph = get_option(CONF_NO_CEPH) + + mysql_config_path = get_option(CONF_MYSQL_CONFIG) + + brand = get_option(CONF_BRAND) + + conf[CONF_VERSION] = version + conf[CONF_LIBSEARPC_VERSION] = libsearpc_version + conf[CONF_SEAFILE_VERSION] = seafile_version + conf[CONF_CCNET_VERSION] = ccnet_version + + conf[CONF_BUILDDIR] = builddir + conf[CONF_SRCDIR] = srcdir + conf[CONF_OUTPUTDIR] = outputdir + conf[CONF_KEEP] = keep + conf[CONF_THIRDPARTDIR] = thirdpartdir + conf[CONF_PROLIBSDIR] = prolibsdir + conf[CONF_NO_STRIP] = nostrip + conf[CONF_YES] = yes + conf[CONF_JOBS] = jobs + conf[CONF_NO_CEPH] = no_ceph + conf[CONF_MYSQL_CONFIG] = mysql_config_path + conf[CONF_BRAND] = brand + + if os.path.exists(builddir): + error('the builddir %s already exists' % builddir) + + show_build_info() + prepare_builddir(builddir) + + +def show_build_info(): + '''Print all conf information. Confirm before continue.''' + info('------------------------------------------') + info('Seafile Server Professional %s: BUILD INFO' % conf[CONF_VERSION]) + info('------------------------------------------') + info('seafile: %s' % conf[CONF_SEAFILE_VERSION]) + info('ccnet: %s' % conf[CONF_CCNET_VERSION]) + info('libsearpc: %s' % conf[CONF_LIBSEARPC_VERSION]) + info('builddir: %s' % conf[CONF_BUILDDIR]) + info('outputdir: %s' % conf[CONF_OUTPUTDIR]) + info('source dir: %s' % conf[CONF_SRCDIR]) + info('thirdpart dir: %s' % conf[CONF_THIRDPARTDIR]) + info('pro libs dir: %s' % conf[CONF_PROLIBSDIR]) + info('ceph support: %s' % (not conf[CONF_NO_CEPH])) + info('strip symbols: %s' % (not conf[CONF_NO_STRIP])) + info('jobs: %s' % conf[CONF_JOBS]) + info('clean on exit: %s' % (not conf[CONF_KEEP])) + if conf[CONF_YES]: + return + info('------------------------------------------') + info('press any key to continue ') + info('------------------------------------------') + dummy = input() + + +def prepare_builddir(builddir): + must_mkdir(builddir) + + if not conf[CONF_KEEP]: + + def remove_builddir(): + '''Remove the builddir when exit''' + info('remove builddir before exit') + shutil.rmtree(builddir, ignore_errors=True) + + atexit.register(remove_builddir) + + os.chdir(builddir) + + must_mkdir(os.path.join(builddir, 'seafile-server')) + must_mkdir(os.path.join(builddir, 'seafile-server', 'seafile')) + + +def parse_args(): + parser = optparse.OptionParser() + + def long_opt(opt): + return '--' + opt + + parser.add_option( + long_opt(CONF_THIRDPARTDIR), + dest=CONF_THIRDPARTDIR, + nargs=1, + help='where to find the thirdpart libs for seahub') + + parser.add_option( + long_opt(CONF_PROLIBSDIR), + dest=CONF_PROLIBSDIR, + nargs=1, + help='where to find the python libs for seafile professional') + + parser.add_option( + long_opt(CONF_VERSION), + dest=CONF_VERSION, + nargs=1, + help= + 'the version to build. Must be digits delimited by dots, like 1.3.0') + + parser.add_option( + long_opt(CONF_SEAFILE_VERSION), + dest=CONF_SEAFILE_VERSION, + nargs=1, + help= + 'the version of seafile as specified in its "configure.ac". 
Must be digits delimited by dots, like 1.3.0') + + parser.add_option( + long_opt(CONF_LIBSEARPC_VERSION), + dest=CONF_LIBSEARPC_VERSION, + nargs=1, + help= + 'the version of libsearpc as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0') + + parser.add_option( + long_opt(CONF_CCNET_VERSION), + dest=CONF_CCNET_VERSION, + nargs=1, + help= + 'the version of ccnet as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0') + + parser.add_option( + long_opt(CONF_BUILDDIR), + dest=CONF_BUILDDIR, + nargs=1, + help='the directory to build the source. Defaults to /tmp', + default=tempfile.gettempdir()) + + parser.add_option( + long_opt(CONF_OUTPUTDIR), + dest=CONF_OUTPUTDIR, + nargs=1, + help= + 'the output directory to put the generated server tarball. Defaults to the current directory.', + default=os.getcwd()) + + parser.add_option( + long_opt(CONF_SRCDIR), + dest=CONF_SRCDIR, + nargs=1, + help='''Source tarballs must be placed in this directory.''') + + parser.add_option( + long_opt(CONF_KEEP), + dest=CONF_KEEP, + action='store_true', + help= + '''keep the build directory after the script exits. By default, the script would delete the build directory at exit.''') + + parser.add_option( + long_opt(CONF_NO_STRIP), + dest=CONF_NO_STRIP, + action='store_true', + help='''do not strip debug symbols''') + + parser.add_option( + long_opt(CONF_YES), + dest=CONF_YES, + action='store_true', + help='''assume yes to all questions''') + + parser.add_option(long_opt(CONF_JOBS), dest=CONF_JOBS, default=2, type=int) + + parser.add_option( + long_opt(CONF_NO_CEPH), + dest=CONF_NO_CEPH, + action='store_true', + help='''do not enable storage backends''') + + parser.add_option(long_opt(CONF_MYSQL_CONFIG), + dest=CONF_MYSQL_CONFIG, + nargs=1, + help='''Absolute path to mysql_config or mariadb_config program.''') + + parser.add_option(long_opt(CONF_BRAND), + dest=CONF_BRAND, + default='', + help='''brand name of the package''') + + usage = parser.format_help() + options, remain = parser.parse_args() + if remain: + error(usage=usage) + + validate_args(usage, options) + + +def setup_build_env(): + '''Setup environment variables, such as export PATH=$BUILDDDIR/bin:$PATH''' + prefix = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seafile') + + def prepend_env_value(name, value, seperator=':'): + '''append a new value to a list''' + try: + current_value = os.environ[name] + except KeyError: + current_value = '' + + new_value = value + if current_value: + new_value += seperator + current_value + + os.environ[name] = new_value + + prepend_env_value('CPPFLAGS', + '-I%s' % os.path.join(prefix, 'include'), + seperator=' ') + + if conf[CONF_NO_STRIP]: + prepend_env_value('CPPFLAGS', '-g -O0', seperator=' ') + + prepend_env_value('LDFLAGS', + '-L%s' % os.path.join(prefix, 'lib'), + seperator=' ') + + prepend_env_value('LDFLAGS', + '-L%s' % os.path.join(prefix, 'lib64'), + seperator=' ') + + prepend_env_value('PATH', os.path.join(prefix, 'bin')) + prepend_env_value('PKG_CONFIG_PATH', os.path.join(prefix, 'lib', + 'pkgconfig')) + prepend_env_value('PKG_CONFIG_PATH', os.path.join(prefix, 'lib64', + 'pkgconfig')) + + +def copy_pro_libs(): + '''Copy pro.py and python libs for Seafile Professional to + seafile-server/pro/python + + ''' + builddir = conf[CONF_BUILDDIR] + pro_program_dir = os.path.join(builddir, 'seafile-server', 'pro') + if not os.path.exists(pro_program_dir): + must_mkdir(pro_program_dir) + + pro_misc_dir = os.path.join(pro_program_dir, 'misc') + if not 
os.path.exists(pro_misc_dir): + must_mkdir(pro_misc_dir) + + pro_libs_dir = os.path.join(pro_program_dir, 'python') + must_mkdir(pro_libs_dir) + + must_copytree(conf[CONF_PROLIBSDIR], pro_libs_dir) + + pro_py = os.path.join(Seafile().projdir, 'scripts', 'pro.py') + must_copy(pro_py, pro_program_dir) + + seahub_extra_sql_sqlite3 = os.path.join(Seafile().projdir, 'scripts', + 'seahub_extra.sqlite3.sql') + seahub_extra_sql_mysql = os.path.join(Seafile().projdir, 'scripts', + 'seahub_extra.mysql.sql') + must_copy(seahub_extra_sql_sqlite3, pro_misc_dir) + must_copy(seahub_extra_sql_mysql, pro_misc_dir) + + uncompress_seafes_seafevents() + + +def uncompress_seafes_seafevents(): + '''Extract seafes.tar.gz and seafevents.tar.gz, libevent.tar.gz to + seafile-server/pro/python + + ''' + builddir = conf[CONF_BUILDDIR] + pro_libs_dir = os.path.join(builddir, 'seafile-server', 'pro', 'python') + + tarball = os.path.join(conf[CONF_SRCDIR], 'seafes.tar.gz') + if run('tar xf %s -C %s' % (tarball, pro_libs_dir)) != 0: + error('failed to uncompress %s' % tarball) + + tarball = os.path.join(conf[CONF_SRCDIR], 'seafevents.tar.gz') + if run('tar xf %s -C %s' % (tarball, pro_libs_dir)) != 0: + error('failed to uncompress %s' % tarball) + + tarball = os.path.join(conf[CONF_SRCDIR], 'libevent.tar.gz') + if run('tar xf %s -C %s' % (tarball, pro_libs_dir)) != 0: + error('failed to uncompress %s' % tarball) + + +def copy_seafdav(): + dst_dir = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seahub', + 'thirdpart') + tarball = os.path.join(conf[CONF_SRCDIR], 'seafdav.tar.gz') + if run('tar xf %s -C %s' % (tarball, dst_dir)) != 0: + error('failed to uncompress %s' % tarball) + + dst_dir = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seahub', + 'thirdpart') + tarball = os.path.join(conf[CONF_SRCDIR], 'seafobj.tar.gz') + if run('tar xf %s -C %s' % (tarball, dst_dir)) != 0: + error('failed to uncompress %s' % tarball) + + +def copy_elasticsearch(): + '''Extract elasticsearch to seafile-server/pro/''' + builddir = conf[CONF_BUILDDIR] + pro_dir = os.path.join(builddir, 'seafile-server', 'pro') + es_tarball = os.path.join(conf[CONF_SRCDIR], 'elasticsearch.tar.gz') + + if run('tar xf %s -C %s' % (es_tarball, pro_dir)) != 0: + error('failed to uncompress elasticsearch') + + +def copy_user_manuals(): + builddir = conf[CONF_BUILDDIR] + src_pattern = os.path.join(builddir, Seafile().projdir, 'doc', + 'seafile-tutorial.doc') + dst_dir = os.path.join(builddir, 'seafile-server', 'seafile', 'docs') + + must_mkdir(dst_dir) + + for path in glob.glob(src_pattern): + must_copy(path, dst_dir) + + +def copy_scripts_and_libs(): + '''Copy server release scripts and shared libs, as well as seahub + thirdpart libs + + ''' + builddir = conf[CONF_BUILDDIR] + scripts_srcdir = os.path.join(builddir, Seafile().projdir, 'scripts') + serverdir = os.path.join(builddir, 'seafile-server') + + must_copy(os.path.join(scripts_srcdir, 'setup-seafile.sh'), serverdir) + must_copy( + os.path.join(scripts_srcdir, 'setup-seafile-mysql.sh'), serverdir) + must_copy( + os.path.join(scripts_srcdir, 'setup-seafile-mysql.py'), serverdir) + must_copy(os.path.join(scripts_srcdir, 'seafile.sh'), serverdir) + must_copy(os.path.join(scripts_srcdir, 'seahub.sh'), serverdir) + must_copy(os.path.join(scripts_srcdir, 'reset-admin.sh'), serverdir) + must_copy(os.path.join(scripts_srcdir, 'seaf-fuse.sh'), serverdir) + must_copy(os.path.join(scripts_srcdir, 'seaf-gc.sh'), serverdir) + must_copy(os.path.join(scripts_srcdir, 'seaf-fsck.sh'), serverdir) + must_copy( + 
os.path.join(scripts_srcdir, 'seafile-background-tasks.sh'), serverdir) + must_copy(os.path.join(scripts_srcdir, 'check_init_admin.py'), serverdir) + must_copy(os.path.join(scripts_srcdir, 'check-db-type.py'), serverdir) + + # Command line for real-time backup server + must_copy(os.path.join(scripts_srcdir, 'seaf-backup-cmd.py'), serverdir) + must_copy(os.path.join(scripts_srcdir, 'seaf-backup-cmd.sh'), serverdir) + # copy seaf-import, store_encrypt related scripts + must_copy(os.path.join(scripts_srcdir, 'seaf-import.sh'), serverdir) + must_copy(os.path.join(scripts_srcdir, 'seaf-gen-key.sh'), serverdir) + must_copy(os.path.join(scripts_srcdir, 'seaf-encrypt.sh'), serverdir) + + # general migrate script + must_copy(os.path.join(scripts_srcdir, 'migrate.py'), serverdir) + must_copy(os.path.join(scripts_srcdir, 'migrate.sh'), serverdir) + + # general migrate repo script + must_copy(os.path.join(scripts_srcdir, 'migrate-repo.py'), serverdir) + must_copy(os.path.join(scripts_srcdir, 'migrate-repo.sh'), serverdir) + + # general seafes script + must_copy(os.path.join(scripts_srcdir, 'run_index_master.sh'), serverdir) + must_copy(os.path.join(scripts_srcdir, 'run_index_worker.sh'), serverdir) + must_copy(os.path.join(scripts_srcdir, 'index_op.py'), serverdir) + + # copy update scripts + update_scriptsdir = os.path.join(scripts_srcdir, 'upgrade') + dst_update_scriptsdir = os.path.join(serverdir, 'upgrade') + try: + shutil.copytree(update_scriptsdir, dst_update_scriptsdir) + except Exception as e: + error('failed to copy upgrade scripts: %s' % e) + + # copy sql scripts + sql_scriptsdir = os.path.join(scripts_srcdir, 'sql') + dst_sql_scriptsdir = os.path.join(serverdir, 'sql') + try: + shutil.copytree(sql_scriptsdir, dst_sql_scriptsdir) + except Exception as e: + error('failed to copy sql scripts: %s' % e) + + # copy create db sql scripts + create_db_scriptsdir = os.path.join(scripts_srcdir, 'create-db') + dst_create_db_scriptsdir = os.path.join(serverdir, 'create-db') + try: + shutil.copytree(create_db_scriptsdir, dst_create_db_scriptsdir) + except Exception as e: + error('failed to copy create db scripts: %s' % e) + + seahub_oracle_sql_script = os.path.join(Seahub().projdir, 'sql', 'oracle.sql') + must_copy(seahub_oracle_sql_script, os.path.join(dst_create_db_scriptsdir, 'oracle', 'seahub_db.sql')) + + # copy runtime/seahub.conf + runtimedir = os.path.join(serverdir, 'runtime') + must_mkdir(runtimedir) + must_copy(os.path.join(scripts_srcdir, 'seahub.conf'), runtimedir) + + # move seahub to seafile-server/seahub + src_seahubdir = Seahub().projdir + dst_seahubdir = os.path.join(serverdir, 'seahub') + try: + shutil.move(src_seahubdir, dst_seahubdir) + except Exception as e: + error('failed to move seahub to seafile-server/seahub: %s' % e) + + # copy seahub thirdpart libs + seahub_thirdpart = os.path.join(dst_seahubdir, 'thirdpart') + copy_seahub_thirdpart_libs(seahub_thirdpart) + copy_seafdav() + copy_seahub_extra() + + # copy pro libs & elasticsearch + copy_pro_libs() + copy_elasticsearch() + copy_pdf2htmlex() + + # copy shared c libs + copy_shared_libs() + copy_user_manuals() + + +def copy_pdf2htmlex(): + '''Copy pdf2htmlEX exectuable and its dependent libs''' + pdf2htmlEX_executable = find_in_path('pdf2htmlEX') + libs = get_dependent_libs(pdf2htmlEX_executable) + + builddir = conf[CONF_BUILDDIR] + dst_lib_dir = os.path.join(builddir, 'seafile-server', 'seafile', 'lib') + + dst_bin_dir = os.path.join(builddir, 'seafile-server', 'seafile', 'bin') + + for lib in libs: + dst_file = 
os.path.join(dst_lib_dir, os.path.basename(lib)) + if os.path.exists(dst_file): + continue + info('Copying %s' % lib) + must_copy(lib, dst_lib_dir) + + must_copy(pdf2htmlEX_executable, dst_bin_dir) + + +def get_dependent_libs(executable): + syslibs = ['libsearpc', 'libccnet', 'libseafile', 'libpthread.so', + 'libc.so', 'libm.so', 'librt.so', 'libdl.so', 'libselinux.so', + 'libresolv.so', 'libnss3.so', 'libnssutil3.so', 'libssl3.so'] + + def is_syslib(lib): + for syslib in syslibs: + if syslib in lib: + return True + return False + + ldd_output = subprocess.getoutput('ldd %s' % executable) + if 'not found' in ldd_output: + print(ldd_output) + error('some deps of %s not found' % executable) + ret = set() + for line in ldd_output.splitlines(): + tokens = line.split() + if len(tokens) != 4: + continue + if is_syslib(tokens[0]): + continue + + ret.add(tokens[2]) + + return ret + + +def copy_shared_libs(): + '''copy shared c libs, such as libevent, glib, libmysqlclient''' + builddir = conf[CONF_BUILDDIR] + + dst_dir = os.path.join(builddir, 'seafile-server', 'seafile', 'lib') + + seafile_path = os.path.join(builddir, 'seafile-server', 'seafile', 'bin', + 'seaf-server') + + ccnet_server_path = os.path.join(builddir, 'seafile-server', 'seafile', + 'bin', 'ccnet-server') + + seaf_fuse_path = os.path.join(builddir, 'seafile-server', 'seafile', 'bin', + 'seaf-fuse') + + libs = set() + libs.update(get_dependent_libs(ccnet_server_path)) + libs.update(get_dependent_libs(seafile_path)) + libs.update(get_dependent_libs(seaf_fuse_path)) + + for lib in libs: + dst_file = os.path.join(dst_dir, os.path.basename(lib)) + if os.path.exists(dst_file): + continue + info('Copying %s' % lib) + shutil.copy(lib, dst_dir) + + +def copy_seahub_thirdpart_libs(seahub_thirdpart): + '''copy django/djblets/gunicorn from ${thirdpartdir} to + seahub/thirdpart + + ''' + src = conf[CONF_THIRDPARTDIR] + dst = seahub_thirdpart + + must_copytree(src, dst) + + +def copy_seahub_extra(): + '''uncompress seahub-extra.tar.gz to seafile-server/seahub-extra''' + tarball = os.path.join(conf[CONF_SRCDIR], 'seahub-extra.tar.gz') + builddir = conf[CONF_BUILDDIR] + seahub_dir = os.path.join(builddir, 'seafile-server') + + if run('tar xf %s -C %s' % (tarball, seahub_dir)) != 0: + error('failed to uncompress elasticsearch') + + +def strip_symbols(): + def do_strip(fn): + run('chmod u+w %s' % fn) + info('stripping: %s' % fn) + run('strip "%s"' % fn) + + def remove_static_lib(fn): + info('removing: %s' % fn) + os.remove(fn) + + for parent, dnames, fnames in os.walk('seafile-server/seafile'): + dummy = dnames # avoid pylint 'unused' warning + for fname in fnames: + fn = os.path.join(parent, fname) + if os.path.isdir(fn): + continue + + if fn.endswith(".a") or fn.endswith(".la"): + remove_static_lib(fn) + continue + + if os.path.islink(fn): + continue + + finfo = subprocess.getoutput('file "%s"' % fn) + + if 'not stripped' in finfo: + do_strip(fn) + + +def create_tarball(tarball_name): + '''call tar command to generate a tarball''' + version = conf[CONF_VERSION] + + serverdir = 'seafile-server' + versioned_serverdir = 'seafile-pro-server-' + version + + # move seafile-server to seafile-server-${version} + try: + shutil.move(serverdir, versioned_serverdir) + except Exception as e: + error('failed to move %s to %s: %s' % + (serverdir, versioned_serverdir, e)) + + ignored_patterns = [ + # common ignored files + '*.pyc', + '*~', + '*#', + + # seahub + os.path.join(versioned_serverdir, 'seahub', '.git*'), + os.path.join(versioned_serverdir, 'seahub', 
'media', 'flexpaper*'), + os.path.join(versioned_serverdir, 'seahub', 'avatar', 'testdata*'), + + # seafile + os.path.join(versioned_serverdir, 'seafile', 'share*'), + os.path.join(versioned_serverdir, 'seafile', 'include*'), + os.path.join(versioned_serverdir, 'seafile', 'lib', 'pkgconfig*'), + os.path.join(versioned_serverdir, 'seafile', 'lib64', 'pkgconfig*'), + os.path.join(versioned_serverdir, 'seafile', 'bin', 'ccnet-demo*'), + os.path.join(versioned_serverdir, 'seafile', 'bin', 'ccnet-tool'), + os.path.join(versioned_serverdir, 'seafile', 'bin', 'ccnet-servtool'), + os.path.join(versioned_serverdir, 'seafile', 'bin', + 'searpc-codegen.py'), + os.path.join(versioned_serverdir, 'seafile', 'bin', 'seafile-admin'), + os.path.join(versioned_serverdir, 'seafile', 'bin', 'seafile'), + ] + + excludes_list = ['--exclude=%s' % pattern for pattern in ignored_patterns] + excludes = ' '.join(excludes_list) + + tar_cmd = 'tar czvf %(tarball_name)s %(versioned_serverdir)s %(excludes)s' \ + % dict(tarball_name=tarball_name, + versioned_serverdir=versioned_serverdir, + excludes=excludes) + + if run(tar_cmd, suppress_stdout=True) != 0: + error('failed to generate the tarball') + + +def gen_tarball(): + # strip symbols of libraries to reduce size + if not conf[CONF_NO_STRIP]: + try: + strip_symbols() + except Exception as e: + error('failed to strip symbols: %s' % e) + + # determine the output name + # 64-bit: seafile-server_1.2.2_x86-64.tar.gz + # 32-bit: seafile-server_1.2.2_i386.tar.gz + version = conf[CONF_VERSION] + arch = os.uname()[-1].replace('_', '-') + if arch != 'x86-64': + arch = 'i386' + + dbg = '' + if conf[CONF_NO_STRIP]: + dbg = '.dbg' + + no_ceph = '' + if conf[CONF_NO_CEPH]: + no_ceph = '.no-ceph' + + brand = '' + if conf[CONF_BRAND]: + brand = '-%s' % conf[CONF_BRAND] + + tarball_name = 'seafile-pro-server_%(version)s_%(arch)s%(brand)s%(no_ceph)s%(dbg)s.tar.gz' \ + % dict(version=version, arch=arch, dbg=dbg, no_ceph=no_ceph, brand=brand) + dst_tarball = os.path.join(conf[CONF_OUTPUTDIR], tarball_name) + + # generate the tarball + try: + create_tarball(tarball_name) + except Exception as e: + error('failed to generate tarball: %s' % e) + + # move tarball to outputdir + try: + shutil.copy(tarball_name, dst_tarball) + except Exception as e: + error('failed to copy %s to %s: %s' % (tarball_name, dst_tarball, e)) + + print('---------------------------------------------') + print('The build is successfully. Output is:\t%s' % dst_tarball) + print('---------------------------------------------') + + +def main(): + parse_args() + setup_build_env() + + libsearpc = Libsearpc() + ccnet = Ccnet() + seafile = Seafile() + seahub = Seahub() + + libsearpc.uncompress() + libsearpc.build() + + ccnet.uncompress() + ccnet.build() + + seafile.uncompress() + seafile.build() + + seahub.uncompress() + seahub.build() + + copy_scripts_and_libs() + gen_tarball() + + +if __name__ == '__main__': + main() diff --git a/scripts/build/build-server.py b/scripts/build/build-server.py new file mode 100755 index 0000000000..a479b6c75c --- /dev/null +++ b/scripts/build/build-server.py @@ -0,0 +1,859 @@ +#!/usr/bin/env python3 +# coding: UTF-8 + +'''This script builds the seafile server tarball. + +Some notes: + +1. The working directory is always the 'builddir'. 'os.chdir' is only called +to change to the 'builddir'. We make use of the 'cwd' argument in +'subprocess.Popen' to run a command in a specific directory. + +2. 
django/djangorestframework/djblets/gunicorn/flup must be easy_install-ed to +a directory before running this script. That directory is passed in as the +'--thirdpartdir' arguments. + +''' +import sys +import os +import glob +import subprocess +import tempfile +import shutil +import re +import subprocess +import optparse +import atexit +import platform + +#################### +### Global variables +#################### + +# command line configuartion +conf = {} + +# key names in the conf dictionary. +CONF_VERSION = 'version' +CONF_SEAFILE_VERSION = 'seafile_version' +CONF_LIBSEARPC_VERSION = 'libsearpc_version' +CONF_SRCDIR = 'srcdir' +CONF_KEEP = 'keep' +CONF_BUILDDIR = 'builddir' +CONF_OUTPUTDIR = 'outputdir' +CONF_THIRDPARTDIR = 'thirdpartdir' +CONF_NO_STRIP = 'nostrip' +CONF_ENABLE_S3 = 's3' +CONF_YES = 'yes' +CONF_JOBS = 'jobs' +CONF_MYSQL_CONFIG = 'mysql_config' + +#################### +### Common helper functions +#################### +def highlight(content, is_error=False): + '''Add ANSI color to content to get it highlighted on terminal''' + if is_error: + return '\x1b[1;31m%s\x1b[m' % content + else: + return '\x1b[1;32m%s\x1b[m' % content + +def info(msg): + print(highlight('[INFO] ') + msg) + +def find_in_path(prog): + '''Find a file in system path''' + dirs = os.environ['PATH'].split(':') + for d in dirs: + if d == '': + continue + path = os.path.join(d, prog) + if os.path.exists(path): + return path + + return None + +def error(msg=None, usage=None): + if msg: + print(highlight('[ERROR] ') + msg) + if usage: + print(usage) + sys.exit(1) + +def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): + '''Run a program and wait for it to finish, and return its exit code. The + standard output of this program is supressed. + + ''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(argv, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env) + return proc.wait() + +def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): + '''Like run_argv but specify a command line string instead of argv''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(cmdline, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env, + shell=True) + return proc.wait() + +def must_mkdir(path): + '''Create a directory, exit on failure''' + try: + os.mkdir(path) + except OSError as e: + error('failed to create directory %s:%s' % (path, e)) + +def must_copy(src, dst): + '''Copy src to dst, exit on failure''' + try: + shutil.copy(src, dst) + except Exception as e: + error('failed to copy %s to %s: %s' % (src, dst, e)) + +class Project(object): + '''Base class for a project''' + # Project name, i.e. 
libseaprc/seafile/seahub + name = '' + + # A list of shell commands to configure/build the project + build_commands = [] + + def __init__(self): + # the path to pass to --prefix=/ + self.prefix = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seafile') + self.version = self.get_version() + self.src_tarball = os.path.join(conf[CONF_SRCDIR], + '%s-%s.tar.gz' % (self.name, self.version)) + # project dir, like /seafile-1.2.2/ + self.projdir = os.path.join(conf[CONF_BUILDDIR], '%s-%s' % (self.name, self.version)) + + def get_version(self): + # libsearpc can have different versions from seafile. + raise NotImplementedError + + def uncompress(self): + '''Uncompress the source from the tarball''' + info('Uncompressing %s' % self.name) + + if run('tar xf %s' % self.src_tarball) < 0: + error('failed to uncompress source of %s' % self.name) + + def build(self): + '''Build the source''' + info('Building %s' % self.name) + for cmd in self.build_commands: + if run(cmd, cwd=self.projdir) != 0: + error('error when running command:\n\t%s\n' % cmd) + +class Libsearpc(Project): + name = 'libsearpc' + + def __init__(self): + Project.__init__(self) + self.build_commands = [ + './configure --prefix=%s' % self.prefix, + 'make -j%s' % conf[CONF_JOBS], + 'make install' + ] + + def get_version(self): + return conf[CONF_LIBSEARPC_VERSION] + +class Seafile(Project): + name = 'seafile' + def __init__(self): + Project.__init__(self) + s3_support = '' + if conf[CONF_ENABLE_S3]: + s3_support = '--enable-s3' + + configure_command = './configure --prefix=%s %s --enable-ldap' % (self.prefix, s3_support) + if conf[CONF_MYSQL_CONFIG]: + configure_command += ' --with-mysql=%s' % conf[CONF_MYSQL_CONFIG] + + self.build_commands = [ + configure_command, + 'make -j%s' % conf[CONF_JOBS], + 'make install' + ] + + def get_version(self): + return conf[CONF_SEAFILE_VERSION] + +class Seahub(Project): + name = 'seahub' + def __init__(self): + Project.__init__(self) + # nothing to do for seahub + self.build_commands = [ + ] + + def get_version(self): + return conf[CONF_SEAFILE_VERSION] + + def build(self): + self.write_version_to_settings_py() + + Project.build(self) + + def write_version_to_settings_py(self): + '''Write the version of current seafile server to seahub''' + settings_py = os.path.join(self.projdir, 'seahub', 'settings.py') + + line = '\nSEAFILE_VERSION = "%s"\n' % conf[CONF_VERSION] + with open(settings_py, 'a+') as fp: + fp.write(line) + + +def check_seahub_thirdpart(thirdpartdir): + '''The ${thirdpartdir} must have django/djblets/gunicorn pre-installed. 
So + we can copy it to seahub/thirdpart + + ''' + thirdpart_libs = [ + 'Django', + # 'Djblets', + 'gunicorn', + #'flup', + 'chardet', + 'python_dateutil', + #'django_picklefield', + #'django_constance', + # 'SQLAlchemy', + # 'python_daemon', + # 'lockfile', + # 'six', + ] + def check_thirdpart_lib(name): + name += '*' + if not glob.glob(os.path.join(thirdpartdir, name)): + error('%s not found in %s' % (name, thirdpartdir)) + + for lib in thirdpart_libs: + check_thirdpart_lib(lib) + +def check_targz_src(proj, version, srcdir): + src_tarball = os.path.join(srcdir, '%s-%s.tar.gz' % (proj, version)) + if not os.path.exists(src_tarball): + error('%s not exists' % src_tarball) + +def check_targz_src_no_version(proj, srcdir): + src_tarball = os.path.join(srcdir, '%s.tar.gz' % proj) + if not os.path.exists(src_tarball): + error('%s not exists' % src_tarball) + +def check_pdf2htmlEX(): + pdf2htmlEX_executable = find_in_path('pdf2htmlEX') + if pdf2htmlEX_executable is None: + error('pdf2htmlEX not found') + +def validate_args(usage, options): + required_args = [ + CONF_VERSION, + CONF_LIBSEARPC_VERSION, + CONF_SEAFILE_VERSION, + CONF_SRCDIR, + CONF_THIRDPARTDIR, + ] + + # fist check required args + for optname in required_args: + if getattr(options, optname, None) == None: + error('%s must be specified' % optname, usage=usage) + + def get_option(optname): + return getattr(options, optname) + + # [ version ] + def check_project_version(version): + '''A valid version must be like 1.2.2, 1.3''' + if not re.match('^[0-9]+(\.([0-9])+)+$', version): + error('%s is not a valid version' % version, usage=usage) + + version = get_option(CONF_VERSION) + seafile_version = get_option(CONF_SEAFILE_VERSION) + libsearpc_version = get_option(CONF_LIBSEARPC_VERSION) + + check_project_version(version) + check_project_version(libsearpc_version) + check_project_version(seafile_version) + + # [ srcdir ] + srcdir = get_option(CONF_SRCDIR) + check_targz_src('libsearpc', libsearpc_version, srcdir) + check_targz_src('seafile', seafile_version, srcdir) + check_targz_src('seahub', seafile_version, srcdir) + check_targz_src_no_version('seafdav', srcdir) + check_targz_src_no_version('seafobj', srcdir) + + # check_pdf2htmlEX() + + # [ builddir ] + builddir = get_option(CONF_BUILDDIR) + if not os.path.exists(builddir): + error('%s does not exist' % builddir, usage=usage) + + builddir = os.path.join(builddir, 'seafile-server-build') + + # [ thirdpartdir ] + thirdpartdir = get_option(CONF_THIRDPARTDIR) + check_seahub_thirdpart(thirdpartdir) + + # [ outputdir ] + outputdir = get_option(CONF_OUTPUTDIR) + if outputdir: + if not os.path.exists(outputdir): + error('outputdir %s does not exist' % outputdir, usage=usage) + else: + outputdir = os.getcwd() + + # [ yes ] + yes = get_option(CONF_YES) + + # [ jobs ] + jobs = get_option(CONF_JOBS) + + # [ keep ] + keep = get_option(CONF_KEEP) + + # [ no strip] + nostrip = get_option(CONF_NO_STRIP) + + # [ s3 ] + s3 = get_option(CONF_ENABLE_S3) + + mysql_config_path = get_option(CONF_MYSQL_CONFIG) + + conf[CONF_VERSION] = version + conf[CONF_LIBSEARPC_VERSION] = libsearpc_version + conf[CONF_SEAFILE_VERSION] = seafile_version + + conf[CONF_BUILDDIR] = builddir + conf[CONF_SRCDIR] = srcdir + conf[CONF_OUTPUTDIR] = outputdir + conf[CONF_KEEP] = keep + conf[CONF_THIRDPARTDIR] = thirdpartdir + conf[CONF_NO_STRIP] = nostrip + conf[CONF_ENABLE_S3] = s3 + conf[CONF_YES] = yes + conf[CONF_JOBS] = jobs + conf[CONF_MYSQL_CONFIG] = mysql_config_path + + prepare_builddir(builddir) + show_build_info() + +def 
show_build_info(): + '''Print all conf information. Confirm before continue.''' + info('------------------------------------------') + info('Seafile server %s: BUILD INFO' % conf[CONF_VERSION]) + info('------------------------------------------') + info('seafile: %s' % conf[CONF_SEAFILE_VERSION]) + info('libsearpc: %s' % conf[CONF_LIBSEARPC_VERSION]) + info('builddir: %s' % conf[CONF_BUILDDIR]) + info('outputdir: %s' % conf[CONF_OUTPUTDIR]) + info('source dir: %s' % conf[CONF_SRCDIR]) + info('strip symbols: %s' % (not conf[CONF_NO_STRIP])) + info('s3 support: %s' % (conf[CONF_ENABLE_S3])) + info('clean on exit: %s' % (not conf[CONF_KEEP])) + if conf[CONF_YES]: + return + info('------------------------------------------') + info('press any key to continue ') + info('------------------------------------------') + input() + +def prepare_builddir(builddir): + must_mkdir(builddir) + + if not conf[CONF_KEEP]: + def remove_builddir(): + '''Remove the builddir when exit''' + info('remove builddir before exit') + shutil.rmtree(builddir, ignore_errors=True) + atexit.register(remove_builddir) + + os.chdir(builddir) + + must_mkdir(os.path.join(builddir, 'seafile-server')) + must_mkdir(os.path.join(builddir, 'seafile-server', 'seafile')) + +def parse_args(): + parser = optparse.OptionParser() + def long_opt(opt): + return '--' + opt + + parser.add_option(long_opt(CONF_YES), + dest=CONF_YES, + action='store_true') + + parser.add_option(long_opt(CONF_JOBS), + dest=CONF_JOBS, + default=2, + type=int) + + parser.add_option(long_opt(CONF_THIRDPARTDIR), + dest=CONF_THIRDPARTDIR, + nargs=1, + help='where to find the thirdpart libs for seahub') + + parser.add_option(long_opt(CONF_VERSION), + dest=CONF_VERSION, + nargs=1, + help='the version to build. Must be digits delimited by dots, like 1.3.0') + + parser.add_option(long_opt(CONF_SEAFILE_VERSION), + dest=CONF_SEAFILE_VERSION, + nargs=1, + help='the version of seafile as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0') + + parser.add_option(long_opt(CONF_LIBSEARPC_VERSION), + dest=CONF_LIBSEARPC_VERSION, + nargs=1, + help='the version of libsearpc as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0') + + parser.add_option(long_opt(CONF_BUILDDIR), + dest=CONF_BUILDDIR, + nargs=1, + help='the directory to build the source. Defaults to /tmp', + default=tempfile.gettempdir()) + + parser.add_option(long_opt(CONF_OUTPUTDIR), + dest=CONF_OUTPUTDIR, + nargs=1, + help='the output directory to put the generated server tarball. Defaults to the current directory.', + default=os.getcwd()) + + parser.add_option(long_opt(CONF_SRCDIR), + dest=CONF_SRCDIR, + nargs=1, + help='''Source tarballs must be placed in this directory.''') + + parser.add_option(long_opt(CONF_KEEP), + dest=CONF_KEEP, + action='store_true', + help='''keep the build directory after the script exits. 
By default, the script would delete the build directory at exit.''') + + parser.add_option(long_opt(CONF_NO_STRIP), + dest=CONF_NO_STRIP, + action='store_true', + help='''do not strip debug symbols''') + + parser.add_option(long_opt(CONF_ENABLE_S3), + dest=CONF_ENABLE_S3, + action='store_true', + help='''enable amazon s3 support''') + + parser.add_option(long_opt(CONF_MYSQL_CONFIG), + dest=CONF_MYSQL_CONFIG, + nargs=1, + help='''Absolute path to mysql_config or mariadb_config program.''') + + usage = parser.format_help() + options, remain = parser.parse_args() + if remain: + error(usage=usage) + + validate_args(usage, options) + +def setup_build_env(): + '''Setup environment variables, such as export PATH=$BUILDDDIR/bin:$PATH''' + prefix = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seafile') + def prepend_env_value(name, value, seperator=':'): + '''append a new value to a list''' + try: + current_value = os.environ[name] + except KeyError: + current_value = '' + + new_value = value + if current_value: + new_value += seperator + current_value + + os.environ[name] = new_value + + prepend_env_value('CPPFLAGS', + '-I%s' % os.path.join(prefix, 'include'), + seperator=' ') + + prepend_env_value('CPPFLAGS', + '-DLIBICONV_PLUG', + seperator=' ') + + if conf[CONF_NO_STRIP]: + prepend_env_value('CPPFLAGS', + '-g -O0', + seperator=' ') + + prepend_env_value('CFLAGS', + '-g -O0', + seperator=' ') + + prepend_env_value('LDFLAGS', + '-L%s' % os.path.join(prefix, 'lib'), + seperator=' ') + + prepend_env_value('LDFLAGS', + '-L%s' % os.path.join(prefix, 'lib64'), + seperator=' ') + + prepend_env_value('PATH', os.path.join(prefix, 'bin')) + prepend_env_value('PKG_CONFIG_PATH', os.path.join(prefix, 'lib', 'pkgconfig')) + prepend_env_value('PKG_CONFIG_PATH', os.path.join(prefix, 'lib64', 'pkgconfig')) + +def copy_user_manuals(): + builddir = conf[CONF_BUILDDIR] + # src_pattern = os.path.join(builddir, Seafile().projdir, 'doc', '*.doc') + src_pattern = os.path.join(builddir, Seafile().projdir, 'doc', 'seafile-tutorial.doc') + dst_dir = os.path.join(builddir, 'seafile-server', 'seafile', 'docs') + + must_mkdir(dst_dir) + + for path in glob.glob(src_pattern): + must_copy(path, dst_dir) + +def copy_seafdav(): + dst_dir = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seahub', 'thirdpart') + tarball = os.path.join(conf[CONF_SRCDIR], 'seafdav.tar.gz') + if run('tar xf %s -C %s' % (tarball, dst_dir)) != 0: + error('failed to uncompress %s' % tarball) + + dst_dir = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seahub', 'thirdpart') + tarball = os.path.join(conf[CONF_SRCDIR], 'seafobj.tar.gz') + if run('tar xf %s -C %s' % (tarball, dst_dir)) != 0: + error('failed to uncompress %s' % tarball) + +def copy_scripts_and_libs(): + '''Copy server release scripts and shared libs, as well as seahub + thirdpart libs + + ''' + builddir = conf[CONF_BUILDDIR] + scripts_srcdir = os.path.join(builddir, Seafile().projdir, 'scripts') + serverdir = os.path.join(builddir, 'seafile-server') + + must_copy(os.path.join(scripts_srcdir, 'setup-seafile.sh'), + serverdir) + must_copy(os.path.join(scripts_srcdir, 'setup-seafile-mysql.sh'), + serverdir) + must_copy(os.path.join(scripts_srcdir, 'setup-seafile-mysql.py'), + serverdir) + must_copy(os.path.join(scripts_srcdir, 'seafile.sh'), + serverdir) + must_copy(os.path.join(scripts_srcdir, 'seahub.sh'), + serverdir) + must_copy(os.path.join(scripts_srcdir, 'reset-admin.sh'), + serverdir) + must_copy(os.path.join(scripts_srcdir, 'seaf-fuse.sh'), + serverdir) + 
must_copy(os.path.join(scripts_srcdir, 'check_init_admin.py'), + serverdir) + must_copy(os.path.join(scripts_srcdir, 'seaf-gc.sh'), + serverdir) + must_copy(os.path.join(scripts_srcdir, 'seaf-fsck.sh'), + serverdir) + + # copy update scripts + update_scriptsdir = os.path.join(scripts_srcdir, 'upgrade') + dst_update_scriptsdir = os.path.join(serverdir, 'upgrade') + try: + shutil.copytree(update_scriptsdir, dst_update_scriptsdir) + except Exception as e: + error('failed to copy upgrade scripts: %s' % e) + + # copy sql scripts + sql_scriptsdir = os.path.join(scripts_srcdir, 'sql') + dst_sql_scriptsdir = os.path.join(serverdir, 'sql') + try: + shutil.copytree(sql_scriptsdir, dst_sql_scriptsdir) + except Exception as e: + error('failed to copy sql scripts: %s' % e) + + # copy runtime/seahub.conf + runtimedir = os.path.join(serverdir, 'runtime') + must_mkdir(runtimedir) + must_copy(os.path.join(scripts_srcdir, 'seahub.conf'), + runtimedir) + + # move seahub to seafile-server/seahub + src_seahubdir = Seahub().projdir + dst_seahubdir = os.path.join(serverdir, 'seahub') + try: + shutil.move(src_seahubdir, dst_seahubdir) + except Exception as e: + error('failed to move seahub to seafile-server/seahub: %s' % e) + + # copy seahub thirdpart libs + seahub_thirdpart = os.path.join(dst_seahubdir, 'thirdpart') + copy_seahub_thirdpart_libs(seahub_thirdpart) + copy_seafdav() + + + # copy_pdf2htmlex() + + # copy shared c libs + copy_shared_libs() + copy_user_manuals() + +def copy_pdf2htmlex(): + '''Copy pdf2htmlEX exectuable and its dependent libs''' + pdf2htmlEX_executable = find_in_path('pdf2htmlEX') + libs = get_dependent_libs(pdf2htmlEX_executable) + + builddir = conf[CONF_BUILDDIR] + dst_lib_dir = os.path.join(builddir, + 'seafile-server', + 'seafile', + 'lib') + + dst_bin_dir = os.path.join(builddir, + 'seafile-server', + 'seafile', + 'bin') + + for lib in libs: + dst_file = os.path.join(dst_lib_dir, os.path.basename(lib)) + if os.path.exists(dst_file): + continue + info('Copying %s' % lib) + must_copy(lib, dst_lib_dir) + + must_copy(pdf2htmlEX_executable, dst_bin_dir) + +def get_dependent_libs(executable): + syslibs = ['libsearpc', 'libseafile', 'libpthread.so', 'libc.so', 'libm.so', 'librt.so', 'libdl.so', 'libselinux.so', 'libresolv.so' ] + def is_syslib(lib): + for syslib in syslibs: + if syslib in lib: + return True + return False + + ldd_output = subprocess.getoutput('ldd %s' % executable) + ret = set() + for line in ldd_output.splitlines(): + tokens = line.split() + if len(tokens) != 4: + continue + if is_syslib(tokens[0]): + continue + + ret.add(tokens[2]) + + return ret + +def copy_shared_libs(): + '''copy shared c libs, such as libevent, glib, libmysqlclient''' + builddir = conf[CONF_BUILDDIR] + + dst_dir = os.path.join(builddir, + 'seafile-server', + 'seafile', + 'lib') + + seafile_path = os.path.join(builddir, + 'seafile-server', + 'seafile', + 'bin', + 'seaf-server') + + seaf_fuse_path = os.path.join(builddir, + 'seafile-server', + 'seafile', + 'bin', + 'seaf-fuse') + + libs = set() + libs.update(get_dependent_libs(seafile_path)) + libs.update(get_dependent_libs(seaf_fuse_path)) + + for lib in libs: + dst_file = os.path.join(dst_dir, os.path.basename(lib)) + if os.path.exists(dst_file): + continue + info('Copying %s' % lib) + shutil.copy(lib, dst_dir) + +def copy_seahub_thirdpart_libs(seahub_thirdpart): + '''copy python third-party libraries from ${thirdpartdir} to + seahub/thirdpart + + ''' + src = conf[CONF_THIRDPARTDIR] + dst = seahub_thirdpart + + try: + for name in os.listdir(src): + 
src_path = os.path.join(src, name) + target_path = os.path.join(dst, name) + if os.path.isdir(src_path): + shutil.copytree(src_path, target_path) + else: + shutil.copy(src_path, target_path) + except Exception as e: + error('failed to copy seahub thirdpart libs: %s' % e) + +def strip_symbols(): + def do_strip(fn): + run('chmod u+w %s' % fn) + info('stripping: %s' % fn) + run('strip "%s"' % fn) + + def remove_static_lib(fn): + info('removing: %s' % fn) + os.remove(fn) + + for parent, dnames, fnames in os.walk('seafile-server/seafile'): + dummy = dnames # avoid pylint 'unused' warning + for fname in fnames: + fn = os.path.join(parent, fname) + if os.path.isdir(fn): + continue + + if fn.endswith(".a") or fn.endswith(".la"): + remove_static_lib(fn) + continue + + if os.path.islink(fn): + continue + + finfo = subprocess.getoutput('file "%s"' % fn) + + if 'not stripped' in finfo: + do_strip(fn) + +def create_tarball(tarball_name): + '''call tar command to generate a tarball''' + version = conf[CONF_VERSION] + + serverdir = 'seafile-server' + versioned_serverdir = 'seafile-server-' + version + + # move seafile-server to seafile-server-${version} + try: + shutil.move(serverdir, versioned_serverdir) + except Exception as e: + error('failed to move %s to %s: %s' % (serverdir, versioned_serverdir, e)) + + ignored_patterns = [ + # common ignored files + '*.pyc', + '*~', + '*#', + + # seahub + os.path.join(versioned_serverdir, 'seahub', '.git*'), + os.path.join(versioned_serverdir, 'seahub', 'media', 'flexpaper*'), + os.path.join(versioned_serverdir, 'seahub', 'avatar', 'testdata*'), + + # seafile + os.path.join(versioned_serverdir, 'seafile', 'share*'), + os.path.join(versioned_serverdir, 'seafile', 'include*'), + os.path.join(versioned_serverdir, 'seafile', 'lib', 'pkgconfig*'), + os.path.join(versioned_serverdir, 'seafile', 'lib64', 'pkgconfig*'), + os.path.join(versioned_serverdir, 'seafile', 'bin', 'searpc-codegen.py'), + os.path.join(versioned_serverdir, 'seafile', 'bin', 'seafile-admin'), + os.path.join(versioned_serverdir, 'seafile', 'bin', 'seafile'), + ] + + excludes_list = [ '--exclude=%s' % pattern for pattern in ignored_patterns ] + excludes = ' '.join(excludes_list) + + tar_cmd = 'tar czf %(tarball_name)s %(versioned_serverdir)s %(excludes)s' \ + % dict(tarball_name=tarball_name, + versioned_serverdir=versioned_serverdir, + excludes=excludes) + + if run(tar_cmd) < 0: + error('failed to generate the tarball') + +def gen_tarball(): + # strip symbols of libraries to reduce size + if not conf[CONF_NO_STRIP]: + try: + strip_symbols() + except Exception as e: + error('failed to strip symbols: %s' % e) + + # determine the output name + # 64-bit: seafile-server_1.2.2_x86-64.tar.gz + # 32-bit: seafile-server_1.2.2_i386.tar.gz + version = conf[CONF_VERSION] + arch = os.uname()[-1].replace('_', '-') + if 'arm' in platform.machine(): + arch = 'pi' + elif arch != 'x86-64': + arch = 'i386' + + dbg = '' + if conf[CONF_NO_STRIP]: + dbg = '.dbg' + + tarball_name = 'seafile-server_%(version)s_%(arch)s%(dbg)s.tar.gz' \ + % dict(version=version, arch=arch, dbg=dbg) + dst_tarball = os.path.join(conf[CONF_OUTPUTDIR], tarball_name) + + # generate the tarball + try: + create_tarball(tarball_name) + except Exception as e: + error('failed to generate tarball: %s' % e) + + # move tarball to outputdir + try: + shutil.copy(tarball_name, dst_tarball) + except Exception as e: + error('failed to copy %s to %s: %s' % (tarball_name, dst_tarball, e)) + + print('---------------------------------------------') + print('The 
build is successful. Output is:\t%s' % dst_tarball) + print('---------------------------------------------') + +def main(): + parse_args() + setup_build_env() + + libsearpc = Libsearpc() + seafile = Seafile() + seahub = Seahub() + + libsearpc.uncompress() + libsearpc.build() + + seafile.uncompress() + seafile.build() + + seahub.uncompress() + seahub.build() + + copy_scripts_and_libs() + gen_tarball() + +if __name__ == '__main__': + main() diff --git a/scripts/check-db-type.py b/scripts/check-db-type.py new file mode 100644 index 0000000000..daf2f67703 --- /dev/null +++ b/scripts/check-db-type.py @@ -0,0 +1,23 @@ +import sys +from configparser import ConfigParser + +if len(sys.argv) != 2: + print('check-db-type.py ', file=sys.stderr) + +seafile_conf_file = sys.argv[1] + +parser = ConfigParser() +parser.read(seafile_conf_file) + +if not parser.has_option('database', 'type'): + print('sqlite') +else: + db_type = parser.get('database', 'type') + if db_type == 'sqlite': + print('sqlite') + elif db_type == 'mysql': + print('mysql') + elif db_type == 'pgsql': + print('pgsql') + else: + print('unknown') diff --git a/scripts/check_init_admin.py b/scripts/check_init_admin.py new file mode 100644 index 0000000000..cc7bcd37ae --- /dev/null +++ b/scripts/check_init_admin.py @@ -0,0 +1,368 @@ +#coding: UTF-8 + +'''This script would check if there is admin, and prompt the user to create a new one if non exist''' +import json +import sys +import os +import time +import re +import shutil +import glob +import subprocess +import hashlib +import getpass +import uuid +import warnings + +from configparser import ConfigParser + +from seaserv import ccnet_api + +try: + import readline # pylint: disable=W0611 +except ImportError: + pass + + +SERVER_MANUAL_HTTP = 'https://download.seafile.com/published/seafile-manual/home.md' + +class Utils(object): + '''Groups all helper functions here''' + @staticmethod + def welcome(): + '''Show welcome message''' + welcome_msg = '''\ +----------------------------------------------------------------- +This script will guide you to setup your seafile server using MySQL. +Make sure you have read seafile server manual at + + %s + +Press ENTER to continue +-----------------------------------------------------------------''' % SERVER_MANUAL_HTTP + print(welcome_msg) + input() + + @staticmethod + def highlight(content): + '''Add ANSI color to content to get it highlighted on terminal''' + return '\x1b[33m%s\x1b[m' % content + + @staticmethod + def info(msg): + print(msg) + + @staticmethod + def error(msg): + '''Print error and exit''' + print() + print('Error: ' + msg) + sys.exit(1) + + @staticmethod + def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): + '''Run a program and wait it to finish, and return its exit code. The + standard output of this program is supressed. 
+ + ''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(argv, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env) + return proc.wait() + + @staticmethod + def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): + '''Like run_argv but specify a command line string instead of argv''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(cmdline, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env, + shell=True) + return proc.wait() + + @staticmethod + def prepend_env_value(name, value, env=None, seperator=':'): + '''prepend a new value to a list''' + if env is None: + env = os.environ + + try: + current_value = env[name] + except KeyError: + current_value = '' + + new_value = value + if current_value: + new_value += seperator + current_value + + env[name] = new_value + + @staticmethod + def must_mkdir(path): + '''Create a directory, exit on failure''' + try: + os.mkdir(path) + except OSError as e: + Utils.error('failed to create directory %s:%s' % (path, e)) + + @staticmethod + def must_copy(src, dst): + '''Copy src to dst, exit on failure''' + try: + shutil.copy(src, dst) + except Exception as e: + Utils.error('failed to copy %s to %s: %s' % (src, dst, e)) + + @staticmethod + def find_in_path(prog): + if 'win32' in sys.platform: + sep = ';' + else: + sep = ':' + + dirs = os.environ['PATH'].split(sep) + for d in dirs: + d = d.strip() + if d == '': + continue + path = os.path.join(d, prog) + if os.path.exists(path): + return path + + return None + + @staticmethod + def get_python_executable(): + '''Return the python executable. This should be the PYTHON environment + variable which is set in setup-seafile-mysql.sh + + ''' + return os.environ['PYTHON'] + + @staticmethod + def read_config(fn): + '''Return a case sensitive ConfigParser by reading the file "fn"''' + cp = ConfigParser() + cp.optionxform = str + cp.read(fn) + + return cp + + @staticmethod + def write_config(cp, fn): + '''Return a case sensitive ConfigParser by reading the file "fn"''' + with open(fn, 'w') as fp: + cp.write(fp) + + @staticmethod + def ask_question(desc, + key=None, + note=None, + default=None, + validate=None, + yes_or_no=False, + password=False): + '''Ask a question, return the answer. + @desc description, e.g. "What is the port of ccnet?" + + @key a name to represent the target of the question, e.g. "port for + ccnet server" + + @note additional information for the question, e.g. "Must be a valid + port number" + + @default the default value of the question. If the default value is + not None, when the user enter nothing and press [ENTER], the default + value would be returned + + @validate a function that takes the user input as the only parameter + and validate it. It should return a validated value, or throws an + "InvalidAnswer" exception if the input is not valid. 
+ + @yes_or_no If true, the user must answer "yes" or "no", and a boolean + value would be returned + + @password If true, the user input would not be echoed to the + console + + ''' + assert key or yes_or_no + # Format description + print() + if note: + desc += '\n' + note + + desc += '\n' + if yes_or_no: + desc += '[ yes or no ]' + else: + if default: + desc += '[ default "%s" ]' % default + else: + desc += '[ %s ]' % key + + desc += ' ' + while True: + # prompt for user input + if password: + answer = getpass.getpass(desc).strip() + else: + answer = input(desc).strip() + + # No user input: use default + if not answer: + if default: + answer = default + else: + continue + + # Have user input: validate answer + if yes_or_no: + if answer not in ['yes', 'no']: + print(Utils.highlight('\nPlease answer yes or no\n')) + continue + else: + return answer == 'yes' + else: + if validate: + try: + return validate(answer) + except InvalidAnswer as e: + print(Utils.highlight('\n%s\n' % e)) + continue + else: + return answer + + @staticmethod + def validate_port(port): + try: + port = int(port) + except ValueError: + raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port)) + + if port <= 0 or port > 65535: + raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port)) + + return port + + +class InvalidAnswer(Exception): + def __init__(self, msg): + Exception.__init__(self) + self.msg = msg + def __str__(self): + return self.msg + +### END of Utils +#################### + +def need_create_admin(): + users = ccnet_api.get_emailusers('DB', 0, 1) + return len(users) == 0 + +def create_admin(email, passwd): + if ccnet_api.add_emailuser(email, passwd, 1, 1) < 0: + raise Exception('failed to create admin') + else: + print('\n\n') + print('----------------------------------------') + print('Successfully created seafile admin') + print('----------------------------------------') + print('\n\n') + +def ask_admin_email(): + print() + print('----------------------------------------') + print('It\'s the first time you start the seafile server. Now let\'s create the admin account') + print('----------------------------------------') + def validate(email): + # whitespace is not allowed + if re.match(r'[\s]', email): + raise InvalidAnswer('%s is not a valid email address' % Utils.highlight(email)) + # must be a valid email address + if not re.match(r'^.+@.*\..+$', email): + raise InvalidAnswer('%s is not a valid email address' % Utils.highlight(email)) + + return email + + key = 'admin email' + question = 'What is the ' + Utils.highlight('email') + ' for the admin account?' + return Utils.ask_question(question, + key=key, + validate=validate) + +def ask_admin_password(): + def validate(password): + key = 'admin password again' + question = 'Enter the ' + Utils.highlight('password again:') + password_again = Utils.ask_question(question, + key=key, + password=True) + + if password_again != password: + raise InvalidAnswer('password mismatch') + + return password + + key = 'admin password' + question = 'What is the ' + Utils.highlight('password') + ' for the admin account?' 
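+    # Note on the flow here: validate() above re-prompts for the password (again
+    # with echo disabled) and raises InvalidAnswer on a mismatch, so
+    # Utils.ask_question() keeps looping until both entries agree.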
+ return Utils.ask_question(question, + key=key, + password=True, + validate=validate) + + +def main(): + if not need_create_admin(): + return + + password_file = os.path.join(os.environ['SEAFILE_CENTRAL_CONF_DIR'], 'admin.txt') + if os.path.exists(password_file): + with open(password_file, 'r') as fp: + pwinfo = json.load(fp) + email = pwinfo['email'] + passwd = pwinfo['password'] + os.unlink(password_file) + else: + email = ask_admin_email() + passwd = ask_admin_password() + + create_admin(email, passwd) + +if __name__ == '__main__': + try: + main() + except KeyboardInterrupt: + print('\n\n\n') + print(Utils.highlight('Aborted.')) + print() + sys.exit(1) + except Exception as e: + print() + print(Utils.highlight('Error happened during creating seafile admin.')) + print() diff --git a/scripts/index_op.py b/scripts/index_op.py new file mode 100755 index 0000000000..99e851cf1c --- /dev/null +++ b/scripts/index_op.py @@ -0,0 +1,49 @@ +import logging +import argparse + +from seafes.config import seafes_config +from seafes.repo_data import repo_data +from seafes.mq import get_mq + +seafes_config.load_index_master_conf() +mq = get_mq(seafes_config.subscribe_mq, + seafes_config.subscribe_server, + seafes_config.subscribe_port, + seafes_config.subscribe_password) + +def put_to_redis(repo_id, cmt_id): + msg = "index_recover\t%s\t%s" % (repo_id, cmt_id) + mq.lpush('index_task', msg) + +def show_all_task(): + logging.info("index task count: %s" % mq.llen('index_task')) + +def restore_all_repo(): + start, count = 0, 1000 + while True: + try: + repo_commits = repo_data.get_repo_id_commit_id(start, count) + except Exception as e: + logging.error("Error: %s" % e) + return + else: + if len(repo_commits) == 0: + break + for repo_id, commit_id in repo_commits: + put_to_redis(repo_id, commit_id) + start += 1000 + +def main(): + parser = argparse.ArgumentParser(description='main program') + parser.add_argument('--mode') + parser_args = parser.parse_args() + + if parser_args.mode == 'resotre_all_repo': + restore_all_repo() + elif parser_args.mode == 'show_all_task': + show_all_task() + + +if __name__ == '__main__': + main() + diff --git a/scripts/migrate-repo.py b/scripts/migrate-repo.py new file mode 100644 index 0000000000..a4fd7328df --- /dev/null +++ b/scripts/migrate-repo.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python3 + +import os +import sys +import logging +import configparser +from sqlalchemy import create_engine, text +from sqlalchemy.orm import sessionmaker +from migrate import ObjMigrateWorker +from seafobj.objstore_factory import objstore_factory +from seaserv import seafile_api as api +from seaserv import REPO_STATUS_READ_ONLY, REPO_STATUS_NORMAL + +logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO) + +def main(argv): + if len(argv) == 4: + all_migrate = False + repo_id = argv[1] + orig_storage_id = argv[2] + dest_storage_id = argv[3] + elif len(argv) == 3: + all_migrate = True + orig_storage_id = argv[1] + dest_storage_id = argv[2] + + if all_migrate: + migrate_repos(orig_storage_id, dest_storage_id) + else: + migrate_repo(repo_id, orig_storage_id, dest_storage_id) + +def parse_seafile_config(): + env = os.environ + seafile_conf = os.path.join(env['SEAFILE_CENTRAL_CONF_DIR'], 'seafile.conf') + cp = configparser.ConfigParser() + cp.read(seafile_conf) + host = cp.get('database', 'host') + port = cp.get('database', 'port') + user = cp.get('database', 'user') + passwd = cp.get('database', 'password') + db_name = cp.get('database', 'db_name') + return host, port, user, passwd, 
db_name + +def get_repo_ids(): + host, port, user, passwd, db_name = parse_seafile_config() + url = 'mysql+pymysql://' + user + ':' + passwd + '@' + host + ':' + port + '/' + db_name + print(url) + sql = 'SELECT repo_id FROM Repo' + try: + engine = create_engine(url, echo=False) + session = sessionmaker(engine)() + result_proxy = session.execute(text(sql)) + except: + return None + else: + result = result_proxy.fetchall() + return result + +def migrate_repo(repo_id, orig_storage_id, dest_storage_id): + api.set_repo_status (repo_id, REPO_STATUS_READ_ONLY) + dtypes = ['commits', 'fs', 'blocks'] + workers = [] + for dtype in dtypes: + obj_stores = objstore_factory.get_obj_stores(dtype) + #If these storage ids passed in do not exist in conf, stop migrate this repo. + if orig_storage_id not in obj_stores or dest_storage_id not in obj_stores: + logging.warning('Storage id passed in does not exist in configuration.\n') + api.set_repo_status (repo_id, REPO_STATUS_NORMAL) + sys.exit() + + orig_store = obj_stores[orig_storage_id] + dest_store = obj_stores[dest_storage_id] + + try: + worker = ObjMigrateWorker (orig_store, dest_store, dtype, repo_id) + worker.start() + workers.append(worker) + except: + logging.warning('Failed to migrate repo %s.', repo_id) + + try: + for w in workers: + w.join() + except: + api.set_repo_status (repo_id, REPO_STATUS_NORMAL) + sys.exit(1) + + for w in workers: + if w.exit_code == 1: + logging.warning(w.exception) + api.set_repo_status (repo_id, REPO_STATUS_NORMAL) + sys.exit(1) + + if api.update_repo_storage_id(repo_id, dest_storage_id) < 0: + logging.warning('Failed to update repo [%s] storage_id.\n', repo_id) + api.set_repo_status (repo_id, REPO_STATUS_NORMAL) + return + + api.set_repo_status (repo_id, REPO_STATUS_NORMAL) + logging.info('The process of migrating repo [%s] is over.\n', repo_id) + +def migrate_repos(orig_storage_id, dest_storage_id): + repo_ids = get_repo_ids() + + for repo_id in repo_ids: + try: + repo_id = repo_id[0] + except: + continue + api.set_repo_status (repo_id, REPO_STATUS_READ_ONLY) + dtypes = ['commits', 'fs', 'blocks'] + workers = [] + for dtype in dtypes: + obj_stores = objstore_factory.get_obj_stores(dtype) + #If these storage ids passed in do not exist in conf, stop migrate this repo. 
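+            # The repo was switched to READ_ONLY above, so the error branch below
+            # restores it to NORMAL before exiting to avoid leaving the library locked.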
+ if orig_storage_id not in obj_stores or dest_storage_id not in obj_stores: + logging.warning('Storage id passed in does not exist in configuration.\n') + api.set_repo_status (repo_id, REPO_STATUS_NORMAL) + sys.exit() + + orig_store = obj_stores[orig_storage_id] + dest_store = obj_stores[dest_storage_id] + + try: + worker = ObjMigrateWorker (orig_store, dest_store, dtype, repo_id) + worker.start() + workers.append(worker) + except: + logging.warning('Failed to migrate repo %s.', repo_id) + + try: + for w in workers: + w.join() + except: + api.set_repo_status (repo_id, REPO_STATUS_NORMAL) + sys.exit(1) + + for w in workers: + if w.exit_code == 1: + logging.warning(w.exception) + api.set_repo_status (repo_id, REPO_STATUS_NORMAL) + sys.exit(1) + + if api.update_repo_storage_id(repo_id, dest_storage_id) < 0: + logging.warning('Failed to update repo [%s] storage_id.\n', repo_id) + api.set_repo_status (repo_id, REPO_STATUS_NORMAL) + return + + api.set_repo_status (repo_id, REPO_STATUS_NORMAL) + logging.info('The process of migrating repo [%s] is over.\n', repo_id) + +if __name__ == '__main__': + main(sys.argv) diff --git a/scripts/migrate-repo.sh b/scripts/migrate-repo.sh new file mode 100755 index 0000000000..b4afa22ffc --- /dev/null +++ b/scripts/migrate-repo.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +echo "" + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_conf_dir=${TOPDIR}/conf +seafile_rpc_pipe_path=${INSTALLPATH}/runtime +migrate=${INSTALLPATH}/migrate-repo.py + +script_name=$0 +function usage () { + echo "usage : " + echo " ./$(basename ${script_name})" \ + "[repo id to migrate]" \ + "" \ + "" + echo"" +} + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function do_migrate () { + export CCNET_CONF_DIR=${default_ccnet_conf_dir} + export SEAFILE_CONF_DIR=${default_seafile_data_dir} + export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir} + export SEAFILE_RPC_PIPE_PATH=${seafile_rpc_pipe_path} + export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH + $PYTHON ${migrate} $@ +} + +check_python_executable; + +if [ $# -gt 0 ]; +then + for param in $@; + do + if [ ${param} = "-h" -o ${param} = "--help" ]; + then + usage; + exit 1; + fi + done +fi + +if [ $# -ne 3 ] && [ $# -ne 2 ]; +then + usage; + exit 1; +fi + +do_migrate $@; + +echo "Done." 
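+
+# Example invocations (the storage ids are placeholders for ids defined in your
+# multi-backend storage configuration):
+#   ./migrate-repo.sh <repo-id> orig_backend dest_backend   # migrate a single library
+#   ./migrate-repo.sh orig_backend dest_backend             # migrate all libraries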
diff --git a/scripts/migrate.py b/scripts/migrate.py new file mode 100755 index 0000000000..f0f8211328 --- /dev/null +++ b/scripts/migrate.py @@ -0,0 +1,213 @@ +#!/usr/bin/env python3 +#coding: utf-8 + +import os +import re +import sys +import logging +import queue +import threading +from threading import Thread +from uuid import UUID +from seafobj.objstore_factory import SeafObjStoreFactory + +logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO) + +class Worker(Thread): + def __init__(self, do_work, task_queue): + Thread.__init__(self) + self.do_work = do_work + self.task_queue = task_queue + + def run(self): + while True: + try: + task = self.task_queue.get() + if task is None: + break + self.do_work(task) + except Exception as e: + logging.warning('Failed to execute task: %s' % e) + finally: + self.task_queue.task_done() + +class ThreadPool(object): + def __init__(self, do_work, nworker=20): + self.do_work = do_work + self.nworker = nworker + self.task_queue = queue.Queue(maxsize = 2000) + + def start(self): + for i in range(self.nworker): + Worker(self.do_work, self.task_queue).start() + + def put_task(self, task): + self.task_queue.put(task) + + def join(self): + self.task_queue.join() + # notify all thread to stop + for i in range(self.nworker): + self.task_queue.put(None) + +class Task(object): + def __init__(self, repo_id, repo_version, obj_id): + self.repo_id = repo_id + self.repo_version = repo_version + self.obj_id = obj_id + +class ObjMigrateWorker(Thread): + def __init__(self, orig_store, dest_store, dtype, repo_id = None): + Thread.__init__(self) + self.lock = threading.Lock() + self.dtype = dtype + self.orig_store = orig_store + self.dest_store = dest_store + self.repo_id = repo_id + self.thread_pool = ThreadPool(self.do_work) + self.write_count = 0 + self.fetch_count = 0 + self.dest_objs = {} + self.object_list_file_path = '' + self.fd = None + self.exit_code = 0 + self.exception = None + + def run(self): + try: + self._run() + except Exception as e: + self.exit_code = 1 + self.exception = e + + def _run(self): + if 'OBJECT_LIST_FILE_PATH' in os.environ: + if self.repo_id: + self.object_list_file_path = '.'.join(['_'.join([os.environ['OBJECT_LIST_FILE_PATH'], self.repo_id]), self.dtype]) + else: + self.object_list_file_path = '.'.join([os.environ['OBJECT_LIST_FILE_PATH'], self.dtype]) + + if self.object_list_file_path and \ + os.path.exists(self.object_list_file_path) and \ + os.path.getsize(self.object_list_file_path) > 0: + logging.info('Start to load [%s] destination object from file' % self.dtype) + with open(self.object_list_file_path, 'r') as f: + for line in f: + obj = line.rstrip('\n').split('/', 1) + if self.invalid_obj(obj): + continue + self.fetch_count += 1 + if obj[0] in self.dest_objs: + self.dest_objs[obj[0]].add(obj[1]) + else: + self.dest_objs[obj[0]] = set() + self.dest_objs[obj[0]].add(obj[1]) + + else: + logging.info('Start to fetch [%s] object from destination' % self.dtype) + if self.object_list_file_path: + f = open(self.object_list_file_path, 'a') + for obj in self.dest_store.list_objs(self.repo_id): + if self.invalid_obj(obj): + continue + self.fetch_count += 1 + if obj[0] in self.dest_objs: + self.dest_objs[obj[0]].add(obj[1]) + else: + self.dest_objs[obj[0]] = set() + self.dest_objs[obj[0]].add(obj[1]) + if self.object_list_file_path: + f.write('/'.join(obj[:2]) + '\n') + if self.fetch_count % 100 == 0: + f.flush() + if self.object_list_file_path: + f.close() + logging.info('[%s] [%d] objects exist in destination' % 
(self.dtype, self.fetch_count)) + + if self.object_list_file_path: + self.fd = open(self.object_list_file_path, 'a') + logging.info('Start to migrate [%s] object' % self.dtype) + self.thread_pool.start() + self.migrate() + self.thread_pool.join() + if self.object_list_file_path: + self.fd.close() + logging.info('Complete migrate [%s] object' % self.dtype) + + def do_work(self, task): + try: + exists = False + if task.repo_id in self.dest_objs: + if task.obj_id in self.dest_objs[task.repo_id]: + exists = True + + except Exception as e: + logging.warning('[%s] Failed to check object %s existence from repo %s: %s' % (self.dtype, task.obj_id, task.repo_id, e)) + raise + + if not exists: + try: + data = self.orig_store.read_obj_raw(task.repo_id, task.repo_version, task.obj_id) + except Exception as e: + logging.warning('[%s] Failed to read object %s from repo %s: %s' % (self.dtype, task.obj_id, task.repo_id, e)) + raise + + try: + self.dest_store.write_obj(data, task.repo_id, task.obj_id) + self.write_count += 1 + if self.write_count % 100 == 0: + logging.info('[%s] task: %s objects written to destination.', self.dtype, self.write_count) + + if self.object_list_file_path: + with self.lock: + self.fd.write('/'.join([task.repo_id, task.obj_id]) + '\n') + if self.write_count % 100 == 0: + self.fd.flush() + except Exception as e: + logging.warning('[%s] Failed to write object %s from repo %s: %s' % (self.dtype, task.obj_id, task.repo_id, e)) + raise + + def migrate(self): + try: + obj_list = self.orig_store.list_objs(self.repo_id) + except Exception as e: + logging.warning('[%s] Failed to list all objects: %s' % (self.dtype, e)) + raise + + for obj in obj_list: + if self.invalid_obj(obj): + continue + repo_id = obj[0] + obj_id = obj[1] + task = Task(repo_id, 1, obj_id) + self.thread_pool.put_task(task) + + def invalid_obj(self, obj): + if len(obj) < 2: + return True + try: + UUID(obj[0], version = 4) + except ValueError: + return True + if len(obj[1]) != 40 or not re.match('\A[0-9a-f]+\Z', obj[1]): + return True + return False + +def main(): + try: + orig_obj_factory = SeafObjStoreFactory() + os.environ['SEAFILE_CENTRAL_CONF_DIR'] = os.environ['DEST_SEAFILE_CENTRAL_CONF_DIR'] + except KeyError: + logging.warning('DEST_SEAFILE_CENTRAL_CONF_DIR environment variable is not set.\n') + sys.exit() + + dest_obj_factory = SeafObjStoreFactory() + + dtypes = ['commits', 'fs', 'blocks'] + for dtype in dtypes: + orig_store = orig_obj_factory.get_obj_store(dtype) + dest_store = dest_obj_factory.get_obj_store(dtype) + ObjMigrateWorker(orig_store, dest_store, dtype).start() + +if __name__ == '__main__': + main() diff --git a/scripts/migrate.sh b/scripts/migrate.sh new file mode 100755 index 0000000000..deabe98aad --- /dev/null +++ b/scripts/migrate.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +echo "" + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_conf_dir=${TOPDIR}/conf + +migrate=${INSTALLPATH}/migrate.py + +script_name=$0 +function usage () { + echo "usage : " + echo " ./$(basename ${script_name}) destination_config_file_path" + echo "" +} + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." 
+ echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function do_migrate () { + export CCNET_CONF_DIR=${default_ccnet_conf_dir} + export SEAFILE_CONF_DIR=${default_seafile_data_dir} + export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir} + export DEST_SEAFILE_CENTRAL_CONF_DIR=${dest_seafile_central_conf_dir} + + export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH + + $PYTHON ${migrate} +} + +check_python_executable; + +if [ $# -gt 0 ]; +then + for param in $@; + do + if [ ${param} = "-h" -o ${param} = "--help" ]; + then + usage; + exit 1; + fi + done +fi + +if [ $# -ne 1 ]; +then + usage; + exit 1; +fi + +dest_seafile_central_conf_dir="$1" +do_migrate; + +echo "Done." diff --git a/scripts/pro.py b/scripts/pro.py new file mode 100755 index 0000000000..69cb402eb9 --- /dev/null +++ b/scripts/pro.py @@ -0,0 +1,817 @@ +#!/usr/bin/env python3 + +''' +Setup/Start/Stop the extra components of Seafile Professional + +The diretory layout: +- haiwen + - seafile-server-1.8.0 + - seafile.sh + - seahub.sh + - seafile/ + - seahub/ + - pro + - pro.py + - python + - sqlalchemy/ + - pyes/ + - thrift/ + - libevent + - python-daemon/ + - lockfile/ + - seafes/ + - seafevents/ + - seaf-dav/ + - elasticsearch/ + - misc + - seahub_extra.sql + + - seafile-license.txt + - seahub.db + - seahub_settings.py + - ccnet/ + - seafile-data/ + - seahub-data/ + - pro-data + - search/ + - data/ + - logs/ + - seafevents.conf + - seafdav.conf + - seafevents.db + - index.log + - seafevents.log +''' + +import os +import sys +import glob +import subprocess +import io +import getpass + +try: + import pymysql +except: + pass + +import configparser + +######################## +## Helper functions +######################## + +class InvalidAnswer(Exception): + def __init__(self, msg): + Exception.__init__(self) + self.msg = msg + def __str__(self): + return self.msg + +class Utils(object): + '''Groups all helper functions here''' + @staticmethod + def highlight(content): + '''Add ANSI color to content to get it highlighted on terminal''' + return '\x1b[33m%s\x1b[m' % content + + @staticmethod + def info(msg, newline=True): + sys.stdout.write(msg) + if newline: + sys.stdout.write('\n') + + @staticmethod + def error(msg): + '''Print error and exit''' + print() + print('Error: ' + msg) + sys.exit(1) + + @staticmethod + def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): + '''Run a program and wait it to finish, and return its exit code. The + standard output of this program is supressed. 
+ + ''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(argv, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env) + return proc.wait() + + @staticmethod + def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): + '''Like run_argv but specify a command line string instead of argv''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(cmdline, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env, + shell=True) + return proc.wait() + + @staticmethod + def prepend_env_value(name, value, env=None, seperator=':'): + '''prepend a new value to a list''' + if env is None: + env = os.environ + + try: + current_value = env[name] + except KeyError: + current_value = '' + + new_value = value + if current_value: + new_value += seperator + current_value + + env[name] = new_value + + @staticmethod + def must_mkdir(path): + '''Create a directory, exit on failure''' + try: + os.mkdir(path) + except OSError as e: + Utils.error('failed to create directory %s:%s' % (path, e)) + + @staticmethod + def find_in_path(prog): + if 'win32' in sys.platform: + sep = ';' + else: + sep = ':' + + dirs = os.environ['PATH'].split(sep) + for d in dirs: + d = d.strip() + if d == '': + continue + path = os.path.join(d, prog) + if os.path.exists(path): + return path + + return None + + @staticmethod + def read_config(fn=None): + '''Return a case sensitive ConfigParser by reading the file "fn"''' + cp = configparser.ConfigParser() + cp.optionxform = str + if fn: + cp.read(fn) + + return cp + + @staticmethod + def write_config(cp, fn): + '''Return a case sensitive ConfigParser by reading the file "fn"''' + with open(fn, 'w') as fp: + cp.write(fp) + + @staticmethod + def ask_question(desc, + key=None, + note=None, + default=None, + validate=None, + yes_or_no=False, + password=False): + '''Ask a question, return the answer. + @desc description, e.g. "What is the port of ccnet?" + + @key a name to represent the target of the question, e.g. "port for + ccnet server" + + @note additional information for the question, e.g. "Must be a valid + port number" + + @default the default value of the question. If the default value is + not None, when the user enter nothing and press [ENTER], the default + value would be returned + + @validate a function that takes the user input as the only parameter + and validate it. It should return a validated value, or throws an + "InvalidAnswer" exception if the input is not valid. 
+ + @yes_or_no If true, the user must answer "yes" or "no", and a boolean + value would be returned + + @password If true, the user input would not be echoed to the + console + + ''' + assert key or yes_or_no + # Format description + print() + if note: + desc += '\n' + note + + desc += '\n' + if yes_or_no: + desc += '[ yes or no ]' + else: + if default: + desc += '[ default "%s" ]' % default + else: + desc += '[ %s ]' % key + + desc += ' ' + while True: + # prompt for user input + if password: + answer = getpass.getpass(desc).strip() + else: + answer = input(desc).strip() + + # No user input: use default + if not answer: + if default: + answer = default + else: + continue + + # Have user input: validate answer + if yes_or_no: + if answer not in ['yes', 'no']: + print(Utils.highlight('\nPlease answer yes or no\n')) + continue + else: + return answer == 'yes' + else: + if validate: + try: + return validate(answer) + except InvalidAnswer as e: + print(Utils.highlight('\n%s\n' % e)) + continue + else: + return answer + + @staticmethod + def validate_port(port): + try: + port = int(port) + except ValueError: + raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port)) + + if port <= 0 or port > 65535: + raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port)) + + return port + + @staticmethod + def get_python_executable(): + '''Find a suitable python executable''' + try_list = [ + 'python3', + ] + + for prog in try_list: + path = Utils.find_in_path(prog) + if path is not None: + return path + + path = os.environ.get('PYTHON', 'python') + + if not path: + Utils.error('Can not find python executable') + + return path + + @staticmethod + def pkill(process): + '''Kill the program with the given name''' + argv = [ + 'pkill', '-f', process + ] + + Utils.run_argv(argv) + +class EnvManager(object): + '''System environment and directory layout''' + def __init__(self): + self.install_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + self.top_dir = os.path.dirname(self.install_path) + self.bin_dir = os.path.join(self.install_path, 'seafile', 'bin') + self.central_config_dir = os.path.join(self.top_dir, 'conf') + + self.pro_data_dir = os.path.join(self.top_dir, 'pro-data') + self.pro_program_dir = os.path.join(self.install_path, 'pro') + self.pro_pylibs_dir = os.path.join(self.pro_program_dir, 'python') + self.pro_misc_dir = os.path.join(self.pro_program_dir, 'misc') + + self.seafes_dir = os.path.join(self.pro_pylibs_dir, 'seafes') + self.seahub_dir = os.path.join(self.install_path, 'seahub') + + self.ccnet_dir = os.path.join(self.top_dir, 'ccnet') + self.seafile_dir = os.path.join(self.top_dir, 'seafile-data') + self.central_config_dir = os.path.join(self.top_dir, 'conf') + self.seafile_rpc_pipe_path = os.path.join(self.install_path, 'runtime'); + + def get_seahub_env(self): + '''Prepare for seahub syncdb''' + env = dict(os.environ) + env['CCNET_CONF_DIR'] = self.ccnet_dir + env['SEAFILE_CONF_DIR'] = self.seafile_dir + env['SEAFILE_CENTRAL_CONF_DIR'] = self.central_config_dir + env['SEAFILE_RPC_PIPE_PATH'] = self.seafile_rpc_pipe_path + env['SEAFES_DIR'] = self.seafes_dir + env['SEAHUB_DIR'] = self.seahub_dir + self.setup_python_path(env) + return env + + def setup_python_path(self, env): + '''And PYTHONPATH and CCNET_CONF_DIR/SEAFILE_CONF_DIR to env, which is + needed by seahub + + ''' + extra_python_path = [ + self.pro_pylibs_dir, + + os.path.join(self.top_dir, 'conf'), # LDAP sync has to access seahub_settings.py + os.path.join(self.install_path, 'seahub', 
'thirdpart'), + os.path.join(self.install_path, 'seahub-extra'), + os.path.join(self.install_path, 'seahub-extra', 'thirdparts'), + + os.path.join(self.install_path, 'seafile/lib/python3/site-packages'), + os.path.join(self.install_path, 'seafile/lib64/python3/site-packages'), + ] + + for path in extra_python_path: + Utils.prepend_env_value('PYTHONPATH', path, env=env) + +######################## +## END helper functions +######################## + +class Elasticsearch(object): + def __init__(self): + self.es_executable = os.path.join(env_mgr.pro_program_dir, + 'elasticsearch', 'bin', 'elasticsearch') + + self.es_logs_dir = os.path.join(env_mgr.pro_data_dir, 'search', 'logs') + self.es_data_dir = os.path.join(env_mgr.pro_data_dir, 'search', 'data') + + def start(self): + '''Start Elasticsearch. We use -D command line args to specify the + location of logs and data + + ''' + argv = [ + self.es_executable, + '-Des.path.logs=%s' % self.es_logs_dir, + '-Des.path.data=%s' % self.es_data_dir, + ] + Utils.run_argv(argv, suppress_stdout=True, suppress_stderr=True) + + def stop(self): + Utils.pkill('org.elasticsearch.bootstrap.ElasticSearch') + + +class DBConf(object): + '''Abstract class for database configuration''' + TYPE_SQLITE = 'sqlite' + TYPE_MYSQL = 'mysql' + + DB_SECTION = 'DATABASE' + def __init__(self, db_type): + self.db_type = db_type + + def generate_conf(self, config): + raise NotImplementedError + + def create_extra_tables(self): + raise NotImplementedError + + def generate_config_text(self): + config = Utils.read_config() + self.generate_conf(config) + + buf = io.StringIO() + config.write(buf) + buf.flush() + + return buf.getvalue() + +class MySQLDBConf(DBConf): + def __init__(self): + DBConf.__init__(self, self.TYPE_MYSQL) + + self.mysql_host = '' + self.mysql_port = '' + self.mysql_user = '' + self.mysql_password = '' + self.mysql_db = '' + + self.conn = None + + def generate_conf(self, config): + # [DATABASE] + # type=mysql + # path=x.db + # username=seafevents + # password=seafevents + # name=seafevents + # host=localhost + config.add_section(self.DB_SECTION) + config.set(self.DB_SECTION, 'type', 'mysql') + + if self.mysql_host: + config.set(self.DB_SECTION, 'host', self.mysql_host) + + if self.mysql_port: + config.set(self.DB_SECTION, 'port', str(self.mysql_port)) + + config.set(self.DB_SECTION, 'username', self.mysql_user) + config.set(self.DB_SECTION, 'password', self.mysql_password) + config.set(self.DB_SECTION, 'name', self.mysql_db) + + def create_extra_tables(self): + self.get_conn() + sql_file = os.path.join(env_mgr.pro_misc_dir, 'seahub_extra.mysql.sql') + with open(sql_file, 'r') as fp: + content = fp.read() + + sqls = content.split(';') + + for sql in sqls: + sql = sql.strip() + if not sql: + continue + + print('>>> sql is', sql, len(sql)) + self.exec_sql(sql) + + def exec_sql(self, sql): + cursor = self.conn.cursor() + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to create extra tables: %s' % e.args[1]) + else: + Utils.error('Failed to create extra tables: %s' % e) + + def get_conn(self): + print('host is', self.mysql_host) + print('port is', self.mysql_port) + kwargs = dict(user=self.mysql_user, + passwd=self.mysql_password, + db=self.mysql_db) + if self.mysql_port: + kwargs['port'] = self.mysql_port + if self.mysql_host: + kwargs['host'] = self.mysql_host + + try: + self.conn = pymysql.connect(**kwargs) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + 
Utils.error('Failed to connect to mysql database %s: %s' % (self.mysql_db, e.args[1])) + else: + Utils.error('Failed to connect to mysql database %s: %s' % (self.mysql_db, e)) + +class SQLiteDBConf(DBConf): + def __init__(self): + DBConf.__init__(self, self.TYPE_SQLITE) + self.db_path = os.path.join(env_mgr.pro_data_dir, 'seafevents.db') + + def generate_conf(self, config): + # [DATABASE] + # type=sqlite3 + # path=x.db + config.add_section(self.DB_SECTION) + config.set(self.DB_SECTION, 'type', 'sqlite3') + config.set(self.DB_SECTION, 'path', self.db_path) + + def create_extra_tables(self): + seahub_db = os.path.join(env_mgr.top_dir, 'seahub.db') + sql_file = os.path.join(env_mgr.pro_misc_dir, 'seahub_extra.sqlite3.sql') + + Utils.info('Create extra database tables ... ', newline=False) + cmd = 'sqlite3 %s < %s' % (seahub_db, sql_file) + if os.system(cmd) != 0: + Utils.error('\nfailed to create seahub extra database tables') + Utils.info('Done') + + +class ProfessionalConfigurator(object): + '''Main abstract class for the config process ''' + def __init__(self, args, migrate=False): + self.args = args + self.migrate = migrate + self.db_type = '' + self.db_config = None # database config strategy + self.seafevents_conf = os.path.join(env_mgr.central_config_dir, 'seafevents.conf') + + def check_pre_condition(self): + raise NotImplementedError + + def config(self): + raise NotImplementedError + + def generate(self): + self.generate_seafevents_conf() + + def generate_seafevents_conf(self): + template = '''\ +%(db_config_text)s + +[AUDIT] +enabled = true + +[INDEX FILES] +enabled = true +interval = 10m + +highlight = fvh + +## If true, indexes the contents of office/pdf files while updating search index +## Note: If you change this option from "false" to "true", then you need to clear the search index and update the index again. See the FAQ for details. +index_office_pdf = true + +[SEAHUB EMAIL] +enabled = true + +## interval of sending Seahub email. 
Can be s(seconds), m(minutes), h(hours), d(days) +interval = 30m + +# Enable statistics +[STATISTICS] +enabled=true +''' + db_config_text = self.db_config.generate_config_text() + if not os.path.exists(env_mgr.pro_data_dir): + os.makedirs(env_mgr.pro_data_dir) + os.chmod(env_mgr.pro_data_dir, 0o700) + + with open(self.seafevents_conf, 'w') as fp: + fp.write(template % dict(db_config_text=db_config_text)) + +class MigratingProfessionalConfigurator(ProfessionalConfigurator): + '''This scripts is used standalone to migrate from community version to + professional version + + ''' + def __init__(self, args): + ProfessionalConfigurator.__init__(self, args, migrate=True) + + def check_pre_condition(self): + self.check_java() + + def config(self): + self.detect_db_type() + # self.create_extra_tables() + self.update_avatars_link() + + def detect_db_type(self): + '''Read database info from seahub_settings.py''' + sys.path.insert(0, env_mgr.central_config_dir) + try: + from seahub_settings import DATABASES # pylint: disable=F0401 + except ImportError: + print('Failed to import "DATABASES" from seahub_settings.py, assuming sqlite3') + self.db_config = SQLiteDBConf() + return + + try: + default_config = DATABASES['default'] + if default_config['ENGINE'] == 'django.db.backends.mysql': + db_config = MySQLDBConf() + db_config.mysql_host = default_config.get('HOST', '') + db_config.mysql_port = default_config.get('PORT', '') + db_config.mysql_user = default_config.get('USER', '') + db_config.mysql_password = default_config.get('PASSWORD', '') + db_config.mysql_db = default_config['NAME'] + + if db_config.mysql_port: + db_config.mysql_port = int(db_config.mysql_port) + + print('Your seafile server is using mysql') + + self.db_config = db_config + else: + print('Your seafile server is using sqlite3') + self.db_config = SQLiteDBConf() + + except KeyError: + Utils.error('Error in your config %s' % \ + os.path.join(env_mgr.top_dir, 'seahub_settings.py')) + + def update_avatars_link(self): + minor_upgrade_script = os.path.join(env_mgr.install_path, 'upgrade', 'minor-upgrade.sh') + argv = [ + minor_upgrade_script + ] + + if Utils.run_argv(argv) != 0: + Utils.error('failed to update avatars folder') + + def check_java(self): + Utils.info('\nChecking java ... ', newline=False) + if not Utils.find_in_path('java'): + msg = '''\nJava is not found. 
instal it first.\n + On Debian/Ubuntu: apt-get install default-jre + On CentOS/RHEL: yum install jre + ''' + Utils.error(msg) + + Utils.info('Done') + + def create_extra_tables(self): + '''Create seahub-extra database tables''' + self.db_config.create_extra_tables() + +class SetupProfessionalConfigurator(ProfessionalConfigurator): + '''This script is invokded by setup-seafile.sh/setup-seafile-mysql.sh to + generate seafile pro related conf + + To setup sqlite3: + ./pro.py setup + + To setup mysql: + ./pro.py setup --mysql + --mysql_host= + --mysql_port= + --mysql_user= + --mysql_password= + --mysql_db= + + ''' + def __init__(self, args): + ProfessionalConfigurator.__init__(self, args, migrate=False) + + def config(self): + if self.args.mysql: + db_config = MySQLDBConf() + db_config.mysql_host = self.args.mysql_host + db_config.mysql_port = self.args.mysql_port + db_config.mysql_user = self.args.mysql_user + db_config.mysql_password = self.args.mysql_password + db_config.mysql_db = self.args.mysql_db + else: + db_config = SQLiteDBConf() + + self.db_config = db_config + + def check_pre_condition(self): + pass + +def do_setup(args): + global pro_config + + if args.migrate: + pro_config = MigratingProfessionalConfigurator(args) + else: + pro_config = SetupProfessionalConfigurator(args) + + pro_config.check_pre_condition() + pro_config.config() + pro_config.generate() + +def handle_search_commands(args): + '''provide search related utility''' + if args.update: + update_search_index() + elif args.clear: + delete_search_index() + +def get_seafes_env(): + env = env_mgr.get_seahub_env() + events_conf = os.path.join(env_mgr.central_config_dir, 'seafevents.conf') + + env['EVENTS_CONFIG_FILE'] = events_conf + + return env + +def update_search_index(): + argv = [ + Utils.get_python_executable(), + '-m', 'seafes.index_local', + '--loglevel', 'debug', + 'update', + ] + + Utils.info('\nUpdating search index, this may take a while...\n') + + Utils.run_argv(argv, env=get_seafes_env()) + +def delete_search_index(): + choice = None + while choice not in ('y', 'n', ''): + prompt = 'Delete seafile search index ([y]/n)? 
' + choice = input(prompt).strip() + + if choice == 'n': + return + + argv = [ + Utils.get_python_executable(), + '-m', 'seafes.index_local', + '--loglevel', 'debug', + 'clear', + ] + + Utils.info('\nDelete search index, this may take a while...\n') + + Utils.run_argv(argv, env=get_seafes_env()) + +def handle_ldap_sync_commands(args): + if args.test: + argv = [ + Utils.get_python_executable(), + '-m', 'seafevents.ldap_syncer.run_ldap_sync', + '-t', + ] + else: + argv = [ + Utils.get_python_executable(), + '-m', 'seafevents.ldap_syncer.run_ldap_sync', + ] + + Utils.run_argv(argv, env=env_mgr.get_seahub_env()) + +def handle_virus_scan_commands(args): + argv = [ + Utils.get_python_executable(), + '-m', 'seafevents.virus_scanner.run_virus_scan', + '-c', os.path.join(env_mgr.central_config_dir, 'seafevents.conf'), + ] + + Utils.run_argv(argv, env=env_mgr.get_seahub_env()) + +pro_config = None +env_mgr = EnvManager() + +def main(): + try: + import argparse + except ImportError: + sys.path.insert(0, glob.glob(os.path.join(env_mgr.pro_pylibs_dir, 'argparse*.egg'))[0]) + import argparse + + parser = argparse.ArgumentParser() + subparsers = parser.add_subparsers(title='subcommands', description='') + + # setup + parser_setup = subparsers.add_parser('setup', help='Setup extra components of seafile pro') + parser_setup.set_defaults(func=do_setup) + parser_setup.add_argument('--migrate', help='migrate from community version', action='store_true') + + # for non-migreate setup + parser_setup.add_argument('--mysql', help='use mysql', action='store_true') + parser_setup.add_argument('--mysql_host') + parser_setup.add_argument('--mysql_port') + parser_setup.add_argument('--mysql_user') + parser_setup.add_argument('--mysql_password') + parser_setup.add_argument('--mysql_db') + + # search + parser_search = subparsers.add_parser('search', help='search related utility commands') + parser_search.add_argument('--update', help='update seafile search index', action='store_true') + parser_search.add_argument('--clear', help='delete seafile search index', action='store_true') + parser_search.set_defaults(func=handle_search_commands) + + # ldapsync + parser_ldap_sync = subparsers.add_parser('ldapsync', help='ldap sync commands') + parser_ldap_sync.add_argument('-t', '--test', help='test ldap sync', action='store_true') + parser_ldap_sync.set_defaults(func=handle_ldap_sync_commands) + + # virus scan + parser_virus_scan = subparsers.add_parser('virus_scan', help='virus scan commands') + parser_virus_scan.set_defaults(func=handle_virus_scan_commands) + + if len(sys.argv) == 1: + print(parser.format_help()) + return + + args = parser.parse_args() + args.func(args) + +if __name__ == '__main__': + main() diff --git a/scripts/remove-objs.py b/scripts/remove-objs.py new file mode 100644 index 0000000000..445e415390 --- /dev/null +++ b/scripts/remove-objs.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 + +import sys +import os +import logging +from seafobj.objstore_factory import objstore_factory + +logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO) + +def main(argv): + repo_id = argv[1] + orig_storage_id = argv[2] + + dtypes = ['commits', 'fs', 'blocks'] + for dtype in dtypes: + if 'OBJECT_LIST_FILE_PATH' in os.environ: + object_list_file_path = '.'.join(['_'.join([os.environ['OBJECT_LIST_FILE_PATH'], repo_id]), dtype]) + else: + logging.warning('OBJECT_LIST_FILE_PATH environment does not exist.') + sys.exit() + + obj_stores = objstore_factory.get_obj_stores(dtype) + #If these storage ids passed in do not 
exist in conf, stop migrate this repo. + if orig_storage_id not in obj_stores: + logging.warning('Storage id passed in does not exist in configuration.\n') + sys.exit() + + orig_store = obj_stores[orig_storage_id] + + with open(object_list_file_path, 'r') as f: + for line in f: + obj = line.rstrip('\n').split('/', 1) + try: + orig_store.remove_obj(obj[0], obj[1]) + except Exception as e: + logging.warning('Failed to remove object %s from repo %s:%s' % (obj[1], obj[0], e)) + + logging.info('The process of remove repo [%s] is over.\n', repo_id) + +if __name__ == '__main__': + main(sys.argv) diff --git a/scripts/remove-objs.sh b/scripts/remove-objs.sh new file mode 100755 index 0000000000..41d8043e61 --- /dev/null +++ b/scripts/remove-objs.sh @@ -0,0 +1,79 @@ +#!/bin/bash + +echo "" + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_conf_dir=${TOPDIR}/conf +seafile_rpc_pipe_path=${INSTALLPATH}/runtime +remove=${INSTALLPATH}/remove-objs.py + +script_name=$0 +function usage () { + echo "usage : " + echo " ./$(basename ${script_name})" \ + "" \ + "" \ + echo"" +} + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function do_remove () { + export CCNET_CONF_DIR=${default_ccnet_conf_dir} + export SEAFILE_CONF_DIR=${default_seafile_data_dir} + export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir} + export SEAFILE_RPC_PIPE_PATH=${seafile_rpc_pipe_path} + export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH + $PYTHON ${remove} $@ +} + +check_python_executable; + +if [ $# -gt 0 ]; +then + for param in $@; + do + if [ ${param} = "-h" -o ${param} = "--help" ]; + then + usage; + exit 1; + fi + done +fi + +if [ $# -ne 2 ]; +then + usage; + exit 1; +fi + +do_remove $@; + +echo "Done." diff --git a/scripts/reset-admin.sh b/scripts/reset-admin.sh new file mode 100755 index 0000000000..2c469f574c --- /dev/null +++ b/scripts/reset-admin.sh @@ -0,0 +1,76 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +central_config_dir=${TOPDIR}/conf +pro_pylibs_dir=${INSTALLPATH}/pro/python +seafesdir=$pro_pylibs_dir/seafes + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." 
+ echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi +} + +function prepare_seahub_log_dir() { + logdir=${TOPDIR}/logs + if ! [[ -d ${logsdir} ]]; then + if ! mkdir -p "${logdir}"; then + echo "ERROR: failed to create logs dir \"${logdir}\"" + exit 1 + fi + fi + export SEAHUB_LOG_DIR=${logdir} +} + +check_python_executable; +validate_seafile_data_dir; +prepare_seahub_log_dir; + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export SEAFILE_CONF_DIR=${default_seafile_data_dir} +export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export SEAFILE_RPC_PIPE_PATH=${INSTALLPATH}/runtime + +if [[ -d ${INSTALLPATH}/pro ]]; then + export PYTHONPATH=$PYTHONPATH:$pro_pylibs_dir + export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/ + export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/thirdparts + export SEAFES_DIR=$seafesdir + export SEAFILE_RPC_PIPE_PATH=${INSTALLPATH}/runtime +fi + +manage_py=${INSTALLPATH}/seahub/manage.py +exec "$PYTHON" "$manage_py" createsuperuser diff --git a/scripts/run_index_master.sh b/scripts/run_index_master.sh new file mode 100755 index 0000000000..12ec2d43e3 --- /dev/null +++ b/scripts/run_index_master.sh @@ -0,0 +1,138 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +central_config_dir=${TOPDIR}/conf +pro_pylibs_dir=${INSTALLPATH}/pro/python +pidfile=${INSTALLPATH}/runtime/index_master.pid + + +script_name=$0 +function usage () { + echo "Usage: " + echo + echo " $(basename ${script_name}) { start | stop | restart | python-env }" +} + +if [[ $1 != "start" && $1 != "stop" && $1 != "restart" && $1 != "python-env" ]]; then + usage; + exit 1; +fi + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi +} + +function prepare_log_dir() { + logdir=${TOPDIR}/logs + if ! [[ -d ${logsdir} ]]; then + if ! 
mkdir -p "${logdir}"; then + echo "ERROR: failed to create logs dir \"${logdir}\"" + exit 1 + fi + fi + export LOG_DIR=${logdir} +} + +function before_start() { + check_python_executable; + prepare_log_dir; + validate_seafile_data_dir; + + export SEAFILE_CONF_DIR=${default_seafile_data_dir} + export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir} + export SEAFES_DIR=$pro_pylibs_dir/seafes + export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH + export PYTHONPATH=$PYTHONPATH:$pro_pylibs_dir + export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/ + export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/thirdparts + export EVENTS_CONFIG_FILE=${SEAFILE_CENTRAL_CONF_DIR}/seafevents.conf + export INDEX_MASTER_CONFIG_FILE=${SEAFILE_CENTRAL_CONF_DIR}/index-master.conf +} + +run_python() { + before_start; + $PYTHON ${@:2} +} + +start_index_master() { + before_start; + nohup $PYTHON -m seafes.index_master --loglevel debug --logfile ${logdir}/index_master.log start & echo $! > $pidfile + sleep 2 + if ! pgrep -f "seafes.index_master" 2>/dev/null 1>&2; then + printf "\033[33mError:Index master failed to start.\033[m\n" + echo "Please try to run \"./run_index_master.sh start\" again" + exit 1; + fi + echo + echo "Index master is started" + echo +} + +stop_index_master() { + if pgrep -f "seafes.index_worker" 2>/dev/null 1>&2; then + printf "\033[33mError:Index worker need be stopped first.\033[m\n" + exit 1; + fi + + if [[ -f ${pidfile} ]]; then + pid=$(cat "${pidfile}") + echo "Stopping index master ..." + kill ${pid} + rm -f ${pidfile} + return 0 + else + echo "Index master is not running" + fi +} + +case $1 in + "start" ) + start_index_master; + ;; + "stop" ) + stop_index_master; + ;; + "restart" ) + stop_index_master + sleep 2 + start_index_master + ;; + "python-env" ) + run_python "$@" + ;; +esac + diff --git a/scripts/run_index_worker.sh b/scripts/run_index_worker.sh new file mode 100755 index 0000000000..e5f5a9e733 --- /dev/null +++ b/scripts/run_index_worker.sh @@ -0,0 +1,122 @@ +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +central_config_dir=${TOPDIR}/conf +pro_pylibs_dir=${INSTALLPATH}/pro/python +pidfile=${INSTALLPATH}/runtime/index_worker.pid + + +script_name=$0 +function usage () { + echo "Usage: " + echo + echo " $(basename ${script_name}) { start | stop | restart }" +} + +if [[ $1 != "start" && $1 != "stop" && $1 != "restart" ]]; then + usage; + exit 1; +fi + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." 
+ echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi +} + +function prepare_log_dir() { + logdir=${TOPDIR}/logs + if ! [[ -d ${logsdir} ]]; then + if ! mkdir -p "${logdir}"; then + echo "ERROR: failed to create logs dir \"${logdir}\"" + exit 1 + fi + fi + export LOG_DIR=${logdir} +} + +function before_start() { + check_python_executable; + prepare_log_dir; + validate_seafile_data_dir; + + export SEAFILE_CONF_DIR=${default_seafile_data_dir} + export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir} + export SEAFES_DIR=$pro_pylibs_dir/seafes + export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH + export PYTHONPATH=$PYTHONPATH:$pro_pylibs_dir + export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/ + export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/thirdparts + export EVENTS_CONFIG_FILE=${SEAFILE_CENTRAL_CONF_DIR}/seafevents.conf + export INDEX_SLAVE_CONFIG_FILE=${SEAFILE_CENTRAL_CONF_DIR}/index-slave.conf +} + +start_index_worker() { + before_start; + nohup $PYTHON -m seafes.index_worker --loglevel debug --logfile ${logdir}/index_worker.log start & echo $! > $pidfile + sleep 2 + if ! pgrep -f "seafes.index_worker" 2>/dev/null 1>&2; then + printf "\033[33mError:Index worker failed to start.\033[m\n" + echo "Please try to run \"./run_index_worker.sh start\" again" + exit 1; + fi + echo + echo "Index worker is started" + echo +} + +stop_index_worker() { + if [[ -f ${pidfile} ]]; then + pid=$(cat "${pidfile}") + echo "Stopping index worker ..." + kill ${pid} + rm -f ${pidfile} + return 0 + else + echo "Index worker is not running" + fi +} + +case $1 in + "start" ) + start_index_worker; + ;; + "stop" ) + stop_index_worker; + ;; + "restart" ) + stop_index_worker + sleep 2 + start_index_worker + ;; +esac diff --git a/scripts/seaf-backup-cmd.py b/scripts/seaf-backup-cmd.py new file mode 100755 index 0000000000..bda1b88f2e --- /dev/null +++ b/scripts/seaf-backup-cmd.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +#coding: utf-8 + +import json +import argparse + +from seaserv import seafile_api +from pysearpc import SearpcError + +def show_backup_status(args): + ret_str = seafile_api.get_backup_status() + ret_dict = json.loads(ret_str) + print('Total number of libraries: %s' % ret_dict['n_total']) + print('Number of synchronized libraries: %s' % ret_dict['n_synced']) + print('Number of libraries waiting for sync: %s' % ret_dict['n_waiting']) + print('Number of libraries syncing: %s' % ret_dict['n_syncing']) + print('Number of libraries failed to sync: %s\n' % ret_dict['n_error']) + print('List of syncing libraries:') + for repo in ret_dict['syncing_repos']: + print(repo) + print('') + print('List of libraries failed to sync:') + for repo in ret_dict['error_repos']: + print(repo) + +def sync_repo(args): + if len(args.repo_id) != 36: + print('Invalid repo id %s.' % args.repo_id) + return + + try: + seafile_api.sync_repo_manually(args.repo_id, 1 if args.force else 0) + except SearpcError as e: + print('Failed to sync repo %s: %s.' % (args.repo_id, e)) + else: + print('Sync repo %s successfully.' 
% args.repo_id) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + subparser = parser.add_subparsers(title='subcommands', description='') + status_parser = subparser.add_parser('status', help='get backup status') + status_parser.set_defaults(func=show_backup_status) + + sync_parser = subparser.add_parser('sync', help='sync repo') + sync_parser.add_argument('-f', '--force', help='force sync repo', action='store_true') + sync_parser.add_argument('repo_id', help='repo id to sync') + sync_parser.set_defaults(func=sync_repo) + + args = parser.parse_args() + args.func(args) diff --git a/scripts/seaf-backup-cmd.sh b/scripts/seaf-backup-cmd.sh new file mode 100755 index 0000000000..ebf7a579e0 --- /dev/null +++ b/scripts/seaf-backup-cmd.sh @@ -0,0 +1,70 @@ +#!/bin/bash + +# This is a wrapper shell script for the real seaf-backup command. +# It prepares necessary environment variables and exec the real script. + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +check_python_executable + +# seafile cli client requires the argparse module +if ! $PYTHON -c 'import argparse' 2>/dev/null 1>&2; then + echo + echo "Python argparse module is required" + echo "see [https://pypi.python.org/pypi/argparse]" + echo + exit 1 +fi + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +central_config_dir=${TOPDIR}/conf +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data + + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" 
+ echo "" + exit 1; + fi +} + +validate_seafile_data_dir + +SEAFILE_PYTHON_PATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart + +SEAF_BACKUP_CMD=${INSTALLPATH}/seaf-backup-cmd.py + +export SEAFILE_RPC_PIPE_PATH=${INSTALLPATH}/runtime +export PYTHONPATH=${SEAFILE_PYTHON_PATH}:${PYTHONPATH} +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export SEAFILE_CONF_DIR=${default_seafile_data_dir} +export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir} +$PYTHON ${SEAF_BACKUP_CMD} "$@" diff --git a/scripts/seaf-encrypt.sh b/scripts/seaf-encrypt.sh new file mode 100755 index 0000000000..0c97c1fe77 --- /dev/null +++ b/scripts/seaf-encrypt.sh @@ -0,0 +1,91 @@ +#!/bin/bash + +echo "" + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_conf_dir=${TOPDIR}/conf +seaf_encrypt=${INSTALLPATH}/seafile/bin/seaf-encrypt +seaf_encrypt_opts="" + +export PATH=${INSTALLPATH}/seafile/bin:$PATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +script_name=$0 +function usage () { + echo "usage : " + echo -e "$(basename ${script_name}) \n" \ + "-f \n" \ + "-e " + echo "" +} + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi +} + +function check_component_running() { + name=$1 + cmd=$2 + if pid=$(pgrep -f "$cmd" 2>/dev/null); then + echo "[$name] is running, pid $pid. You can stop it by: " + echo + echo " kill $pid" + echo + echo "Stop it and try again." + echo + exit + fi +} + +function validate_already_running () { + if pid=$(pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null); then + echo "seafile server is still running, stop it by \"seafile.sh stop\"" + echo + exit 1; + fi + + check_component_running "seaf-server" "seaf-server -c ${default_ccnet_conf_dir}" + check_component_running "seafdav" "wsgidav.server.server_cli" +} + +function run_seaf_encrypt () { + validate_seafile_data_dir; + + validate_already_running; + + echo "Starting seaf-encrypt, please wait ..." + + LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_encrypt} \ + -c "${default_conf_dir}" \ + -d "${default_seafile_data_dir}" \ + ${seaf_encrypt_opts} + + echo "seaf-encrypt run done" + echo +} + +if [ $# -gt 0 ]; +then + for param in $@; + do + if [ ${param} = "-h" -o ${param} = "--help" ]; + then + usage; + exit 1; + fi + done +fi + +seaf_encrypt_opts=$@ +run_seaf_encrypt; + +echo "Done." 
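For reference, typical invocations of the backup command wrapper added above might look like the following sketch; the library id is purely illustrative, while the subcommands are the ones defined in seaf-backup-cmd.py:

    # Show how many libraries are synced, waiting, syncing or failing on the backup server
    ./seaf-backup-cmd.sh status

    # Manually sync one library by its 36-character id; -f/--force forces the sync
    ./seaf-backup-cmd.sh sync -f 413c175b-0f32-4c26-8a45-1fd9a1cd0b06
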
diff --git a/scripts/seaf-fsck.sh b/scripts/seaf-fsck.sh new file mode 100755 index 0000000000..6c08be517b --- /dev/null +++ b/scripts/seaf-fsck.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +echo "" + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_conf_dir=${TOPDIR}/conf +seaf_fsck=${INSTALLPATH}/seafile/bin/seaf-fsck + +export PATH=${INSTALLPATH}/seafile/bin:$PATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +script_name=$0 +function usage () { + echo "usage : " + echo "$(basename ${script_name}) [-h/--help] [-r/--repair] [-E/--export path_to_export] [repo_id_1 [repo_id_2 ...]]" + echo "" +} + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi +} + +function run_seaf_fsck () { + validate_seafile_data_dir; + + echo "Starting seaf-fsck, please wait ..." + echo + + LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_fsck} \ + -c "${default_ccnet_conf_dir}" -d "${default_seafile_data_dir}" \ + -F "${default_conf_dir}" \ + ${seaf_fsck_opts} + + echo "seaf-fsck run done" + echo +} + +if [ $# -gt 0 ]; +then + for param in $@; + do + if [ ${param} = "-h" -o ${param} = "--help" ]; + then + usage; + exit 1; + fi + done +fi + +seaf_fsck_opts=$@ +run_seaf_fsck; + +echo "Done." diff --git a/scripts/seaf-fuse.sh b/scripts/seaf-fuse.sh new file mode 100755 index 0000000000..aad6c5d699 --- /dev/null +++ b/scripts/seaf-fuse.sh @@ -0,0 +1,122 @@ +#!/bin/bash + +echo "" + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_conf_dir=${TOPDIR}/conf +seaf_fuse=${INSTALLPATH}/seafile/bin/seaf-fuse + +export PATH=${INSTALLPATH}/seafile/bin:$PATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +script_name=$0 +function usage () { + echo "usage : " + echo "$(basename ${script_name}) { start | stop | restart } " + echo "" +} + +# check args +if [[ "$1" != "start" && "$1" != "stop" && "$1" != "restart" ]]; then + usage; + exit 1; +fi + +if [[ ($1 == "start" || $1 == "restart" ) && $# -lt 2 ]]; then + usage; + exit 1 +fi + +if [[ $1 == "stop" && $# != 1 ]]; then + usage; + exit 1 +fi + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi +} + +function validate_already_running () { + if pid=$(pgrep -f "seaf-fuse -c ${default_ccnet_conf_dir}" 2>/dev/null); then + echo "seaf-fuse is already running, pid $pid" + echo + exit 1; + fi +} + +function warning_if_seafile_not_running () { + if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then + echo + echo "Warning: seafile-controller not running. Have you run \"./seafile.sh start\" ?" + echo + fi +} + +function start_seaf_fuse () { + validate_already_running; + warning_if_seafile_not_running; + validate_seafile_data_dir; + + echo "Starting seaf-fuse, please wait ..." 
+ + logfile=${TOPDIR}/logs/seaf-fuse.log + + LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_fuse} \ + -c "${default_ccnet_conf_dir}" \ + -d "${default_seafile_data_dir}" \ + -F "${default_conf_dir}" \ + -l "${logfile}" \ + "$@" + + sleep 2 + + # check if seaf-fuse started successfully + if ! pgrep -f "seaf-fuse -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then + echo "Failed to start seaf-fuse" + exit 1; + fi + + echo "seaf-fuse started" + echo +} + +function stop_seaf_fuse() { + if ! pgrep -f "seaf-fuse -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then + echo "seaf-fuse not running yet" + return 1; + fi + + echo "Stopping seaf-fuse ..." + pkill -SIGTERM -f "seaf-fuse -c ${default_ccnet_conf_dir}" + return 0 +} + +function restart_seaf_fuse () { + stop_seaf_fuse + sleep 2 + start_seaf_fuse $@ +} + +case $1 in + "start" ) + shift + start_seaf_fuse $@; + ;; + "stop" ) + stop_seaf_fuse; + ;; + "restart" ) + shift + restart_seaf_fuse $@; +esac + +echo "Done." diff --git a/scripts/seaf-gc.sh b/scripts/seaf-gc.sh new file mode 100755 index 0000000000..24e59899fc --- /dev/null +++ b/scripts/seaf-gc.sh @@ -0,0 +1,133 @@ +#!/bin/bash + +echo "" + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_conf_dir=${TOPDIR}/conf +check_db_py=${INSTALLPATH}/check-db-type.py +seaf_gc=${INSTALLPATH}/seafile/bin/seafserv-gc +seaf_gc_opts="" + +export PATH=${INSTALLPATH}/seafile/bin:$PATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +script_name=$0 +function usage () { + echo "usage : " + if [[ -d ${INSTALLPATH}/pro ]]; then + echo "$(basename ${script_name}) [--dry-run | -D] [--rm-deleted | -r] [--rm-fs | -R] [repo-id1] [repo-id2]" + else + echo "$(basename ${script_name}) [--dry-run | -D] [--rm-deleted | -r] [repo-id1] [repo-id2]" + fi + echo "" +} + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi +} + +function check_component_running() { + name=$1 + cmd=$2 + if pid=$(pgrep -f "$cmd" 2>/dev/null); then + echo "[$name] is running, pid $pid. You can stop it by: " + echo + echo " kill $pid" + echo + echo "Stop it and try again." 
+ echo + exit + fi +} + +function validate_already_running () { + if pid=$(pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null); then + echo "seafile server is still running, stop it by \"seafile.sh stop\"" + echo + exit 1; + fi + + check_component_running "seaf-server" "seaf-server -c ${default_ccnet_conf_dir}" + check_component_running "fileserver" "fileserver -c ${default_ccnet_conf_dir}" + check_component_running "seafdav" "wsgidav.server.server_cli" +} + +function run_seaf_gc () { + + if [[ -d ${INSTALLPATH}/pro ]]; then + seafile_conf=${default_conf_dir}/seafile.conf + db_type=$($PYTHON $check_db_py $seafile_conf) + + if [ $db_type = "sqlite" ]; then + validate_already_running; + fi + else + validate_already_running; + fi + + validate_seafile_data_dir; + + echo "Starting seafserv-gc, please wait ..." + + LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_gc} \ + -c "${default_ccnet_conf_dir}" \ + -d "${default_seafile_data_dir}" \ + -F "${default_conf_dir}" \ + ${seaf_gc_opts} + + echo "seafserv-gc run done" + echo +} + +check_python_executable; + +if [ $# -gt 0 ]; +then + for param in $@; + do + if [ ${param} = "-h" -o ${param} = "--help" ]; + then + usage; + exit 1; + fi + done +fi + +seaf_gc_opts=$@ +run_seaf_gc; + +echo "Done." diff --git a/scripts/seaf-import.sh b/scripts/seaf-import.sh new file mode 100755 index 0000000000..4610937ca1 --- /dev/null +++ b/scripts/seaf-import.sh @@ -0,0 +1,95 @@ +#!/bin/bash + +echo "" + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_conf_dir=${TOPDIR}/conf +seaf_import=${INSTALLPATH}/seafile/bin/seaf-import +seaf_import_opts="" + +export PATH=${INSTALLPATH}/seafile/bin:$PATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +script_name=$0 +function usage () { + echo "usage : " + echo -e "$(basename ${script_name}) \n" \ + "-p \n" \ + "-n \n" \ + "-u " + echo "" +} + + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi +} + +function check_component_running() { + name=$1 + cmd=$2 + if pid=$(pgrep -f "$cmd" 2>/dev/null); then + echo "[$name] is running, pid $pid. You can stop it by: " + echo + echo " kill $pid" + echo + echo "Stop it and try again." + echo + exit + fi +} + +<<'COMMENT' +function validate_already_running () { + if pid=$(pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null); then + echo "seafile server is still running, stop it by \"seafile.sh stop\"" + echo + exit 1; + fi + + check_component_running "seaf-server" "seaf-server -c ${default_ccnet_conf_dir}" + check_component_running "seafdav" "wsgidav.server.server_cli" +} +COMMENT + +function run_seaf_import () { + validate_seafile_data_dir; + +# validate_already_running; + + echo "Starting seaf-import, please wait ..." + + LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_import} \ + -c "${default_conf_dir}" \ + -d "${default_seafile_data_dir}" \ + ${seaf_import_opts} + + echo " run done" + echo +} + +if [ $# -gt 0 ]; +then + for param in $@; + do + if [ ${param} = "-h" -o ${param} = "--help" ]; + then + usage; + exit 1; + fi + done +fi + +seaf_import_opts=$@ +run_seaf_import; + +echo "Done." 
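A typical garbage-collection session with the wrapper above might look like this sketch; the repo ids are illustrative, and, as the script itself enforces for an SQLite backend, the seafile server must be stopped before running it:

    # Dry run: report collectable blocks without deleting anything
    ./seaf-gc.sh --dry-run

    # Collect garbage for the whole instance, then only for two specific libraries
    ./seaf-gc.sh
    ./seaf-gc.sh 413c175b-0f32-4c26-8a45-1fd9a1cd0b06 7d6a2f5e-1c3b-4e89-9f21-0a8b6c4d2e10
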
diff --git a/scripts/seafile-background-tasks.sh b/scripts/seafile-background-tasks.sh new file mode 100755 index 0000000000..56bddae76a --- /dev/null +++ b/scripts/seafile-background-tasks.sh @@ -0,0 +1,167 @@ +#!/bin/bash + +echo "" + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data + +logdir=${TOPDIR}/logs +pro_pylibs_dir=${INSTALLPATH}/pro/python + +seafevents_conf=${TOPDIR}/conf/seafevents.conf +seafile_background_tasks_log=${logdir}/seafile-background-tasks.log + +seahub_dir=${INSTALLPATH}/seahub +central_config_dir=${TOPDIR}/conf + +export SEAHUB_DIR=${seahub_dir} +export PATH=${INSTALLPATH}/seafile/bin:$PATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +script_name=$0 +function usage () { + echo "Usage: " + echo + echo " $(basename "${script_name}") { start | stop | restart }" + echo + echo "" +} + +# Check args +if [[ $1 != "start" && $1 != "stop" && $1 != "restart" ]]; then + usage; + exit 1; +fi + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi + + pidfile=${TOPDIR}/pids/seafile-background-tasks.pid +} + +function ensure_single_instance () { + if pgrep -f "seafevents.background_tasks" 2>/dev/null 1>&2; then + echo "seafile background tasks is already running." + exit 1; + fi +} + +function warning_if_seafile_not_running () { + if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then + echo + echo "Warning: seafile-controller not running. Have you run \"./seafile.sh start\" ?" + echo + fi +} + +function prepare_log_dir() { + if ! [[ -d ${logsdir} ]]; then + if ! 
mkdir -p "${logdir}"; then + echo "ERROR: failed to create logs dir \"${logdir}\"" + exit 1 + fi + fi +} + +function before_start() { + warning_if_seafile_not_running; + ensure_single_instance; + prepare_log_dir; + + export CCNET_CONF_DIR=${default_ccnet_conf_dir} + export SEAFILE_CONF_DIR=${default_seafile_data_dir} + export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir} + export SEAFILE_RPC_PIPE_PATH=${INSTALLPATH}/runtime + export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH + export PYTHONPATH=$PYTHONPATH:$pro_pylibs_dir + export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/ + export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/thirdparts + # Allow LDAP user sync to import seahub_settings.py + export PYTHONPATH=$PYTHONPATH:${central_config_dir} + export SEAFES_DIR=$pro_pylibs_dir/seafes +} + +function start_seafile_background_tasks () { + before_start; + echo "Starting seafile background tasks ..." + $PYTHON -m seafevents.background_tasks --config-file "${seafevents_conf}" \ + --loglevel debug --logfile "${seafile_background_tasks_log}" -P "${pidfile}" 2>/dev/null 1>&2 & + + # Ensure started successfully + sleep 5 + if ! pgrep -f "seafevents.background_tasks" >/dev/null; then + printf "\033[33mError: failed to start seafile background tasks.\033[m\n" + echo "Please try to run \"./seafile-background-tasks.sh start\" again" + exit 1; + fi +} + +function stop_seafile_background_tasks () { + if [[ -f ${pidfile} ]]; then + pid=$(cat "${pidfile}") + echo "Stopping seafile background tasks ..." + kill "${pid}" + sleep 1 + if ps "${pid}" 2>/dev/null 1>&2 ; then + kill -KILL "${pid}" + fi + pkill -f "soffice.*--invisible --nocrashreport" + rm -f "${pidfile}" + return 0 + else + echo "seafile background tasks is not running" + fi +} + +check_python_executable; +validate_seafile_data_dir; + +case $1 in + "start" ) + start_seafile_background_tasks; + ;; + "stop" ) + stop_seafile_background_tasks; + ;; + "restart" ) + stop_seafile_background_tasks + sleep 2 + start_seafile_background_tasks + ;; +esac + +echo "Done." 
+echo "" diff --git a/scripts/seafile.sh b/scripts/seafile.sh new file mode 100755 index 0000000000..96b955cbee --- /dev/null +++ b/scripts/seafile.sh @@ -0,0 +1,201 @@ +#!/bin/bash + +### BEGIN INIT INFO +# Provides: seafile +# Required-Start: $local_fs $remote_fs $network +# Required-Stop: $local_fs +# Default-Start: 1 2 3 4 5 +# Default-Stop: +# Short-Description: Starts Seafile Server +# Description: starts Seafile Server +### END INIT INFO + +echo "" + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +central_config_dir=${TOPDIR}/conf +seaf_controller="${INSTALLPATH}/seafile/bin/seafile-controller" +pro_pylibs_dir=${INSTALLPATH}/pro/python +seafesdir=$pro_pylibs_dir/seafes + +export PATH=${INSTALLPATH}/seafile/bin:$PATH +export ORIG_LD_LIBRARY_PATH=${LD_LIBRARY_PATH} +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +script_name=$0 +function usage () { + echo "usage : " + echo "$(basename ${script_name}) { start | stop | restart } " + echo "" +} + +# check args +if [[ $# != 1 || ( "$1" != "start" && "$1" != "stop" && "$1" != "restart" ) ]]; then + usage; + exit 1; +fi + +function validate_running_user () { + real_data_dir=`readlink -f ${default_seafile_data_dir}` + running_user=`id -un` + data_dir_owner=`stat -c %U ${real_data_dir}` + + if [[ "${running_user}" != "${data_dir_owner}" ]]; then + echo "Error: the user running the script (\"${running_user}\") is not the owner of \"${real_data_dir}\" folder, you should use the user \"${data_dir_owner}\" to run the script." + exit -1; + fi +} + +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +if [[ -d ${INSTALLPATH}/pro ]]; then + export PYTHONPATH=$PYTHONPATH:$pro_pylibs_dir + export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/ + export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/thirdparts + export SEAFES_DIR=$seafesdir +fi + +function validate_central_conf_dir () { + if [[ ! -d ${central_config_dir} ]]; then + echo "Error: there is no conf/ directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit -1; + fi +} + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi +} + +function test_config() { + if ! LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_controller} -t -c "${default_ccnet_conf_dir}" -d "${default_seafile_data_dir}" -F "${central_config_dir}" ; then + exit 1; + fi +} + +function check_component_running() { + name=$1 + cmd=$2 + if pid=$(pgrep -f "$cmd" 2>/dev/null); then + echo "[$name] is running, pid $pid. You can stop it by: " + echo + echo " kill $pid" + echo + echo "Stop it and try again." 
+ echo + exit + fi +} + +function validate_already_running () { + if pid=$(pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null); then + echo "Seafile controller is already running, pid $pid" + echo + exit 1; + fi + + check_component_running "seaf-server" "seaf-server -c ${default_ccnet_conf_dir}" + check_component_running "fileserver" "fileserver -c ${default_ccnet_conf_dir}" + check_component_running "seafdav" "wsgidav.server.server_cli" + check_component_running "seafevents" "seafevents.main --config-file ${central_config_dir}" +} + +function test_java { + if ! which java 2>/dev/null 1>&2; then + echo "java is not found on your machine. Please install it first." + exit 1; + fi +} + +function start_seafile_server () { + validate_already_running; + validate_central_conf_dir; + validate_seafile_data_dir; + validate_running_user; + + if [[ -d ${INSTALLPATH}/pro ]]; then + test_config; + test_java; + fi + + echo "Starting seafile server, please wait ..." + + mkdir -p $TOPDIR/logs + + if [[ -d ${INSTALLPATH}/pro ]]; then + if ! LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_controller} -c "${default_ccnet_conf_dir}" -d "${default_seafile_data_dir}" -F "${central_config_dir}"; then + controller_log="$default_seafile_data_dir/controller.log" + echo + echo "Failed to start seafile server. See $controller_log for more details." + echo + exit 1 + fi + else + LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_controller} \ + -c "${default_ccnet_conf_dir}" \ + -d "${default_seafile_data_dir}" \ + -F "${central_config_dir}" + fi + + sleep 3 + + # check if seafile server started successfully + if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then + echo "Failed to start seafile server" + exit 1; + fi + + echo "Seafile server started" + echo +} + +function kill_all () { + pkill -f "seaf-server -c ${default_ccnet_conf_dir}" + pkill -f "fileserver -c ${default_ccnet_conf_dir}" + pkill -f "seafevents.main" + pkill -f "convert_server.py" + pkill -f "soffice.*--invisible --nocrashreport" + pkill -f "wsgidav.server.server_cli" +} + +function stop_seafile_server () { + if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then + echo "seafile server not running yet" + kill_all + return 1 + fi + + echo "Stopping seafile server ..." + pkill -SIGTERM -f "seafile-controller -c ${default_ccnet_conf_dir}" + kill_all + + return 0 +} + +function restart_seafile_server () { + stop_seafile_server; + sleep 5 + start_seafile_server; +} + +case $1 in + "start" ) + start_seafile_server; + ;; + "stop" ) + stop_seafile_server; + ;; + "restart" ) + restart_seafile_server; +esac + +echo "Done." 
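A minimal start/stop cycle with the control script above, run as the owner of the seafile-data directory (validate_running_user refuses to proceed otherwise); the "seafile" account name here is only an example:

    sudo -u seafile ./seafile.sh start
    sudo -u seafile ./seafile.sh restart
    sudo -u seafile ./seafile.sh stop
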
diff --git a/scripts/seafobj_migrate.py b/scripts/seafobj_migrate.py new file mode 100755 index 0000000000..0e92830bc4 --- /dev/null +++ b/scripts/seafobj_migrate.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python +#coding: utf-8 + +import os +import sys +import logging +from threading import Thread +import queue +import rados + +from seafobj.objstore_factory import SeafObjStoreFactory + +logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO) + +class Worker(Thread): + def __init__(self, do_work, task_queue): + Thread.__init__(self) + self.do_work = do_work + self.task_queue = task_queue + + def run(self): + while True: + try: + task = self.task_queue.get() + if task is None: + break + self.do_work(task) + except Exception as e: + logging.warning('Failed to execute task: %s' % e) + finally: + self.task_queue.task_done() + +class ThreadPool(object): + def __init__(self, do_work, nworker=20): + self.do_work = do_work + self.nworker = nworker + self.task_queue = queue.Queue() + + def start(self): + for i in range(self.nworker): + Worker(self.do_work, self.task_queue).start() + + def put_task(self, task): + self.task_queue.put(task) + + def join(self): + self.task_queue.join() + # notify all thread to stop + for i in range(self.nworker): + self.task_queue.put(None) + +class Task(object): + def __init__(self, repo_id, repo_version, obj_id): + self.repo_id = repo_id + self.repo_version = repo_version + self.obj_id = obj_id + +class ObjMigrateWorker(Thread): + def __init__(self, orig_obj_factory, dest_obj_factory, dtype): + Thread.__init__(self) + self.dtype = dtype + self.orig_store = orig_obj_factory.get_obj_store(dtype) + self.dest_store = dest_obj_factory.get_obj_store(dtype) + self.thread_pool = ThreadPool(self.do_work) + + def run(self): + logging.info('Start to migrate [%s] object' % self.dtype) + self.thread_pool.start() + self.migrate() + self.thread_pool.join() + logging.info('Complete migrate [%s] object' % self.dtype) + + def do_work(self, task): + ioctx = self.dest_store.ceph_client.ioctx_pool.get_ioctx(task.repo_id) + try: + ioctx.stat(task.obj_id) + except rados.ObjectNotFound: + try: + data = self.orig_store.read_obj_raw(task.repo_id, task.repo_version, task.obj_id) + except Exception as e: + logging.warning('[%s] Failed to read object %s from repo %s: %s' % (self.dtype, task.obj_id, task.repo_id, e)) + raise + + try: + ioctx.write_full(task.obj_id, data) + except Exception as e: + logging.warning('[%s] Failed to write object %s of repo %s to Ceph: %s' % (self.dtype, task.obj_id, task.repo_id, e)) + raise + except Exception as e: + logging.warning('[%s] Failed to stat object %s of repo %s in Ceph: %s' % (self.dtype, task.obj_id, task.repo_id, e)) + raise + finally: + self.dest_store.ceph_client.ioctx_pool.return_ioctx(ioctx) + + def migrate(self): + top_path = self.orig_store.obj_dir + for repo_id in os.listdir(top_path): + repo_path = os.path.join(top_path, repo_id) + for spath in os.listdir(repo_path): + obj_path = os.path.join(repo_path, spath) + for lpath in os.listdir(obj_path): + obj_id = spath + lpath + task = Task(repo_id, 1, obj_id) + self.thread_pool.put_task(task) + +def main(): + try: + fs_obj_factory = SeafObjStoreFactory() + os.environ['SEAFILE_CENTRAL_CONF_DIR'] = os.environ['CEPH_SEAFILE_CENTRAL_CONF_DIR'] + except KeyError: + logging.warning('CEPH_SEAFILE_CENTRAL_CONF_DIR environment variable is not set.\n') + sys.exit() + + ceph_obj_factory = SeafObjStoreFactory() + + dtypes = ['commits', 'fs', 'blocks'] + for dtype in dtypes: + 
ObjMigrateWorker(fs_obj_factory, ceph_obj_factory, dtype).start() + +if __name__ == '__main__': + main() diff --git a/scripts/seahub.conf b/scripts/seahub.conf new file mode 100644 index 0000000000..221c77160b --- /dev/null +++ b/scripts/seahub.conf @@ -0,0 +1,16 @@ +import os + +daemon = True +workers = 3 +threads = 5 + +# Logging +runtime_dir = os.path.dirname(__file__) +pidfile = os.path.join(runtime_dir, 'seahub.pid') +errorlog = os.path.join(runtime_dir, 'error.log') + +# disable access log +#accesslog = os.path.join(runtime_dir, 'access.log') + +# for file upload, we need a longer timeout value (default is only 30s, too short) +timeout = 1200 diff --git a/scripts/seahub.sh b/scripts/seahub.sh new file mode 100755 index 0000000000..205d003c9f --- /dev/null +++ b/scripts/seahub.sh @@ -0,0 +1,321 @@ +#!/bin/bash + +### BEGIN INIT INFO +# Provides: seahub +# Required-Start: $local_fs $remote_fs $network +# Required-Stop: $local_fs +# Default-Start: 1 2 3 4 5 +# Default-Stop: +# Short-Description: Starts Seahub +# Description: starts Seahub +### END INIT INFO + +echo "" + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +central_config_dir=${TOPDIR}/conf +seafile_rpc_pipe_path=${INSTALLPATH}/runtime + +manage_py=${INSTALLPATH}/seahub/manage.py +gunicorn_conf=${TOPDIR}/conf/gunicorn.conf.py +pidfile=${TOPDIR}/pids/seahub.pid +errorlog=${TOPDIR}/logs/gunicorn_error.log +accesslog=${TOPDIR}/logs/gunicorn_access.log +gunicorn_exe=${INSTALLPATH}/seahub/thirdpart/bin/gunicorn +pro_pylibs_dir=${INSTALLPATH}/pro/python +seafesdir=$pro_pylibs_dir/seafes + +script_name=$0 +function usage () { + echo "Usage: " + echo + echo " $(basename ${script_name}) { start | stop | restart }" + echo + echo "To run seahub in fastcgi:" + echo + echo " $(basename ${script_name}) { start-fastcgi | stop | restart-fastcgi }" + echo + echo " is optional, and defaults to 8000" + echo "" +} + +# Check args +if [[ $1 != "start" && $1 != "stop" && $1 != "restart" \ + && $1 != "start-fastcgi" && $1 != "restart-fastcgi" && $1 != "clearsessions" && $1 != "python-env" ]]; then + usage; + exit 1; +fi + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi +} + +function validate_seahub_running () { + if pgrep -f "${manage_py}" 2>/dev/null 1>&2; then + echo "Seahub is already running." + exit 1; + elif pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo "Seahub is already running." + exit 1; + fi +} + +function validate_port () { + if ! 
[[ ${port} =~ ^[1-9][0-9]{1,4}$ ]] ; then + printf "\033[033m${port}\033[m is not a valid port number\n\n" + usage; + exit 1 + fi +} + +if [[ ($1 == "start" || $1 == "restart" || $1 == "start-fastcgi" || $1 == "restart-fastcgi") \ + && ($# == 2 || $# == 1) ]]; then + if [[ $# == 2 ]]; then + port=$2 + validate_port + else + port=8000 + fi +elif [[ $1 == "stop" && $# == 1 ]]; then + dummy=dummy +elif [[ $1 == "clearsessions" && $# == 1 ]]; then + dummy=dummy +elif [[ $1 == "python-env" ]]; then + dummy=dummy +else + usage; + exit 1 +fi + +function warning_if_seafile_not_running () { + if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then + echo + echo "Warning: seafile-controller not running. Have you run \"./seafile.sh start\" ?" + echo + exit 1 + fi +} + +function prepare_seahub_log_dir() { + logdir=${TOPDIR}/logs + if ! [[ -d ${logsdir} ]]; then + if ! mkdir -p "${logdir}"; then + echo "ERROR: failed to create logs dir \"${logdir}\"" + exit 1 + fi + fi + export SEAHUB_LOG_DIR=${logdir} +} + +function before_start() { + prepare_env; + warning_if_seafile_not_running; + validate_seahub_running; + prepare_seahub_log_dir; + + if [[ -d ${INSTALLPATH}/pro ]]; then + if [[ -z "$LANG" ]]; then + echo "LANG is not set in ENV, set to en_US.UTF-8" + export LANG='en_US.UTF-8' + fi + if [[ -z "$LC_ALL" ]]; then + echo "LC_ALL is not set in ENV, set to en_US.UTF-8" + export LC_ALL='en_US.UTF-8' + fi + + export PYTHONPATH=$PYTHONPATH:$pro_pylibs_dir + export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/ + export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/thirdparts + export SEAFES_DIR=$seafesdir + fi +} + +function start_seahub () { + before_start; + echo "Starting seahub at port ${port} ..." + check_init_admin; + + export DJANGO_SETTINGS_MODULE=seahub.settings + $PYTHON $gunicorn_exe seahub.wsgi:application -c "${gunicorn_conf}" --preload + + # Ensure seahub is started successfully + sleep 5 + if ! pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + printf "\033[33mError:Seahub failed to start.\033[m\n" + echo "Please try to run \"./seahub.sh start\" again" + exit 1; + fi + echo + echo "Seahub is started" + echo +} + +function start_seahub_fastcgi () { + before_start; + + # Returns 127.0.0.1 if SEAFILE_FASTCGI_HOST is unset or hasn't got any value, + # otherwise returns value of SEAFILE_FASTCGI_HOST environment variable + address=`(test -z "$SEAFILE_FASTCGI_HOST" && echo "127.0.0.1") || echo $SEAFILE_FASTCGI_HOST` + + echo "Starting seahub (fastcgi) at ${address}:${port} ..." + check_init_admin; + $PYTHON "${manage_py}" runfcgi maxchildren=8 host=$address port=$port pidfile=$pidfile \ + outlog=${accesslog} errlog=${errorlog} + + # Ensure seahub is started successfully + sleep 5 + if ! 
pgrep -f "${manage_py}" >/dev/null; then + printf "\033[33mError:Seahub failed to start.\033[m\n" + exit 1; + fi + echo + echo "Seahub is started" + echo +} + +function prepare_env() { + check_python_executable; + validate_seafile_data_dir; + + if [[ -z "$LANG" ]]; then + echo "LANG is not set in ENV, set to en_US.UTF-8" + export LANG='en_US.UTF-8' + fi + if [[ -z "$LC_ALL" ]]; then + echo "LC_ALL is not set in ENV, set to en_US.UTF-8" + export LC_ALL='en_US.UTF-8' + fi + + export CCNET_CONF_DIR=${default_ccnet_conf_dir} + export SEAFILE_CONF_DIR=${default_seafile_data_dir} + export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir} + export SEAFILE_RPC_PIPE_PATH=${seafile_rpc_pipe_path} + export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH + + +} + +function clear_sessions () { + prepare_env; + + echo "Start clear expired session records ..." + $PYTHON "${manage_py}" clearsessions + + echo + echo "Done" + echo +} + +function stop_seahub () { + if [[ -f ${pidfile} ]]; then + echo "Stopping seahub ..." + pkill -9 -f "thirdpart/bin/gunicorn" + sleep 1 + if pgrep -f "thirdpart/bin/gunicorn" 2>/dev/null 1>&2 ; then + echo 'Failed to stop seahub.' + exit 1 + fi + rm -f ${pidfile} + return 0 + else + echo "Seahub is not running" + fi +} + +function check_init_admin() { + check_init_admin_script=${INSTALLPATH}/check_init_admin.py + if ! $PYTHON $check_init_admin_script; then + exit 1 + fi +} + +function run_python_env() { + local pyexec + + prepare_env; + + if [[ -d ${INSTALLPATH}/pro ]]; then + export PYTHONPATH=$PYTHONPATH:$pro_pylibs_dir + export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/ + export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/thirdparts + export SEAFES_DIR=$seafesdir + fi + + if which ipython 2>/dev/null; then + pyexec=ipython + else + pyexec=$PYTHON + fi + + if [[ $# -eq 0 ]]; then + $pyexec "$@" + else + "$@" + fi +} + +case $1 in + "start" ) + start_seahub; + ;; + "start-fastcgi" ) + start_seahub_fastcgi; + ;; + "stop" ) + stop_seahub; + ;; + "restart" ) + stop_seahub + sleep 2 + start_seahub + ;; + "restart-fastcgi" ) + stop_seahub + sleep 2 + start_seahub_fastcgi + ;; + "python-env") + shift + run_python_env "$@" + ;; + "clearsessions" ) + clear_sessions + ;; +esac + +echo "Done." +echo "" diff --git a/scripts/server-release.md b/scripts/server-release.md new file mode 100644 index 0000000000..c02226a6a0 --- /dev/null +++ b/scripts/server-release.md @@ -0,0 +1,31 @@ +# Server Release Package + +1. Libsearpc + cd libsearpc; + CFLAGS="-O2" configure --prefix=$dest + make install +2. Ccnet + cd ccnet; + CFLAGS="-O2" ./configure --enable-server-pkg --prefix=$dest + make install +3. Seafile + cd seafile; + CFLAGS="-O2" configure --enable-server-pkg --prefix=$dest + make install +4. copy shared libraries + scripts/cp-shared-lib.py $dest/lib +5. strip libs/executables + python do-strip.py +6. Update seahub + cd seahub + git fetch origin + git checkout release + git rebase origin/master + +7. Pack + ./pack-server.sh 1.0.0 + +DONE! 
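The numbered steps above can be driven by a small helper script along the following lines; the $dest prefix and the release number are placeholders, and the component checkouts are assumed to sit side by side:

    #!/bin/bash
    set -e
    export dest=/opt/seafile-build        # install prefix (placeholder)

    (cd libsearpc && CFLAGS="-O2" ./configure --prefix=$dest && make install)
    (cd ccnet     && CFLAGS="-O2" ./configure --enable-server-pkg --prefix=$dest && make install)
    (cd seafile   && CFLAGS="-O2" ./configure --enable-server-pkg --prefix=$dest && make install)

    scripts/cp-shared-lib.py $dest/lib    # step 4: copy shared libraries
    python do-strip.py                    # step 5: strip libs/executables
    (cd seahub && git fetch origin && git checkout release && git rebase origin/master)   # step 6: update seahub
    ./pack-server.sh 1.0.0                # step 7: pack the tarball
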
+ + + diff --git a/scripts/setup-seafile-mysql.py b/scripts/setup-seafile-mysql.py new file mode 100644 index 0000000000..44b4c5ddee --- /dev/null +++ b/scripts/setup-seafile-mysql.py @@ -0,0 +1,1598 @@ +#coding: UTF-8 + +'''This script would guide the seafile admin to setup seafile with MySQL''' +import argparse +import sys +import os +import time +import re +import shutil +import glob +import subprocess +import hashlib +import getpass +import uuid +import warnings +import socket +from configparser import ConfigParser + +import pymysql + +try: + import readline # pylint: disable=W0611 +except ImportError: + pass + + +SERVER_MANUAL_HTTP = 'https://download.seafile.com/published/seafile-manual/home.md' + +class Utils(object): + '''Groups all helper functions here''' + @staticmethod + def welcome(): + '''Show welcome message''' + welcome_msg = '''\ +----------------------------------------------------------------- +This script will guide you to setup your seafile server using MySQL. +Make sure you have read seafile server manual at + + %s + +Press ENTER to continue +-----------------------------------------------------------------''' % SERVER_MANUAL_HTTP + print(welcome_msg) + input() + + @staticmethod + def highlight(content): + '''Add ANSI color to content to get it highlighted on terminal''' + return '\x1b[33m%s\x1b[m' % content + + @staticmethod + def info(msg): + print(msg) + + @staticmethod + def error(msg): + '''Print error and exit''' + print() + print('Error: ' + msg) + sys.exit(1) + + @staticmethod + def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): + '''Run a program and wait it to finish, and return its exit code. The + standard output of this program is supressed. + + ''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(argv, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env) + return proc.wait() + + @staticmethod + def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): + '''Like run_argv but specify a command line string instead of argv''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(cmdline, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env, + shell=True) + return proc.wait() + + @staticmethod + def get_command_output(args, *a, **kw): + return subprocess.check_output(args, *a, **kw) + + @staticmethod + def prepend_env_value(name, value, env=None, seperator=':'): + '''prepend a new value to a list''' + if env is None: + env = os.environ + + try: + current_value = env[name] + except KeyError: + current_value = '' + + new_value = value + if current_value: + new_value += seperator + current_value + + env[name] = new_value + + @staticmethod + def must_mkdir(path): + '''Create a directory, exit on failure''' + if os.path.exists(path): + return + try: + os.makedirs(path) + except OSError as e: + Utils.error('failed to create directory %s:%s' % (path, e)) + + @staticmethod + def must_copy(src, dst): + '''Copy src to dst, exit on failure''' + try: + shutil.copy(src, dst) + except Exception as e: + Utils.error('failed to copy %s to %s: %s' % (src, dst, e)) + + @staticmethod + def find_in_path(prog): + if 'win32' in sys.platform: + sep = ';' + else: + sep = ':' + + dirs = 
os.environ['PATH'].split(sep) + for d in dirs: + d = d.strip() + if d == '': + continue + path = os.path.join(d, prog) + if os.path.exists(path): + return path + + return None + + @staticmethod + def get_python_executable(): + '''Return the python executable. This should be the PYTHON environment + variable which is set in setup-seafile-mysql.sh + + ''' + return os.environ['PYTHON'] + + @staticmethod + def read_config(fn): + '''Return a case sensitive ConfigParser by reading the file "fn"''' + cp = ConfigParser() + cp.optionxform = str + cp.read(fn) + + return cp + + @staticmethod + def write_config(cp, fn): + '''Return a case sensitive ConfigParser by reading the file "fn"''' + with open(fn, 'w') as fp: + cp.write(fp) + + @staticmethod + def ask_question(desc, + key=None, + note=None, + default=None, + validate=None, + yes_or_no=False, + password=False): + '''Ask a question, return the answer. + @desc description, e.g. "What is the port of ccnet?" + + @key a name to represent the target of the question, e.g. "port for + ccnet server" + + @note additional information for the question, e.g. "Must be a valid + port number" + + @default the default value of the question. If the default value is + not None, when the user enter nothing and press [ENTER], the default + value would be returned + + @validate a function that takes the user input as the only parameter + and validate it. It should return a validated value, or throws an + "InvalidAnswer" exception if the input is not valid. + + @yes_or_no If true, the user must answer "yes" or "no", and a boolean + value would be returned + + @password If true, the user input would not be echoed to the + console + + ''' + assert key or yes_or_no + # Format description + print() + if note: + desc += '\n' + note + + desc += '\n' + if yes_or_no: + desc += '[ yes or no ]' + else: + if default: + desc += '[ default "%s" ]' % default + else: + desc += '[ %s ]' % key + + desc += ' ' + while True: + # prompt for user input + if password: + answer = getpass.getpass(desc).strip() + else: + answer = input(desc).strip() + + # No user input: use default + if not answer: + if default: + answer = default + else: + continue + + # Have user input: validate answer + if yes_or_no: + if answer not in ['yes', 'no']: + print(Utils.highlight('\nPlease answer yes or no\n')) + continue + else: + return answer == 'yes' + else: + if validate: + try: + return validate(answer) + except InvalidAnswer as e: + print(Utils.highlight('\n%s\n' % e)) + continue + else: + return answer + + @staticmethod + def validate_port(port): + try: + port = int(port) + except ValueError: + raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port)) + + if port <= 0 or port > 65535: + raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port)) + + return port + + +class InvalidAnswer(Exception): + def __init__(self, msg): + Exception.__init__(self) + self.msg = msg + def __str__(self): + return self.msg + +class InvalidParams(Exception): + def __init__(self, msg): + Exception.__init__(self) + self.msg = msg + def __str__(self): + return self.msg + +### END of Utils +#################### + +class EnvManager(object): + '''System environment and directory layout''' + def __init__(self): + self.install_path = os.path.dirname(os.path.abspath(__file__)) + self.top_dir = os.path.dirname(self.install_path) + self.bin_dir = os.path.join(self.install_path, 'seafile', 'bin') + self.central_config_dir = os.path.join(self.top_dir, 'conf') + self.central_pids_dir = os.path.join(self.top_dir, 
'pids') + self.central_logs_dir = os.path.join(self.top_dir, 'logs') + Utils.must_mkdir(self.central_config_dir) + self.is_pro = os.path.exists(os.path.join(self.install_path, 'pro')) + + def check_pre_condiction(self): + def error_if_not_exists(path): + if not os.path.exists(path): + Utils.error('"%s" not found' % path) + + paths = [ + os.path.join(self.install_path, 'seafile'), + os.path.join(self.install_path, 'seahub'), + os.path.join(self.install_path, 'runtime'), + ] + + for path in paths: + error_if_not_exists(path) + + if os.path.exists(ccnet_config.ccnet_dir): + Utils.error('Ccnet config dir \"%s\" already exists.' % ccnet_config.ccnet_dir) + + def get_seahub_env(self): + '''Prepare for seahub syncdb''' + env = dict(os.environ) + env['CCNET_CONF_DIR'] = ccnet_config.ccnet_dir + env['SEAFILE_CONF_DIR'] = seafile_config.seafile_dir + env['SEAFES_DIR'] = os.path.join(self.install_path, 'pro', 'python', 'seafes') + self.setup_python_path(env) + return env + + def setup_python_path(self, env): + '''And PYTHONPATH and CCNET_CONF_DIR/SEAFILE_CONF_DIR to env, which is + needed by seahub + + ''' + install_path = self.install_path + pro_pylibs_dir = os.path.join(install_path, 'pro', 'python') + extra_python_path = [ + pro_pylibs_dir, + + os.path.join(install_path, 'seahub', 'thirdpart'), + + os.path.join(install_path, 'seafile/lib/python3/site-packages'), + os.path.join(install_path, 'seafile/lib64/python3/site-packages'), + ] + + for path in extra_python_path: + Utils.prepend_env_value('PYTHONPATH', path, env=env) + + def get_binary_env(self): + '''Set LD_LIBRARY_PATH for seafile server executables''' + env = dict(os.environ) + lib_dir = os.path.join(self.install_path, 'seafile', 'lib') + lib64_dir = os.path.join(self.install_path, 'seafile', 'lib64') + Utils.prepend_env_value('LD_LIBRARY_PATH', lib_dir, env=env) + Utils.prepend_env_value('LD_LIBRARY_PATH', lib64_dir, env=env) + return env + +class AbstractConfigurator(object): + '''Abstract Base class for ccnet/seafile/seahub/db configurator''' + def __init__(self): + pass + + def ask_questions(self): + raise NotImplementedError + + def generate(self): + raise NotImplementedError + + +class AbstractDBConfigurator(AbstractConfigurator): + '''Abstract class for database related configuration''' + def __init__(self): + AbstractConfigurator.__init__(self) + self.mysql_host = 'localhost' + self.mysql_port = 3306 + self.unix_socket = "/var/run/mysqld/mysqld.sock" + + self.use_existing_db = False + + self.seafile_mysql_user = '' + self.seafile_mysql_password = '' + self.seafile_mysql_userhost = '127.0.0.1' + + self.root_password = '' + self.root_conn = '' + + self.ccnet_db_name = '' + self.seafile_db_name = '' + self.seahub_db_name = '' + + self.seahub_admin_email = '' + self.seahub_admin_password = '' + + @staticmethod + def ask_use_existing_db(): + def validate(choice): + if choice not in ['1', '2']: + raise InvalidAnswer('Please choose 1 or 2') + + return choice == '2' + + question = '''\ +------------------------------------------------------- +Please choose a way to initialize seafile databases: +------------------------------------------------------- +''' + + note = '''\ +[1] Create new ccnet/seafile/seahub databases +[2] Use existing ccnet/seafile/seahub databases +''' + return Utils.ask_question(question, + key='1 or 2', + note=note, + validate=validate) + + def validate_mysql_host(self, host): + if not re.match(r'^[a-zA-Z0-9_\-\.]+$', host): + raise InvalidAnswer('%s is not a valid host' % Utils.highlight(host)) + + if host == 
'localhost': + host = '127.0.0.1' + return host + + def ask_mysql_host(self): + question = 'What is the host of mysql server?' + key = 'mysql server host' + default = 'localhost' + self.mysql_host = Utils.ask_question(question, + key=key, + default=default, + validate=self.validate_mysql_host) + + def validate_mysql_user_host(self, host): + MYSQL_HOST_RE = re.compile(r'^(%|[^.].+\..+[^.])$') + if not MYSQL_HOST_RE.match(host): + raise InvalidAnswer('invalid mysql user host: {}'.format(host)) + return host + + def ask_mysql_user_host(self): + self.seafile_mysql_userhost = Utils.ask_question( + 'From which hosts could the mysql account be used?', + key='mysql user host', + default='%', + validate=self.validate_mysql_user_host + ) + + def ask_mysql_port(self): + question = 'What is the port of mysql server?' + key = 'mysql server port' + default = '3306' + port = Utils.ask_question(question, + key=key, + default=default, + validate=Utils.validate_port) + + # self.check_mysql_server(host, port) + self.mysql_port = port + + def ask_mysql_host_port(self): + self.ask_mysql_host() + if self.mysql_host != '127.0.0.1': + self.ask_mysql_user_host() + self.ask_mysql_port() + + def check_mysql_server(self, host, port): + print('\nverifying mysql server running ... ', end=' ') + try: + dummy = pymysql.connect(host=host, port=port) + except Exception: + print() + raise InvalidAnswer('Failed to connect to mysql server at "%s:%s"' \ + % (host, port)) + + print('done') + + def check_mysql_user(self, user, password, host=None, unix_socket=None): + print('\nverifying password of user %s ... ' % user, end=' ') + kwargs = dict(port=self.mysql_port, + user=user, + passwd=password) + if unix_socket: + kwargs['unix_socket'] = unix_socket + else: + kwargs['host'] = host or self.mysql_host + + try: + conn = pymysql.connect(**kwargs) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + raise InvalidAnswer('Failed to connect to mysql server using user "%s" and password "***": %s' \ + % (user, e.args[1])) + else: + raise InvalidAnswer('Failed to connect to mysql server using user "%s" and password "***": %s' \ + % (user, e)) + + print('done') + return conn + + def create_seahub_admin(self): + try: + conn = pymysql.connect(host=self.mysql_host, + port=self.mysql_port, + user=self.seafile_mysql_user, + passwd=self.seafile_mysql_password, + db=self.ccnet_db_name) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to connect to mysql database %s: %s' % (self.ccnet_db_name, e.args[1])) + else: + Utils.error('Failed to connect to mysql database %s: %s' % (self.ccnet_db_name, e)) + + cursor = conn.cursor() + sql = '''\ +CREATE TABLE IF NOT EXISTS EmailUser (id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, email VARCHAR(255), passwd CHAR(64), is_staff BOOL NOT NULL, is_active BOOL NOT NULL, ctime BIGINT, UNIQUE INDEX (email)) ENGINE=INNODB''' + + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to create ccnet user table: %s' % e.args[1]) + else: + Utils.error('Failed to create ccnet user table: %s' % e) + + sql = '''REPLACE INTO EmailUser(email, passwd, is_staff, is_active, ctime) VALUES ('%s', '%s', 1, 1, 0)''' \ + % (seahub_config.admin_email, seahub_config.hashed_admin_password()) + + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to create admin user: %s' % e.args[1]) + else: + Utils.error('Failed to 
create admin user: %s' % e) + + conn.commit() + + def ask_questions(self): + '''Ask questions and do database operations''' + raise NotImplementedError + + +class NewDBConfigurator(AbstractDBConfigurator): + '''Handles the case of creating new mysql databases for ccnet/seafile/seahub''' + def __init__(self): + AbstractDBConfigurator.__init__(self) + + def ask_questions(self): + self.ask_mysql_host_port() + + self.ask_root_password() + self.ask_seafile_mysql_user_password() + + self.ask_db_names() + + def generate(self): + if not self.mysql_user_exists(self.seafile_mysql_user): + self.create_user() + self.create_databases() + + def validate_root_passwd(self, password): + try: + self.root_conn = self.check_mysql_user('root', password) + except InvalidAnswer: + # For MariaDB on Ubuntu 16.04, the msyql root user can only be + # accessed from localhost with unix socket. So we retry with + # localhost when failing with 127.0.0.1. + if self.mysql_host == '127.0.0.1': + self.root_conn = self.check_mysql_user('root', password, unix_socket=self.unix_socket) + else: + raise + return password + + def ask_root_password(self): + question = 'What is the password of the mysql root user?' + key = 'root password' + self.root_password = Utils.ask_question(question, + key=key, + validate=self.validate_root_passwd, + password=True) + + def mysql_user_exists(self, user): + cursor = self.root_conn.cursor() + + sql = '''SELECT EXISTS(SELECT 1 FROM mysql.user WHERE user = '%s' and host = '%s')''' % \ + (user, self.seafile_mysql_userhost) + + try: + cursor.execute(sql) + return cursor.fetchall()[0][0] + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to check mysql user %s@%s: %s' % \ + (user, self.seafile_mysql_userhost, e.args[1])) + else: + Utils.error('Failed to check mysql user %s@%s: %s' % \ + (user, self.seafile_mysql_userhost, e)) + finally: + cursor.close() + + + def ask_seafile_mysql_user_password(self): + def validate(user): + if user == 'root': + raise InvalidAnswer( + 'Using mysql "root" user is not allowed for security reasons. Please specify a different database user.' + ) + else: + question = 'Enter the password for mysql user "%s":' % Utils.highlight(user) + key = 'password for %s' % user + password = Utils.ask_question(question, key=key, password=True) + # If the user already exists, check the password here + if self.mysql_user_exists(user): + self.check_mysql_user(user, password) + self.seafile_mysql_password = password + + return user + + + question = 'Enter the name for mysql user of seafile. It would be created if not exists.' 
+ key = 'mysql user for seafile' + default = 'seafile' + self.seafile_mysql_user = Utils.ask_question(question, + key=key, + default=default, + validate=validate) + + def ask_db_name(self, program, default): + question = 'Enter the database name for %s:' % program + key = '%s database' % program + return Utils.ask_question(question, + key=key, + default=default, + validate=self.validate_db_name) + + def ask_db_names(self): + self.ccnet_db_name = self.ask_db_name('ccnet-server', 'ccnet-db') + self.seafile_db_name = self.ask_db_name('seafile-server', 'seafile-db') + self.seahub_db_name = self.ask_db_name('seahub', 'seahub-db') + + def validate_db_name(self, db_name): + return db_name + + def create_user(self): + cursor = self.root_conn.cursor() + sql = '''CREATE USER '{}'@'{}' IDENTIFIED BY '{}' '''.format( + self.seafile_mysql_user, + self.seafile_mysql_userhost, + self.seafile_mysql_password + ) + + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to create mysql user {}@{}: {}'.format(self.seafile_mysql_user, self.seafile_mysql_userhost, e.args[1])) + else: + Utils.error('Failed to create mysql user {}@{}: {}'.format(self.seafile_mysql_user, self.seafile_mysql_userhost, e)) + finally: + cursor.close() + + + def create_db(self, db_name): + cursor = self.root_conn.cursor() + sql = '''CREATE DATABASE IF NOT EXISTS `%s` CHARACTER SET UTF8''' \ + % db_name + + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to create database %s: %s' % (db_name, e.args[1])) + else: + Utils.error('Failed to create database %s: %s' % (db_name, e)) + finally: + cursor.close() + + def grant_db_permission(self, db_name): + cursor = self.root_conn.cursor() + sql = '''GRANT ALL PRIVILEGES ON `{}`.* to `{}`@`{}` '''.format( + db_name, + self.seafile_mysql_user, + self.seafile_mysql_userhost + ) + + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to grant permission of database %s: %s' % (db_name, e.args[1])) + else: + Utils.error('Failed to grant permission of database %s: %s' % (db_name, e)) + finally: + cursor.close() + + def create_databases(self): + self.create_db(self.ccnet_db_name) + self.create_db(self.seafile_db_name) + self.create_db(self.seahub_db_name) + + if self.seafile_mysql_user != 'root': + self.grant_db_permission(self.ccnet_db_name) + self.grant_db_permission(self.seafile_db_name) + self.grant_db_permission(self.seahub_db_name) + + +class ExistingDBConfigurator(AbstractDBConfigurator): + '''Handles the case of use existing mysql databases for ccnet/seafile/seahub''' + def __init__(self): + AbstractDBConfigurator.__init__(self) + self.use_existing_db = True + + def ask_questions(self): + self.ask_mysql_host_port() + + self.ask_existing_mysql_user_password() + + self.ccnet_db_name = self.ask_db_name('ccnet') + self.seafile_db_name = self.ask_db_name('seafile') + self.seahub_db_name = self.ask_db_name('seahub') + + def generate(self): + pass + + def ask_existing_mysql_user_password(self): + def validate(user): + if user == 'root': + raise InvalidAnswer( + 'Using root is not allowed for security reasons. Please specify a different database user.' + ) + question = 'What is the password for mysql user "%s"?' 
% Utils.highlight(user) + key = 'password for %s' % user + password = Utils.ask_question(question, key=key, password=True) + self.check_mysql_user(user, password) + self.seafile_mysql_password = password + return user + + question = 'Which mysql user to use for seafile?' + key = 'mysql user for seafile' + self.seafile_mysql_user = Utils.ask_question(question, + key=key, + validate=validate) + + def validate_db_name(self, db_name): + self.check_user_db_access(db_name) + return db_name + + def ask_db_name(self, program): + question = 'Enter the existing database name for %s:' % program + key = '%s database' % program + return Utils.ask_question(question, + key=key, + validate=self.validate_db_name) + + def check_user_db_access(self, db_name): + user = self.seafile_mysql_user + password = self.seafile_mysql_password + + print('\nverifying user "%s" access to database %s ... ' % (user, db_name), end=' ') + try: + conn = pymysql.connect(host=self.mysql_host, + port=self.mysql_port, + user=user, + passwd=password, + db=db_name) + + cursor = conn.cursor() + cursor.execute('show tables') + cursor.close() + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + raise InvalidAnswer('Failed to access database %s using user "%s" and password "***": %s' \ + % (db_name, user, e.args[1])) + else: + raise InvalidAnswer('Failed to access database %s using user "%s" and password "***": %s' \ + % (db_name, user, e)) + + print('done') + + return conn + + +class CcnetConfigurator(AbstractConfigurator): + SERVER_NAME_REGEX = r'^[a-zA-Z0-9_\-]{3,15}$' + SERVER_IP_OR_DOMAIN_REGEX = r'^[^.].+\..+[^.]$' + + def __init__(self): + '''Initialize default values of ccnet configuration''' + AbstractConfigurator.__init__(self) + self.ccnet_dir = os.path.join(env_mgr.top_dir, 'ccnet') + self.port = 10001 + self.server_name = None + self.ip_or_domain = None + self.ccnet_conf = os.path.join(env_mgr.central_config_dir, 'ccnet.conf') + + def ask_questions(self): + if not self.server_name: + self.ask_server_name() + if not self.ip_or_domain: + self.ask_server_ip_or_domain() + # self.ask_port() + + def generate(self): + print('Generating ccnet configuration ...\n') + with open(self.ccnet_conf, 'w') as fp: + fp.write('[General]') + + self.generate_db_conf() + + Utils.must_mkdir(self.ccnet_dir) + + def generate_db_conf(self): + config = Utils.read_config(self.ccnet_conf) + # [Database] + # ENGINE= + # HOST= + # USER= + # PASSWD= + # DB= + db_section = 'Database' + if not config.has_section(db_section): + config.add_section(db_section) + config.set(db_section, 'ENGINE', 'mysql') + config.set(db_section, 'HOST', db_config.mysql_host) + config.set(db_section, 'PORT', str(db_config.mysql_port)) + config.set(db_section, 'USER', db_config.seafile_mysql_user) + config.set(db_section, 'PASSWD', db_config.seafile_mysql_password) + config.set(db_section, 'DB', db_config.ccnet_db_name) + config.set(db_section, 'CONNECTION_CHARSET', 'utf8') + + Utils.write_config(config, self.ccnet_conf) + + def validate_server_name(self, name): + if not re.match(self.SERVER_NAME_REGEX, name): + raise InvalidAnswer('%s is not a valid name' % Utils.highlight(name)) + return name + + def ask_server_name(self): + question = 'What is the name of the server? It will be displayed on the client.' 
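+        # Validated by validate_server_name() above: 3-15 characters (letters, digits, '-' or '_').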
+ key = 'server name' + note = '3 - 15 letters or digits' + self.server_name = Utils.ask_question(question, + key=key, + note=note, + validate=self.validate_server_name) + + def validate_server_ip(self, ip_or_domain): + if not re.match(self.SERVER_IP_OR_DOMAIN_REGEX, ip_or_domain): + raise InvalidAnswer('%s is not a valid ip or domain' % ip_or_domain) + return ip_or_domain + + def ask_server_ip_or_domain(self): + question = 'What is the ip or domain of the server?' + key = 'This server\'s ip or domain' + note = 'For example: www.mycompany.com, 192.168.1.101' + self.ip_or_domain = Utils.ask_question(question, + key=key, + note=note, + validate=self.validate_server_ip) + + def ask_port(self): + def validate(port): + return Utils.validate_port(port) + + question = 'Which port do you want to use for the ccnet server?' + key = 'ccnet server port' + default = 10001 + self.port = Utils.ask_question(question, + key=key, + default=default, + validate=validate) + + def do_syncdb(self): + print('----------------------------------------') + print('Now creating ccnet database tables ...\n') + print('----------------------------------------') + + try: + conn = pymysql.connect(host=db_config.mysql_host, + port=db_config.mysql_port, + user=db_config.seafile_mysql_user, + passwd=db_config.seafile_mysql_password, + db=db_config.ccnet_db_name) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to connect to mysql database %s: %s' % (db_config.ccnet_db_name, e.args[1])) + else: + Utils.error('Failed to connect to mysql database %s: %s' % (db_config.ccnet_db_name, e)) + + cursor = conn.cursor() + + sql_file = os.path.join(env_mgr.install_path, 'sql', 'mysql', 'ccnet.sql') + with open(sql_file, 'r') as fp: + content = fp.read() + + sqls = [line.strip() for line in content.split(';') if line.strip()] + for sql in sqls: + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to init ccnet database: %s' % e.args[1]) + else: + Utils.error('Failed to init ccnet database: %s' % e) + + conn.commit() + + +class SeafileConfigurator(AbstractConfigurator): + def __init__(self): + AbstractConfigurator.__init__(self) + self.seafile_dir = os.path.join(env_mgr.top_dir, 'seafile-data') + self.port = 12001 + self.fileserver_port = None + self.seafile_conf = os.path.join(env_mgr.central_config_dir, 'seafile.conf') + + def ask_questions(self): + # if not self.seafile_dir: + # self.ask_seafile_dir() + # self.ask_port() + if not self.fileserver_port: + self.ask_fileserver_port() + + def generate(self): + print('Generating seafile configuration ...\n') + with open(self.seafile_conf, 'w') as fp: + fp.write('[fileserver]\nport=%d\n' % self.fileserver_port) + + self.generate_db_conf() + + ## use default seafile-data path: seafile_data_dir=${TOPDIR}/seafile-data + + print('done') + + def generate_db_conf(self): + config = Utils.read_config(self.seafile_conf) + # [database] + # type= + # host= + # user= + # password= + # db_name= + # unix_socket= + db_section = 'database' + if not config.has_section(db_section): + config.add_section(db_section) + config.set(db_section, 'type', 'mysql') + config.set(db_section, 'host', db_config.mysql_host) + config.set(db_section, 'port', str(db_config.mysql_port)) + config.set(db_section, 'user', db_config.seafile_mysql_user) + config.set(db_section, 'password', db_config.seafile_mysql_password) + config.set(db_section, 'db_name', db_config.seafile_db_name) + config.set(db_section, 
'connection_charset', 'utf8') + + Utils.write_config(config, self.seafile_conf) + + def validate_seafile_dir(self, path): + if os.path.exists(path): + raise InvalidAnswer('%s already exists' % Utils.highlight(path)) + return path + + def ask_seafile_dir(self): + question = 'Where do you want to put your seafile data?' + key = 'seafile-data' + note = 'Please use a volume with enough free space' + default = os.path.join(env_mgr.top_dir, 'seafile-data') + self.seafile_dir = Utils.ask_question(question, + key=key, + note=note, + default=default, + validate=self.validate_seafile_dir) + + def ask_port(self): + def validate(port): + port = Utils.validate_port(port) + if port == ccnet_config.port: + raise InvalidAnswer('%s is used by ccnet server, choose another one' \ + % Utils.highlight(port)) + return port + + question = 'Which port do you want to use for the seafile server?' + key = 'seafile server port' + default = 12001 + self.port = Utils.ask_question(question, + key=key, + default=default, + validate=validate) + + def ask_fileserver_port(self): + question = 'Which port do you want to use for the seafile fileserver?' + key = 'seafile fileserver port' + default = 8082 + self.fileserver_port = Utils.ask_question(question, + key=key, + default=default, + validate=Utils.validate_port) + + def do_syncdb(self): + print('----------------------------------------') + print('Now creating seafile database tables ...\n') + print('----------------------------------------') + + try: + conn = pymysql.connect(host=db_config.mysql_host, + port=db_config.mysql_port, + user=db_config.seafile_mysql_user, + passwd=db_config.seafile_mysql_password, + db=db_config.seafile_db_name) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to connect to mysql database %s: %s' % (db_config.seafile_db_name, e.args[1])) + else: + Utils.error('Failed to connect to mysql database %s: %s' % (db_config.seafile_db_name, e)) + + cursor = conn.cursor() + + sql_file = os.path.join(env_mgr.install_path, 'sql', 'mysql', 'seafile.sql') + with open(sql_file, 'r') as fp: + content = fp.read() + + sqls = [line.strip() for line in content.split(';') if line.strip()] + for sql in sqls: + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to init seafile database: %s' % e.args[1]) + else: + Utils.error('Failed to init seafile database: %s' % e) + + conn.commit() + +class SeahubConfigurator(AbstractConfigurator): + def __init__(self): + AbstractConfigurator.__init__(self) + self.admin_email = '' + self.admin_password = '' + self.seahub_settings_py = os.path.join(env_mgr.central_config_dir, 'seahub_settings.py') + + def hashed_admin_password(self): + return hashlib.sha1(self.admin_password).hexdigest() # pylint: disable=E1101 + + def ask_questions(self): + pass + + def generate(self): + '''Generating seahub_settings.py''' + print('Generating seahub configuration ...\n') + with open(self.seahub_settings_py, 'w') as fp: + self.write_utf8_comment(fp) + fp.write('\n') + self.write_secret_key(fp) + fp.write('\n') + self.write_database_config(fp) + + def write_utf8_comment(self, fp): + fp.write('# -*- coding: utf-8 -*-') + + def write_secret_key(self, fp): + script = os.path.join(env_mgr.install_path, 'seahub/tools/secret_key_generator.py') + cmd = [ + Utils.get_python_executable(), + script, + ] + key = Utils.get_command_output(cmd).strip() + fp.write('SECRET_KEY = "%s"' % key) + + def write_database_config(self, fp): + template = 
'''\ +\nDATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.mysql', + 'NAME': '%(name)s', + 'USER': '%(username)s', + 'PASSWORD': '%(password)s', + 'HOST': '%(host)s', + 'PORT': '%(port)s', + 'OPTIONS': {'charset': 'utf8mb4'}, + } +} + +''' + text = template % dict(name=db_config.seahub_db_name, + username=db_config.seafile_mysql_user, + password=db_config.seafile_mysql_password, + host=db_config.mysql_host, + port=db_config.mysql_port) + + fp.write(text) + + def ask_admin_email(self): + print() + print('----------------------------------------') + print('Now let\'s create the admin account') + print('----------------------------------------') + def validate(email): + # whitespace is not allowed + if re.match(r'[\s]', email): + raise InvalidAnswer('%s is not a valid email address' % Utils.highlight(email)) + # must be a valid email address + if not re.match(r'^.+@.*\..+$', email): + raise InvalidAnswer('%s is not a valid email address' % Utils.highlight(email)) + + return email + + key = 'admin email' + question = 'What is the ' + Utils.highlight('email') + ' for the admin account?' + self.admin_email = Utils.ask_question(question, + key=key, + validate=validate) + + def ask_admin_password(self): + def validate(password): + key = 'admin password again' + question = 'Enter the ' + Utils.highlight('password again:') + password_again = Utils.ask_question(question, + key=key, + password=True) + + if password_again != password: + raise InvalidAnswer('password mismatch') + + return password + + key = 'admin password' + question = 'What is the ' + Utils.highlight('password') + ' for the admin account?' + self.admin_password = Utils.ask_question(question, + key=key, + password=True, + validate=validate) + + def do_syncdb(self): + print('----------------------------------------') + print('Now creating seahub database tables ...\n') + print('----------------------------------------') + + try: + conn = pymysql.connect(host=db_config.mysql_host, + port=db_config.mysql_port, + user=db_config.seafile_mysql_user, + passwd=db_config.seafile_mysql_password, + db=db_config.seahub_db_name) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to connect to mysql database %s: %s' % (db_config.seahub_db_name, e.args[1])) + else: + Utils.error('Failed to connect to mysql database %s: %s' % (db_config.seahub_db_name, e)) + + cursor = conn.cursor() + + sql_file = os.path.join(env_mgr.install_path, 'seahub', 'sql', 'mysql.sql') + with open(sql_file, 'r') as fp: + content = fp.read() + + sqls = [line.strip() for line in content.split(';') if line.strip()] + for sql in sqls: + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to init seahub database: %s' % e.args[1]) + else: + Utils.error('Failed to init seahub database: %s' % e) + + conn.commit() + + def prepare_avatar_dir(self): + # media_dir=${INSTALLPATH}/seahub/media + # orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + # dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # if [[ ! 
-d ${dest_avatar_dir} ]]; then + # mkdir -p "${TOPDIR}/seahub-data" + # mv "${orig_avatar_dir}" "${dest_avatar_dir}" + # ln -s ../../../seahub-data/avatars ${media_dir} + # fi + + try: + media_dir = os.path.join(env_mgr.install_path, 'seahub', 'media') + orig_avatar_dir = os.path.join(media_dir, 'avatars') + + seahub_data_dir = os.path.join(env_mgr.top_dir, 'seahub-data') + dest_avatar_dir = os.path.join(seahub_data_dir, 'avatars') + + if os.path.exists(dest_avatar_dir): + return + + if not os.path.exists(seahub_data_dir): + os.mkdir(seahub_data_dir) + + shutil.move(orig_avatar_dir, dest_avatar_dir) + os.symlink('../../../seahub-data/avatars', orig_avatar_dir) + except Exception as e: + Utils.error('Failed to prepare seahub avatars dir: %s' % e) + +class SeafDavConfigurator(AbstractConfigurator): + def __init__(self): + AbstractConfigurator.__init__(self) + self.seafdav_conf = None + + def ask_questions(self): + pass + + def generate(self): + self.seafdav_conf = os.path.join(env_mgr.central_config_dir, 'seafdav.conf') + text = ''' +[WEBDAV] +enabled = false +port = 8080 +share_name = / +''' + + with open(self.seafdav_conf, 'w') as fp: + fp.write(text) + +class ProfessionalConfigurator(AbstractConfigurator): + '''Seafile Pro related configuration''' + def __init__(self): + AbstractConfigurator.__init__(self) + self.pro_py = os.path.join(env_mgr.install_path, 'pro', 'pro.py') + self.pro_data_dir = os.path.join(env_mgr.top_dir, 'pro-data') + + def ask_questions(self): + pass + + def generate(self): + argv = [ + Utils.get_python_executable(), + self.pro_py, + 'setup', + '--mysql', + '--mysql_host=%s' % db_config.mysql_host, + '--mysql_port=%s' % db_config.mysql_port, + '--mysql_user=%s' % db_config.seafile_mysql_user, + '--mysql_password=%s' % db_config.seafile_mysql_password, + '--mysql_db=%s' % db_config.seahub_db_name, + ] + if Utils.run_argv(argv, env=env_mgr.get_seahub_env()) != 0: + Utils.error('Failed to generate seafile pro configuration') + +class GunicornConfigurator(AbstractConfigurator): + def __init__(self): + AbstractConfigurator.__init__(self) + self.gunicorn_conf = None + + def ask_questions(self): + pass + + def generate(self): + self.gunicorn_conf = os.path.join(env_mgr.central_config_dir, 'gunicorn.conf.py') + template = ''' +import os + +daemon = True +workers = 5 + +# default localhost:8000 +bind = "127.0.0.1:8000" + +# Pid +pids_dir = '%(pids_dir)s' +pidfile = os.path.join(pids_dir, 'seahub.pid') + +# for file upload, we need a longer timeout value (default is only 30s, too short) +timeout = 1200 + +limit_request_line = 8190 +''' + + text = template % dict(pids_dir=env_mgr.central_pids_dir, + logs_dir=env_mgr.central_logs_dir) + + with open(self.gunicorn_conf, 'w') as fp: + fp.write(text) + +class UserManualHandler(object): + def __init__(self): + self.src_docs_dir = os.path.join(env_mgr.install_path, 'seafile', 'docs') + self.library_template_dir = None + + def copy_user_manuals(self): + self.library_template_dir = os.path.join(seafile_config.seafile_dir, 'library-template') + Utils.must_mkdir(self.library_template_dir) + + pattern = os.path.join(self.src_docs_dir, '*.doc') + + for doc in glob.glob(pattern): + Utils.must_copy(doc, self.library_template_dir) + +def report_config(): + print() + print('---------------------------------') + print('This is your configuration') + print('---------------------------------') + print() + + template = '''\ + server name: %(server_name)s + server ip/domain: %(ip_or_domain)s + + seafile data dir: %(seafile_dir)s + fileserver port: 
%(fileserver_port)s + + database: %(use_existing_db)s + ccnet database: %(ccnet_db_name)s + seafile database: %(seafile_db_name)s + seahub database: %(seahub_db_name)s + database user: %(db_user)s + +''' + config = { + 'server_name' : ccnet_config.server_name, + 'ip_or_domain' : ccnet_config.ip_or_domain, + + 'seafile_dir' : seafile_config.seafile_dir, + 'fileserver_port' : seafile_config.fileserver_port, + + 'admin_email' : seahub_config.admin_email, + + + 'use_existing_db': 'use existing' if db_config.use_existing_db else 'create new', + 'ccnet_db_name': db_config.ccnet_db_name, + 'seafile_db_name': db_config.seafile_db_name, + 'seahub_db_name': db_config.seahub_db_name, + 'db_user': db_config.seafile_mysql_user + } + + print(template % config) + + if need_pause: + print() + print('---------------------------------') + print('Press ENTER to continue, or Ctrl-C to abort') + print('---------------------------------') + + input() + + +def create_seafile_server_symlink(): + print('\ncreating seafile-server-latest symbolic link ... ', end=' ') + seafile_server_symlink = os.path.join(env_mgr.top_dir, 'seafile-server-latest') + try: + os.symlink(os.path.basename(env_mgr.install_path), seafile_server_symlink) + except Exception as e: + print('\n') + Utils.error('Failed to create symbolic link %s: %s' % (seafile_server_symlink, e)) + else: + print('done\n\n') + +def set_file_perm(): + filemode = 0o600 + dirmode = 0o700 + files = [ + seahub_config.seahub_settings_py, + ] + dirs = [ + env_mgr.central_config_dir, + ccnet_config.ccnet_dir, + seafile_config.seafile_dir, + seahub_config.seahub_settings_py, + ] + for fpath in files: + os.chmod(fpath, filemode) + for dpath in dirs: + os.chmod(dpath, dirmode) + +env_mgr = EnvManager() +ccnet_config = CcnetConfigurator() +seafile_config = SeafileConfigurator() +seafdav_config = SeafDavConfigurator() +gunicorn_config = GunicornConfigurator() +seahub_config = SeahubConfigurator() +user_manuals_handler = UserManualHandler() +if env_mgr.is_pro: + pro_config = ProfessionalConfigurator() +# Would be created after AbstractDBConfigurator.ask_use_existing_db() +db_config = None +need_pause = True + +def get_param_val(arg, env, default=None): + return arg or os.environ.get(env, default) + +def check_params(args): + server_name = 'seafile' + ccnet_config.server_name = ccnet_config.validate_server_name(server_name) + + server_ip = get_param_val(args.server_ip, 'SERVER_IP', '127.0.0.1') + ccnet_config.ip_or_domain = ccnet_config.validate_server_ip(server_ip) + + fileserver_port = get_param_val(args.fileserver_port, 'FILESERVER_PORT', '8082') + seafile_config.fileserver_port = Utils.validate_port(fileserver_port) + + seafile_dir = get_param_val(args.seafile_dir, 'SEAFILE_DIR', + os.path.join(env_mgr.top_dir, 'seafile-data')) + seafile_config.seafile_dir = seafile_config.validate_seafile_dir(seafile_dir) + + global db_config + + use_existing_db = get_param_val(args.use_existing_db, 'USE_EXISTING_DB', '0') + # pylint: disable=redefined-variable-type + if use_existing_db == '0': + db_config = NewDBConfigurator() + elif use_existing_db == '1': + db_config = ExistingDBConfigurator() + else: + raise InvalidParams('Invalid use existing db parameter, the value can only be 0 or 1') + + mysql_host = get_param_val(args.mysql_host, 'MYSQL_HOST', '127.0.0.1') + if not mysql_host: + raise InvalidParams('Incomplete mysql configuration parameters, ' \ + 'missing mysql host parameter') + db_config.mysql_host = db_config.validate_mysql_host(mysql_host) + + mysql_port = 
get_param_val(args.mysql_port, 'MYSQL_PORT', '3306') + db_config.mysql_port = Utils.validate_port(mysql_port) + + mysql_user = get_param_val(args.mysql_user, 'MYSQL_USER') + if not mysql_user: + raise InvalidParams('Incomplete mysql configuration parameters, ' \ + 'missing mysql user name parameter') + + mysql_user_passwd = get_param_val(args.mysql_user_passwd, 'MYSQL_USER_PASSWD') + if not mysql_user_passwd: + raise InvalidParams('Incomplete mysql configuration parameters, ' \ + 'missing mysql user password parameter') + + ccnet_db = get_param_val(args.ccnet_db, 'CCNET_DB', 'ccnet_db') + if not ccnet_db: + raise InvalidParams('Incomplete mysql configuration parameters, ' \ + 'missing ccnet db name parameter') + + seafile_db = get_param_val(args.seafile_db, 'SEAFILE_DB', 'seafile_db') + if not seafile_db: + raise InvalidParams('Incomplete mysql configuration parameters, ' \ + 'missing seafile db name parameter') + + seahub_db = get_param_val(args.seahub_db, 'SEAHUB_DB', 'seahub_db') + if not seahub_db: + raise InvalidParams('Incomplete mysql configuration parameters, ' \ + 'missing seahub db name parameter') + + mysql_user_host = get_param_val(args.mysql_user_host, 'MYSQL_USER_HOST') + mysql_root_passwd = get_param_val(args.mysql_root_passwd, 'MYSQL_ROOT_PASSWD') + + if db_config.use_existing_db: + db_config.check_mysql_user(mysql_user, mysql_user_passwd) + db_config.seafile_mysql_user = mysql_user + db_config.seafile_mysql_password = mysql_user_passwd + db_config.ccnet_db_name = db_config.validate_db_name(ccnet_db) + db_config.seafile_db_name = db_config.validate_db_name(seafile_db) + db_config.seahub_db_name = db_config.validate_db_name(seahub_db) + else: + if db_config.mysql_host != '127.0.0.1' and not mysql_user_host: + raise InvalidParams('mysql user host parameter is missing in creating new db mode') + if not mysql_user_host: + db_config.seafile_mysql_userhost = '127.0.0.1' + else: + db_config.seafile_mysql_userhost = db_config.validate_mysql_user_host(mysql_user_host) + + if not mysql_root_passwd and "MYSQL_ROOT_PASSWD" not in os.environ: + raise InvalidParams('mysql root password parameter is missing in creating new db mode') + db_config.root_password = db_config.validate_root_passwd(mysql_root_passwd) + + if mysql_user == 'root': + db_config.seafile_mysql_user = 'root' + db_config.seafile_mysql_password = db_config.root_password + else: + if db_config.mysql_user_exists(mysql_user): + db_config.check_mysql_user(mysql_user, mysql_user_passwd) + db_config.seafile_mysql_user = mysql_user + db_config.seafile_mysql_password = mysql_user_passwd + db_config.ccnet_db_name = ccnet_db + db_config.seafile_db_name = seafile_db + db_config.seahub_db_name = seahub_db + + global need_pause + need_pause = False + + +def main(): + if len(sys.argv) > 2 and sys.argv[1] == 'auto': + sys.argv.remove('auto') + parser = argparse.ArgumentParser() + parser.add_argument('-n', '--server-name', help='server name') + parser.add_argument('-i', '--server-ip', help='server ip or domain') + parser.add_argument('-p', '--fileserver-port', help='fileserver port') + parser.add_argument('-d', '--seafile-dir', help='seafile dir to store seafile data') + parser.add_argument('-e', '--use-existing-db', + help='use mysql existing dbs or create new dbs, ' + '0: create new dbs 1: use existing dbs') + parser.add_argument('-o', '--mysql-host', help='mysql host') + parser.add_argument('-t', '--mysql-port', help='mysql port') + parser.add_argument('-u', '--mysql-user', help='mysql user name') + parser.add_argument('-w', 
'--mysql-user-passwd', help='mysql user password') + parser.add_argument('-q', '--mysql-user-host', help='mysql user host') + parser.add_argument('-r', '--mysql-root-passwd', help='mysql root password') + parser.add_argument('-c', '--ccnet-db', help='ccnet db name') + parser.add_argument('-s', '--seafile-db', help='seafile db name') + parser.add_argument('-b', '--seahub-db', help='seahub db name') + + args = parser.parse_args() + + try: + check_params(args) + except (InvalidAnswer, InvalidParams) as e: + print(Utils.highlight('\n%s\n' % e)) + sys.exit(-1) + + global db_config + + if need_pause: + Utils.welcome() + warnings.filterwarnings('ignore', category=pymysql.Warning) + + env_mgr.check_pre_condiction() + + # Part 1: collect configuration + ccnet_config.ask_questions() + seafile_config.ask_questions() + seahub_config.ask_questions() + if env_mgr.is_pro: + pro_config.ask_questions() + + # pylint: disable=redefined-variable-type + if not db_config: + if AbstractDBConfigurator.ask_use_existing_db(): + db_config = ExistingDBConfigurator() + else: + db_config = NewDBConfigurator() + + db_config.ask_questions() + + report_config() + + # Part 2: generate configuration + db_config.generate() + ccnet_config.generate() + seafile_config.generate() + seafdav_config.generate() + gunicorn_config.generate() + seahub_config.generate() + if env_mgr.is_pro: + pro_config.generate() + + ccnet_config.do_syncdb() + seafile_config.do_syncdb() + seahub_config.do_syncdb() + seahub_config.prepare_avatar_dir() + # db_config.create_seahub_admin() + user_manuals_handler.copy_user_manuals() + create_seafile_server_symlink() + + set_file_perm() + + report_success() + +def report_success(): + message = '''\ + + +----------------------------------------------------------------- +Your seafile server configuration has been finished successfully. +----------------------------------------------------------------- + +run seafile server: ./seafile.sh { start | stop | restart } +run seahub server: ./seahub.sh { start | stop | restart } + +----------------------------------------------------------------- +If you are behind a firewall, remember to allow input/output of these tcp ports: +----------------------------------------------------------------- + +port of seafile fileserver: %(fileserver_port)s +port of seahub: 8000 + +When problems occur, Refer to + + %(server_manual_http)s + +for information. + +''' + + print(message % dict(fileserver_port=seafile_config.fileserver_port, + server_manual_http=SERVER_MANUAL_HTTP)) + + +if __name__ == '__main__': + try: + main() + except KeyboardInterrupt: + print() + print(Utils.highlight('The setup process is aborted')) + print() diff --git a/scripts/setup-seafile-mysql.sh b/scripts/setup-seafile-mysql.sh new file mode 100755 index 0000000000..18995e4a7c --- /dev/null +++ b/scripts/setup-seafile-mysql.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +######## +### This script is a wrapper for setup-seafile-mysql.py +######## + +set -e + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") + +cd "$INSTALLPATH" + +python_script=setup-seafile-mysql.py + +function err_and_quit () { + printf "\n\n\033[33mError occured during setup. 
\nPlease fix possible problems and run the script again.\033[m\n\n"
+    exit 1;
+}
+
+function check_python_executable() {
+    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
+        return 0
+    fi
+
+    if which python3 2>/dev/null 1>&2; then
+        PYTHON=python3
+    elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
+        echo
+        echo "The current version of python is not 3.x.x, please use Python 3.x.x."
+        echo
+        err_and_quit
+    else
+        PYTHON="python"$(python --version | cut -b 8-10)
+        if ! which $PYTHON 2>/dev/null 1>&2; then
+            echo
+            echo "Can't find a python executable of $PYTHON in PATH"
+            echo "Install $PYTHON before continuing."
+            echo "Or if you installed it in a non-standard PATH, set the PYTHON environment variable to it"
+            echo
+            err_and_quit
+        fi
+    fi
+}
+
+function check_python () {
+    echo "Checking python on this machine ..."
+    check_python_executable
+    echo
+}
+
+function check_java () {
+    echo -n "Checking for java ..."
+    if ! which java 2>/dev/null 1>&2; then
+        echo -e "\nJava is not found. Install it first.\n"
+        echo "On Debian/Ubuntu: apt-get install default-jre"
+        echo "On CentOS/RHEL: yum install jre"
+        err_and_quit;
+    fi
+    printf "Done.\n\n"
+}
+
+check_python;
+
+if [[ -d ${INSTALLPATH}/pro ]]; then
+    check_java;
+fi
+
+export PYTHON=$PYTHON
+
+export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
+
+exec $PYTHON "$python_script" "$@"
diff --git a/scripts/setup-seafile.sh b/scripts/setup-seafile.sh
new file mode 100755
index 0000000000..16f5a39859
--- /dev/null
+++ b/scripts/setup-seafile.sh
@@ -0,0 +1,761 @@
+#!/bin/bash
+
+SCRIPT=$(readlink -f "$0")
+INSTALLPATH=$(dirname "${SCRIPT}")
+TOPDIR=$(dirname "${INSTALLPATH}")
+default_ccnet_conf_dir=${TOPDIR}/ccnet
+default_seafile_data_dir=${TOPDIR}/seafile-data
+default_seahub_db=${TOPDIR}/seahub.db
+default_conf_dir=${TOPDIR}/conf
+default_pids_dir=${TOPDIR}/pids
+default_logs_dir=${TOPDIR}/logs
+
+export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}
+
+server_manual_http='https://download.seafile.com/published/seafile-manual/home.md'
+
+function welcome () {
+    echo "-----------------------------------------------------------------"
+    if [[ -d ${INSTALLPATH}/pro ]]; then
+        echo "This script will guide you through configuring and setting up your seafile professional server."
+    else
+        echo "This script will guide you through configuring and setting up your seafile server."
+    fi
+    echo -e "\nMake sure you have read the seafile server manual at \n\n\t${server_manual_http}\n"
+    echo -e "Note: This script will set up the seafile server using SQLite3,"
+    echo "which may have problems if your disk is on an NFS/CIFS/USB mount."
+    echo "In these cases, we suggest you set up the seafile server using MySQL."
+    echo
+    echo "Press [ENTER] to continue"
+    echo "-----------------------------------------------------------------"
+    read dummy
+    echo
+}
+
+function err_and_quit () {
+    printf "\n\n\033[33mError occurred during setup. \nPlease fix possible issues and run the script again.\033[m\n\n"
+    exit 1;
+}
+
+function on_ctrl_c_pressed () {
+    printf "\n\n\033[33mYou have pressed Ctrl-C. Setup is interrupted.\033[m\n\n"
+    exit 1;
+}
+
+# clean up newly created ccnet/seafile configs when exiting on SIGINT
+trap on_ctrl_c_pressed 2
+
+function check_sanity () {
+    if !
[[ -d ${INSTALLPATH}/seahub && -d ${INSTALLPATH}/seafile \ + && -d ${INSTALLPATH}/runtime ]]; then + echo + echo "The seafile-server diretory doesn't contain all needed files." + echo "Please make sure you have extracted all files and folders from tarball." + err_and_quit; + fi +} + +function read_yes_no () { + printf "[yes|no] " + read yesno; + while [[ "${yesno}" != "yes" && "${yesno}" != "no" ]] + do + printf "please answer [yes|no] " + read yesno; + done + + if [[ "${yesno}" == "no" ]]; then + return 1; + else + return 0; + fi +} + +function check_existing_ccnet () { + if [[ -d ${default_ccnet_conf_dir} ]]; then + echo "\033[31m Error: \033[0m Ccnet config dir \"${default_ccnet_conf_dir}\" already exists." + echo + exit 1; + fi + echo +} + +function check_existing_seafile () { + if [[ -d ${default_seafile_data_dir} ]]; then + echo "\033[31m Error: \033[0m Seafile server data dir \"${default_seafile_data_dir}\" already exists." + echo + exit 1; + fi + echo +} + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + err_and_quit + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + err_and_quit + fi + fi + + echo "Find python: $PYTHON" + echo +} + +function check_python_module () { + module=$1 + name=$2 + hint=$3 + printf " Checking python module: ${name} ... " + if ! $PYTHON -c "import ${module}" 2>/dev/null 1>&2; then + echo + printf "\033[33m ${name} \033[m is not installed, Please install it first.\n" + if [[ "${hint}" != "" ]]; then + printf "${hint}" + echo + fi + err_and_quit; + fi + echo -e "Done." +} + +function check_python () { + echo "Checking python on this machine ..." + check_python_executable + check_python_module sqlite3 python-sqlite3 + echo +} + +function check_sqlite3 () { + echo -n "Checking for sqlite3 ..." + if ! which sqlite3 2>/dev/null 1>&2; then + echo -e "\nSqlite3 is not found. install it first.\n" + echo "On Debian/Ubuntu: apt-get install sqlite3" + echo "On CentOS/RHEL: yum install sqlite" + err_and_quit; + fi + printf "Done.\n\n" +} + +function check_java () { + echo -n "Checking for java ..." + if ! which java 2>/dev/null 1>&2; then + echo -e "\nJava is not found. install it first.\n" + echo "On Debian/Ubuntu: apt-get install default-jre" + echo "On CentOS/RHEL: yum install jre" + err_and_quit; + fi + printf "Done.\n\n" +} + +function check_system_dependency () { + printf "Checking packages needed by seafile ...\n\n" + check_python; + check_sqlite3; + + if [[ -d ${INSTALLPATH}/pro ]]; then + check_java; + fi + + printf "Checking Done.\n\n" +} + +function ask_question () { + question=$1 + default=$2 + key=$3 + printf "${question}" + printf "\n" + if [[ "${default}" != "" && "${default}" != "nodefault" ]] ; then + printf "[default: ${default} ] " + elif [[ "${key}" != "" ]]; then + printf "[${key}]: " + fi +} + +function get_server_name () { + question="What would you like to use as the name of this seafile server?\nYour seafile users will be able to see the name in their seafile client." 
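+    # Re-prompt until a non-empty name matching the validation regex below is entered.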
+ hint="You can use a-z, A-Z, 0-9, _ and -, and the length should be 3 ~ 15" + ask_question "${question}\n${hint}" "nodefault" "server name" + read server_name + if [[ "${server_name}" == "" ]]; then + echo + echo "server name cannot be empty" + get_server_name + elif [[ ! ${server_name} =~ ^[a-zA-Z0-9_-]{3,14}$ ]]; then + printf "\n\033[33m${server_name}\033[m is not a valid name.\n" + get_server_name; + fi + echo +} + +function get_server_ip_or_domain () { + question="What is the ip or domain of this server?\nFor example, www.mycompany.com, or, 192.168.1.101" + ask_question "${question}\n" "nodefault" "This server's ip or domain" + read ip_or_domain + if [[ "${ip_or_domain}" == "" ]]; then + echo + echo "ip or domain cannot be empty" + get_server_ip_or_domain + fi + echo +} + +# function get_ccnet_server_port () { +# question="What tcp port do you want to use for ccnet server?" +# hint="10001 is the recommended port." +# default="10001" +# ask_question "${question}\n${hint}" "${default}" +# read server_port +# if [[ "${server_port}" == "" ]]; then +# server_port="${default}" +# fi +# if [[ ! ${server_port} =~ ^[0-9]+$ ]]; then +# echo "\"${server_port}\" is not a valid port number. " +# get_ccnet_server_port +# fi +# echo +# } + +# function get_seafile_server_port () { +# question="What tcp port would you like to use for seafile server?" +# hint="12001 is the recommended port." +# default="12001" +# ask_question "${question}\n${hint}" "${default}" +# read seafile_server_port +# if [[ "${seafile_server_port}" == "" ]]; then +# seafile_server_port="${default}" +# fi +# if [[ ! ${seafile_server_port} =~ ^[0-9]+$ ]]; then +# echo "\"${seafile_server_port}\" is not a valid port number. " +# get_seafile_server_port +# fi +# echo +# } + +function get_fileserver_port () { + question="What tcp port do you want to use for seafile fileserver?" + hint="8082 is the recommended port." + default="8082" + ask_question "${question}\n${hint}" "${default}" + read fileserver_port + if [[ "${fileserver_port}" == "" ]]; then + fileserver_port="${default}" + fi + if [[ ! ${fileserver_port} =~ ^[0-9]+$ ]]; then + echo "\"${fileserver_port}\" is not a valid port number. " + get_fileserver_port + fi + echo +} + + +# function get_seafile_data_dir () { +# question="Where would you like to store your seafile data?" +# note="Please use a volume with enough free space." +# default=${default_seafile_data_dir} +# ask_question "${question} \n\033[33mNote: \033[m${note}" "${default}" +# read seafile_data_dir +# if [[ "${seafile_data_dir}" == "" ]]; then +# seafile_data_dir=${default} +# fi +# +# if [[ -d ${seafile_data_dir} && -f ${seafile_data_dir}/seafile.conf ]]; then +# echo +# echo "It seems that you have already existing seafile data in ${seafile_data_dir}." +# echo "Would you like to use the existing seafile data?" +# if ! read_yes_no; then +# echo "You have chosen not to use existing seafile data in ${seafile_data_dir}" +# echo "You need to specify a different seafile data directory or remove ${seafile_data_dir} before continuing." +# get_seafile_data_dir +# else +# use_existing_seafile="true" +# fi +# elif [[ -d ${seafile_data_dir} && $(ls -A ${seafile_data_dir}) != "" ]]; then +# echo +# echo "${seafile_data_dir} is an existing non-empty directory. Please specify a different directory" +# echo +# get_seafile_data_dir +# elif [[ ! ${seafile_data_dir} =~ ^/ ]]; then +# echo +# echo "\"${seafile_data_dir}\" is not an absolute path. Please specify an absolute path." +# echo +# get_seafile_data_dir +# elif [[ ! 
-d $(dirname ${seafile_data_dir}) ]]; then +# echo +# echo "The path $(dirname ${seafile_data_dir}) does not exist." +# echo +# get_seafile_data_dir +# fi +# echo +# } + +function gen_ccnet_conf () { + mkdir -p ${default_conf_dir} + ccnet_conf=${default_conf_dir}/ccnet.conf + if ! $(cat > ${ccnet_conf} < ${seafile_conf} < ${gunicorn_conf} < ${seafdav_conf} < ${dest_settings_py} </dev/null 1>&2; then + echo "Failed to sync ccnet groupmgr database." + err_and_quit; +fi + +ccnet_config_db=${TOPDIR}/ccnet/misc/config.db +ccnet_config_sql=${INSTALLPATH}/sql/sqlite/config.sql +if ! sqlite3 ${ccnet_config_db} ".read ${ccnet_config_sql}" 2>/dev/null 1>&2; then + echo "Failed to sync ccnet config database." + err_and_quit; +fi + +ccnet_org_db=${TOPDIR}/ccnet/OrgMgr/orgmgr.db +ccnet_org_sql=${INSTALLPATH}/sql/sqlite/org.sql +if ! sqlite3 ${ccnet_org_db} ".read ${ccnet_org_sql}" 2>/dev/null 1>&2; then + echo "Failed to sync ccnet org database." + err_and_quit; +fi + +ccnet_user_db=${TOPDIR}/ccnet/PeerMgr/usermgr.db +ccnet_user_sql=${INSTALLPATH}/sql/sqlite/user.sql +if ! sqlite3 ${ccnet_user_db} ".read ${ccnet_user_sql}" 2>/dev/null 1>&2; then + echo "Failed to sync ccnet user database." + err_and_quit; +fi + +seafile_db=${TOPDIR}/seafile-data/seafile.db +seafile_sql=${INSTALLPATH}/sql/sqlite/seafile.sql +if ! sqlite3 ${seafile_db} ".read ${seafile_sql}" 2>/dev/null 1>&2; then + echo "Failed to sync seafile database." + err_and_quit; +fi + +seahub_db=${TOPDIR}/seahub.db +seahub_sqls=${INSTALLPATH}/seahub/sql/sqlite3.sql +if ! sqlite3 ${seahub_db} ".read ${seahub_sqls}" 2>/dev/null 1>&2; then + echo "Failed to sync seahub database." + err_and_quit; +fi +echo +echo "Done." + +# prepare avatar folder + +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars + +if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" + ln -s ../../../seahub-data/avatars ${media_dir} +fi + +# Make a seafile-server symlink, like this: +# /data/haiwen/ +# -- seafile-server-2.0.4 +# -- seafile-server-latest # symlink to 2.0.4 +seafile_server_symlink=${TOPDIR}/seafile-server-latest +echo +echo -n "creating seafile-server-latest symbolic link ... " +if ! ln -s $(basename ${INSTALLPATH}) ${seafile_server_symlink}; then + echo + echo + echo "Failed to create symbolic link ${seafile_server_symlink}" + err_and_quit; +fi +echo "done" +echo + +chmod 0600 "$dest_settings_py" +chmod 0700 "$default_ccnet_conf_dir" +chmod 0700 "$default_seafile_data_dir" +chmod 0700 "$default_conf_dir" + +if [[ -d ${INSTALLPATH}/pro ]]; then + pro_py=${INSTALLPATH}/pro/pro.py + $PYTHON ${pro_py} setup +fi + +# ------------------------------------------- +# copy user manuals to library template +# ------------------------------------------- +copy_user_manuals; + +# ------------------------------------------- +# final message +# ------------------------------------------- + +sleep 1 + +echo +echo "-----------------------------------------------------------------" +echo "Your seafile server configuration has been completed successfully." 
+echo "-----------------------------------------------------------------" +echo +echo "run seafile server: ./seafile.sh { start | stop | restart }" +echo "run seahub server: ./seahub.sh { start | stop | restart }" +echo +echo "-----------------------------------------------------------------" +echo "If the server is behind a firewall, remember to open these tcp ports:" +echo "-----------------------------------------------------------------" +echo +echo "port of seafile fileserver: ${fileserver_port}" +echo "port of seahub: 8000" +echo +echo -e "When problems occur, refer to\n" +echo -e " ${server_manual_http}\n" +echo "for more information." +echo diff --git a/scripts/sqlite2mysql.py b/scripts/sqlite2mysql.py new file mode 100644 index 0000000000..dfd9a395fb --- /dev/null +++ b/scripts/sqlite2mysql.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 + +"""Lifted from: +http://stackoverflow.com/questions/18671/quick-easy-way-to-migrate-sqlite3-to-mysql + +Run like so: + +sqlite3 .db .dump | python sqlite2mysql.py > .sql + +Then you can import the .sql file into MySql + +Note - you need to add foreign key constrains manually since sqlite doesn't actually support them +""" +import re +import fileinput + +def this_line_is_useless(line): + useless_es = [ + 'BEGIN TRANSACTION', + 'COMMIT', + 'sqlite_sequence', + 'CREATE UNIQUE INDEX', + 'PRAGMA', + ] + for useless in useless_es: + if re.search(useless, line): + return True + +def has_primary_key(line): + return bool(re.search(r'PRIMARY KEY', line)) + +for line in fileinput.input(): + searching_for_end = False + if this_line_is_useless(line): continue + + # this line was necessary because ''); was getting + # converted (inappropriately) to \'); + if re.match(r".*, ''\);", line): + line = re.sub(r"''\);", r'``);', line) + + if re.match(r'^CREATE TABLE.*', line): + searching_for_end = True + + m = re.search('CREATE TABLE [`"]?(\w*)[`"]?(.*)', line) + if m: + name, sub = m.groups() + sub = sub.replace('"','`') + line = "DROP TABLE IF EXISTS `%(name)s`;\nCREATE TABLE IF NOT EXISTS `%(name)s`%(sub)s\n" + line = line % dict(name=name, sub=sub) + else: + m = re.search('INSERT INTO "(\w*)"(.*)', line) + if m: + name, sub = m.groups() + line = 'INSERT INTO `%s`%s\n' % m.groups() + line = line.replace('"', r'\"') + line = line.replace('"', "'") + # line = re.sub(r"([^'])'t'(.)", r"\1THIS_IS_TRUE\2", line) + # line = line.replace('THIS_IS_TRUE', '1') + # line = re.sub(r"([^'])'f'(.)", r"\1THIS_IS_FALSE\2", line) + # line = line.replace('THIS_IS_FALSE', '0') + + # Add auto_increment if it's not there since sqlite auto_increments ALL + # primary keys + if searching_for_end: + if re.search(r"integer(?:\s+\w+)*\s*PRIMARY KEY(?:\s+\w+)*\s*,", line, re.I): + line = line.replace("PRIMARY KEY", "PRIMARY KEY AUTO_INCREMENT") + # replace " and ' with ` because mysql doesn't like quotes in CREATE commands + line = line.replace('"', '`').replace("'", '`') + + # And now we convert it back (see above) + if re.match(r".*, ``\);", line): + line = re.sub(r'``\);', r"'');", line) + + if searching_for_end and re.match(r'.*\);', line): + searching_for_end = False + + if re.match(r"CREATE INDEX", line): + line = re.sub('"', '`', line) + + line = line.replace('"', '`') + line = line.replace('AUTOINCREMENT', 'AUTO_INCREMENT') + print(line) diff --git a/scripts/sqlite2mysql.sh b/scripts/sqlite2mysql.sh new file mode 100755 index 0000000000..9c4fccf6d7 --- /dev/null +++ b/scripts/sqlite2mysql.sh @@ -0,0 +1,118 @@ +#!/bin/sh +# +# This shell script and corresponding sqlite2mysql.py are 
used to +# migrate Seafile data from SQLite to MySQL. +# +# Setup: +# +# 1. Move this file and sqlite2mysql.py to the top directory of your Seafile +# installation path (e.g. /data/haiwen). +# 2. Run: ./sqlite2mysql.sh +# 3. Three files(ccnet-db.sql, seafile-db.sql, seahub-db.sql) are created. +# 4. Loads these files to MySQL +# (mysql> source ccnet-db.sql) +# + +CCNET_DB='ccnet-db.sql' +SEAFILE_DB='seafile-db.sql' +SEAHUB_DB='seahub-db.sql' + +########## ccnet +seafile_path=$(pwd) +if [ -f "${seafile_path}/conf/ccnet.conf" ]; then + USER_MGR_DB=${seafile_path}/ccnet/PeerMgr/usermgr.db + GRP_MGR_DB=${seafile_path}/ccnet/GroupMgr/groupmgr.db +else + echo "${seafile_path}/conf/ccnet.conf does not exists." + read -p "Please provide your ccnet.conf path(e.g. /data/haiwen/conf/ccnet.conf): " ccnet_conf_path + if [ -f ${ccnet_conf_path} ]; then + USER_MGR_DB=$(dirname $(dirname "${ccnet_conf_path}"))/ccnet/PeerMgr/usermgr.db + GRP_MGR_DB=$(dirname $(dirname "${ccnet_conf_path}"))/ccnet/GroupMgr/groupmgr.db + else + echo "${ccnet_conf_path} does not exists, quit." + exit 1 + fi +fi + +rm -rf ${CCNET_DB} + +echo "sqlite3 ${USER_MGR_DB} .dump | python sqlite2mysql.py > ${CCNET_DB}" +sqlite3 ${USER_MGR_DB} .dump | python sqlite2mysql.py > ${CCNET_DB} +echo "sqlite3 ${GRP_MGR_DB} .dump | python sqlite2mysql.py >> ${CCNET_DB}" +sqlite3 ${GRP_MGR_DB} .dump | python sqlite2mysql.py >> ${CCNET_DB} + +# change ctime from INTEGER to BIGINT in EmailUser table +sed 's/ctime INTEGER/ctime BIGINT/g' ${CCNET_DB} > ${CCNET_DB}.tmp && mv ${CCNET_DB}.tmp ${CCNET_DB} + +# change email in UserRole from TEXT to VARCHAR(255) +sed 's/email TEXT, role TEXT/email VARCHAR(255), role TEXT/g' ${CCNET_DB} > ${CCNET_DB}.tmp && mv ${CCNET_DB}.tmp ${CCNET_DB} + +########## seafile +rm -rf ${SEAFILE_DB} + +if [ -f "${seafile_path}/seafile-data/seafile.db" ]; then + echo "sqlite3 ${seafile_path}/seafile-data/seafile.db .dump | python sqlite2mysql.py > ${SEAFILE_DB}" + sqlite3 ${seafile_path}/seafile-data/seafile.db .dump | python sqlite2mysql.py > ${SEAFILE_DB} +else + echo "${seafile_path}/seafile-data/seafile.db does not exists." + read -p "Please provide your seafile.db path(e.g. /data/haiwen/seafile-data/seafile.db): " seafile_db_path + if [ -f ${seafile_db_path} ];then + echo "sqlite3 ${seafile_db_path} .dump | python sqlite2mysql.py > ${SEAFILE_DB}" + sqlite3 ${seafile_db_path} .dump | python sqlite2mysql.py > ${SEAFILE_DB} + else + echo "${seafile_db_path} does not exists, quit." + exit 1 + fi +fi + +# change owner_id in RepoOwner from TEXT to VARCHAR(255) +sed 's/owner_id TEXT/owner_id VARCHAR(255)/g' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB} + +# change user_name in RepoGroup from TEXT to VARCHAR(255) +sed 's/user_name TEXT/user_name VARCHAR(255)/g' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB} + +########## seahub +rm -rf ${SEAHUB_DB} + +if [ -f "${seafile_path}/seahub.db" ]; then + echo "sqlite3 ${seafile_path}/seahub.db .dump | tr -d '\n' | sed 's/;/;\n/g' | python sqlite2mysql.py > ${SEAHUB_DB}" + sqlite3 ${seafile_path}/seahub.db .dump | tr -d '\n' | sed 's/;/;\n/g' | python sqlite2mysql.py > ${SEAHUB_DB} +else + echo "${seafile_path}/seahub.db does not exists." + read -p "Please prove your seahub.db path(e.g. 
/data/haiwen/seahub.db): " seahub_db_path
+    if [ -f ${seahub_db_path} ]; then
+        echo "sqlite3 ${seahub_db_path} .dump | tr -d '\n' | sed 's/;/;\n/g' | python sqlite2mysql.py > ${SEAHUB_DB}"
+        sqlite3 ${seahub_db_path} .dump | tr -d '\n' | sed 's/;/;\n/g' | python sqlite2mysql.py > ${SEAHUB_DB}
+    else
+        echo "${seahub_db_path} does not exist, quit."
+        exit 1
+    fi
+fi
+
+# change username from VARCHAR(256) to VARCHAR(255) in wiki_personalwiki
+sed 's/varchar(256) NOT NULL UNIQUE/varchar(255) NOT NULL UNIQUE/g' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
+
+# remove unique constraint from contacts_contact
+sed 's/, UNIQUE (`user_email`, `contact_email`)//g' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
+
+# remove base_dirfileslastmodifiedinfo records to avoid json string parsing issues between sqlite and mysql
+sed '/INSERT INTO `base_dirfileslastmodifiedinfo`/d' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
+
+# remove notifications_usernotification records to avoid json string parsing issues between sqlite and mysql
+sed '/INSERT INTO `notifications_usernotification`/d' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
+
+
+########## common logic
+
+# add ENGINE=INNODB to CREATE TABLE statements
+for sql_file in $CCNET_DB $SEAFILE_DB $SEAHUB_DB
+do
+    sed -r 's/(CREATE TABLE.*);/\1 ENGINE=INNODB;/g' $sql_file > $sql_file.tmp && mv $sql_file.tmp $sql_file
+done
+
+# remove COLLATE NOCASE if possible
+for sql_file in $CCNET_DB $SEAFILE_DB $SEAHUB_DB
+do
+    sed 's/COLLATE NOCASE//g' $sql_file > $sql_file.tmp && mv $sql_file.tmp $sql_file
+done
+
diff --git a/scripts/upgrade/add_collate.sh b/scripts/upgrade/add_collate.sh
new file mode 100755
index 0000000000..e85a74d73e
--- /dev/null
+++ b/scripts/upgrade/add_collate.sh
@@ -0,0 +1,75 @@
+#!/bin/sh
+#
+# This shell script is used to add COLLATE NOCASE to email fields to avoid case
+# issues in sqlite.
+#
+# 1.
./add-collate.sh +# + +USER_DB='/tmp/user-db.sql' +GROUP_DB='/tmp/group-db.sql' +SEAFILE_DB='/tmp/seafile-db.sql' +SEAHUB_DB='/tmp/seahub-db.sql' + +ccnet_dir=$1 + +########## ccnet +USER_MGR_DB=${ccnet_dir}/PeerMgr/usermgr.db +GRP_MGR_DB=${ccnet_dir}/GroupMgr/groupmgr.db + +rm -rf ${USER_DB} +rm -rf ${GROUP_DB} + +echo "sqlite3 ${USER_MGR_DB} .dump > ${USER_DB}" +sqlite3 ${USER_MGR_DB} .dump > ${USER_DB} +echo "sqlite3 ${GRP_MGR_DB} .dump > ${GROUP_DB}" +sqlite3 ${GRP_MGR_DB} .dump > ${GROUP_DB} + +sed -r 's/(CREATE TABLE EmailUser.*)email TEXT,(.*)/\1email TEXT COLLATE NOCASE,\2/I' ${USER_DB} > ${USER_DB}.tmp && mv ${USER_DB}.tmp ${USER_DB} +sed -r 's/(CREATE TABLE Binding.*)email TEXT,(.*)/\1email TEXT COLLATE NOCASE,\2/I' ${USER_DB} > ${USER_DB}.tmp && mv ${USER_DB}.tmp ${USER_DB} +sed -r 's/(CREATE TABLE `Group`.*)`creator_name` VARCHAR\(255\),(.*)/\1`creator_name` VARCHAR\(255\) COLLATE NOCASE,\2/I' ${GROUP_DB} > ${GROUP_DB}.tmp && mv ${GROUP_DB}.tmp ${GROUP_DB} +sed -r 's/(CREATE TABLE `GroupUser`.*)`user_name` VARCHAR\(255\),(.*)/\1`user_name` VARCHAR\(255\) COLLATE NOCASE,\2/I' ${GROUP_DB} > ${GROUP_DB}.tmp && mv ${GROUP_DB}.tmp ${GROUP_DB} + +# backup & restore +mv ${USER_MGR_DB} ${USER_MGR_DB}.`date +"%Y%m%d%H%M%S"` +mv ${GRP_MGR_DB} ${GRP_MGR_DB}.`date +"%Y%m%d%H%M%S"` +sqlite3 ${USER_MGR_DB} < ${USER_DB} +sqlite3 ${GRP_MGR_DB} < ${GROUP_DB} + +########## seafile +rm -rf ${SEAFILE_DB} + +SEAFILE_DB_FILE=$2/seafile.db +echo "sqlite3 ${SEAFILE_DB_FILE} .dump > ${SEAFILE_DB}" +sqlite3 ${SEAFILE_DB_FILE} .dump > ${SEAFILE_DB} + +sed -r 's/(CREATE TABLE RepoOwner.*)owner_id TEXT(.*)/\1owner_id TEXT COLLATE NOCASE\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB} +sed -r 's/(CREATE TABLE RepoGroup.*)user_name TEXT,(.*)/\1user_name TEXT COLLATE NOCASE,\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB} +sed -r 's/(CREATE TABLE RepoUserToken.*)email VARCHAR\(255\),(.*)/\1email VARCHAR\(255\) COLLATE NOCASE,\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB} +sed -r 's/(CREATE TABLE UserQuota.*)user VARCHAR\(255\),(.*)/\1user VARCHAR\(255\) COLLATE NOCASE,\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB} +sed -r 's/(CREATE TABLE SharedRepo.*)from_email VARCHAR\(512\), to_email VARCHAR\(512\),(.*)/\1from_email VARCHAR\(512\), to_email VARCHAR\(512\) COLLATE NOCASE,\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB} + +# backup & restore +mv ${SEAFILE_DB_FILE} ${SEAFILE_DB_FILE}.`date +"%Y%m%d%H%M%S"` +sqlite3 ${SEAFILE_DB_FILE} < ${SEAFILE_DB} + +########## seahub +rm -rf ${SEAHUB_DB} + +SEAHUB_DB_FILE=$3 +echo "sqlite3 ${SEAHUB_DB_FILE} .Dump | tr -d '\n' | sed 's/;/;\n/g' > ${SEAHUB_DB}" +sqlite3 ${SEAHUB_DB_FILE} .dump | tr -d '\n' | sed 's/;/;\n/g' > ${SEAHUB_DB} + +sed -r 's/(CREATE TABLE "notifications_usernotification".*)"to_user" varchar\(255\) NOT NULL,(.*)/\1"to_user" varchar\(255\) NOT NULL COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} +sed -r 's/(CREATE TABLE "profile_profile".*)"user" varchar\(75\) NOT NULL UNIQUE,(.*)/\1"user" varchar\(75\) NOT NULL UNIQUE COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} +sed -r 's/(CREATE TABLE "share_fileshare".*)"username" varchar\(255\) NOT NULL,(.*)/\1"username" varchar\(255\) NOT NULL COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} +sed -r 's/(CREATE TABLE "api2_token".*)"user" 
varchar\(255\) NOT NULL UNIQUE,(.*)/\1"user" varchar\(255\) NOT NULL UNIQUE COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} +sed -r 's/(CREATE TABLE "wiki_personalwiki".*)"username" varchar\(256\) NOT NULL UNIQUE,(.*)/\1"username" varchar\(256\) NOT NULL UNIQUE COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} +sed -r 's/(CREATE TABLE "message_usermessage".*)"from_email" varchar\(75\) NOT NULL,\s*"to_email" varchar\(75\) NOT NULL,(.*)/\1"from_email" varchar\(75\) NOT NULL COLLATE NOCASE, "to_email" varchar\(75\) NOT NULL COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} +sed -r 's/(CREATE TABLE "avatar_avatar".*)"emailuser" varchar\(255\) NOT NULL,(.*)/\1"emailuser" varchar\(255\) NOT NULL COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} + +# backup & restore +mv ${SEAHUB_DB_FILE} ${SEAHUB_DB_FILE}.`date +"%Y%m%d%H%M%S"` +sqlite3 ${SEAHUB_DB_FILE} < ${SEAHUB_DB} + +rm -rf ${USER_DB} ${GROUP_DB} ${SEAFILE_DB} ${SEAHUB_DB} diff --git a/scripts/upgrade/check_backend.py b/scripts/upgrade/check_backend.py new file mode 100644 index 0000000000..38e7e14739 --- /dev/null +++ b/scripts/upgrade/check_backend.py @@ -0,0 +1,41 @@ +#coding: UTF-8 + +import os +import sys +import configparser + + +def main(): + cfg = configparser.ConfigParser() + seafile_conf_dir = os.environ['SEAFILE_CONF_DIR'] + seafile_conf = os.path.join(seafile_conf_dir, 'seafile.conf') + cfg.read(seafile_conf) + + sections_map = { + 'blocks': 'block_backend', + 'fs': 'fs_object_backend', + 'commits': 'commit_object_backend', + } + + backends = {} + for name, section in sections_map.items(): + if cfg.has_option(section, 'name'): + backend_name = cfg.get(section, 'name') + else: + backend_name = 'fs' + backends[name] = backend_name + + if any([ bend == 's3' for bend in list(backends.values()) ]): + print('s3') + return + + if any([ bend == 'ceph' for bend in list(backends.values()) ]): + print('ceph') + return + +if __name__ == '__main__': + try: + main() + except Exception as e: + sys.stderr.write(str(e)) + sys.stderr.flush() diff --git a/scripts/upgrade/db_update_1.3_1.4.py b/scripts/upgrade/db_update_1.3_1.4.py new file mode 100644 index 0000000000..864efa74e0 --- /dev/null +++ b/scripts/upgrade/db_update_1.3_1.4.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python3 + +import sqlite3 +import os +import sys + +def usage(): + msg = 'usage: %s ' % os.path.basename(sys.argv[0]) + print(msg) + +def main(): + seahub_db = sys.argv[1] + + conn = sqlite3.connect(seahub_db) + c = conn.cursor() + + try: + c.execute('SELECT s_type from share_fileshare') + except sqlite3.OperationalError: + # only add this column if not exist yet, so this script is idempotent + c.execute('ALTER table share_fileshare add column "s_type" varchar(2) NOT NULL DEFAULT "f"') + + c.execute('CREATE INDEX IF NOT EXISTS "share_fileshare_f775835c" ON "share_fileshare" ("s_type")') + + sql = '''CREATE TABLE IF NOT EXISTS "base_dirfileslastmodifiedinfo" ( + "id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, + "repo_id" varchar(36) NOT NULL, + "parent_dir" text NOT NULL, + "parent_dir_hash" varchar(12) NOT NULL, + "dir_id" varchar(40) NOT NULL, + "last_modified_info" text NOT NULL, + UNIQUE ("repo_id", "parent_dir_hash"))''' + + c.execute(sql) + + sql = '''CREATE TABLE IF NOT EXISTS "api2_token" ( + "key" varchar(40) NOT NULL PRIMARY KEY, + "user" varchar(255) NOT NULL UNIQUE, + "created" datetime NOT NULL)''' + + c.execute(sql) 
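+    # The ALTER is wrapped in try/except and the CREATE statements use IF NOT EXISTS, so re-running this script is safe.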
+ + conn.commit() + +if __name__ == '__main__': + if len(sys.argv) != 2: + usage() + sys.exit(1) + + main() + + diff --git a/scripts/upgrade/db_update_helper.py b/scripts/upgrade/db_update_helper.py new file mode 100644 index 0000000000..007f84d528 --- /dev/null +++ b/scripts/upgrade/db_update_helper.py @@ -0,0 +1,386 @@ +# coding: UTF-8 + +import sys +import os +import configparser +import glob + +HAS_PYMYSQL = True +try: + import pymysql +except ImportError: + HAS_PYMYSQL = False + +HAS_SQLITE3 = True +try: + import sqlite3 +except ImportError: + HAS_SQLITE3 = False + +class EnvManager(object): + def __init__(self): + self.upgrade_dir = os.path.dirname(__file__) + self.install_path = os.path.dirname(self.upgrade_dir) + self.top_dir = os.path.dirname(self.install_path) + self.ccnet_dir = os.environ['CCNET_CONF_DIR'] + self.seafile_dir = os.environ['SEAFILE_CONF_DIR'] + self.central_config_dir = os.environ.get('SEAFILE_CENTRAL_CONF_DIR') + self.seafevents_db_dir = os.path.join(os.path.dirname(self.install_path), 'pro-data') + + +env_mgr = EnvManager() + + +class Utils(object): + @staticmethod + def highlight(content, is_error=False): + '''Add ANSI color to content to get it highlighted on terminal''' + if is_error: + return '\x1b[1;31m%s\x1b[m' % content + else: + return '\x1b[1;32m%s\x1b[m' % content + + @staticmethod + def info(msg): + print(Utils.highlight('[INFO] ') + msg) + + @staticmethod + def warning(msg): + print(Utils.highlight('[WARNING] ') + msg) + + @staticmethod + def error(msg): + print(Utils.highlight('[ERROR] ') + msg) + sys.exit(1) + + @staticmethod + def read_config(config_path, defaults): + if not os.path.exists(config_path): + Utils.error('Config path %s doesn\'t exist, stop db upgrade' % + config_path) + cp = configparser.ConfigParser(defaults) + cp.read(config_path) + return cp + + +class MySQLDBInfo(object): + def __init__(self, host, port, username, password, db, unix_socket=None): + self.host = host + self.port = port + self.username = username + self.password = password + self.db = db + self.unix_socket = unix_socket + + +class DBUpdater(object): + def __init__(self, version, name): + self.sql_dir = os.path.join(env_mgr.upgrade_dir, 'sql', version, name) + pro_path = os.path.join(env_mgr.install_path, 'pro') + self.is_pro = os.path.exists(pro_path) + self.version = version + + @staticmethod + def get_instance(version): + '''Detect whether we are using mysql or sqlite3''' + ccnet_db_info = DBUpdater.get_ccnet_mysql_info(version) + seafile_db_info = DBUpdater.get_seafile_mysql_info(version) + seahub_db_info = DBUpdater.get_seahub_mysql_info() + + if ccnet_db_info and seafile_db_info and seahub_db_info: + Utils.info('You are using MySQL') + if not HAS_PYMYSQL: + Utils.error('Python pymysql module is not found') + updater = MySQLDBUpdater(version, ccnet_db_info, seafile_db_info, seahub_db_info) + + elif (ccnet_db_info is None) and (seafile_db_info is None) and (seahub_db_info is None): + Utils.info('You are using SQLite3') + if not HAS_SQLITE3: + Utils.error('Python sqlite3 module is not found') + updater = SQLiteDBUpdater(version) + + else: + def to_db_string(info): + if info is None: + return 'SQLite3' + else: + return 'MySQL' + Utils.error('Error:\n ccnet is using %s\n seafile is using %s\n seahub is using %s\n' + % (to_db_string(ccnet_db_info), + to_db_string(seafile_db_info), + to_db_string(seahub_db_info))) + + return updater + + def update_db(self): + ccnet_sql = os.path.join(self.sql_dir, 'ccnet.sql') + seafile_sql = os.path.join(self.sql_dir, 'seafile.sql') 
+        seahub_sql = os.path.join(self.sql_dir, 'seahub.sql')
+        seafevents_sql = os.path.join(self.sql_dir, 'seafevents.sql')
+
+        if os.path.exists(ccnet_sql):
+            Utils.info('updating ccnet database...')
+            self.update_ccnet_sql(ccnet_sql)
+
+        if os.path.exists(seafile_sql):
+            Utils.info('updating seafile database...')
+            self.update_seafile_sql(seafile_sql)
+
+        if os.path.exists(seahub_sql):
+            Utils.info('updating seahub database...')
+            self.update_seahub_sql(seahub_sql)
+
+        if os.path.exists(seafevents_sql):
+            self.update_seafevents_sql(seafevents_sql)
+
+    @staticmethod
+    def get_ccnet_mysql_info(version):
+        if version > '5.0.0':
+            config_path = env_mgr.central_config_dir
+        else:
+            config_path = env_mgr.ccnet_dir
+
+        ccnet_conf = os.path.join(config_path, 'ccnet.conf')
+        defaults = {
+            'HOST': '127.0.0.1',
+            'PORT': '3306',
+            'UNIX_SOCKET': '',
+        }
+
+        config = Utils.read_config(ccnet_conf, defaults)
+        db_section = 'Database'
+
+        if not config.has_section(db_section):
+            return None
+
+        type = config.get(db_section, 'ENGINE')
+        if type != 'mysql':
+            return None
+
+        try:
+            host = config.get(db_section, 'HOST')
+            port = config.getint(db_section, 'PORT')
+            username = config.get(db_section, 'USER')
+            password = config.get(db_section, 'PASSWD')
+            db = config.get(db_section, 'DB')
+            unix_socket = config.get(db_section, 'UNIX_SOCKET')
+        except configparser.NoOptionError as e:
+            Utils.error('Database config in ccnet.conf is invalid: %s' % e)
+
+        info = MySQLDBInfo(host, port, username, password, db, unix_socket)
+        return info
+
+    @staticmethod
+    def get_seafile_mysql_info(version):
+        if version > '5.0.0':
+            config_path = env_mgr.central_config_dir
+        else:
+            config_path = env_mgr.seafile_dir
+
+        seafile_conf = os.path.join(config_path, 'seafile.conf')
+        defaults = {
+            'HOST': '127.0.0.1',
+            'PORT': '3306',
+            'UNIX_SOCKET': '',
+        }
+        config = Utils.read_config(seafile_conf, defaults)
+        db_section = 'database'
+
+        if not config.has_section(db_section):
+            return None
+
+        type = config.get(db_section, 'type')
+        if type != 'mysql':
+            return None
+
+        try:
+            host = config.get(db_section, 'host')
+            port = config.getint(db_section, 'port')
+            username = config.get(db_section, 'user')
+            password = config.get(db_section, 'password')
+            db = config.get(db_section, 'db_name')
+            unix_socket = config.get(db_section, 'unix_socket')
+        except configparser.NoOptionError as e:
+            Utils.error('Database config in seafile.conf is invalid: %s' % e)
+
+        info = MySQLDBInfo(host, port, username, password, db, unix_socket)
+        return info
+
+    @staticmethod
+    def get_seahub_mysql_info():
+        sys.path.insert(0, env_mgr.top_dir)
+        if env_mgr.central_config_dir:
+            sys.path.insert(0, env_mgr.central_config_dir)
+        try:
+            import seahub_settings # pylint: disable=F0401
+        except ImportError as e:
+            Utils.error('Failed to import seahub_settings.py: %s' % e)
+
+        if not hasattr(seahub_settings, 'DATABASES'):
+            return None
+
+        try:
+            d = seahub_settings.DATABASES['default']
+            if d['ENGINE'] != 'django.db.backends.mysql':
+                return None
+
+            host = d.get('HOST', '127.0.0.1')
+            port = int(d.get('PORT', 3306))
+            username = d['USER']
+            password = d['PASSWORD']
+            db = d['NAME']
+            unix_socket = host if host.startswith('/') else None
+        except KeyError as e:
+            Utils.error('Database config in seahub_settings.py is invalid: %s' % e)
+
+        info = MySQLDBInfo(host, port, username, password, db, unix_socket)
+        return info
+
+    def update_ccnet_sql(self, ccnet_sql):
+        raise NotImplementedError
+
+    def update_seafile_sql(self, seafile_sql):
+        raise NotImplementedError
+
+    def 
update_seahub_sql(self, seahub_sql): + raise NotImplementedError + + def update_seafevents_sql(self, seafevents_sql): + raise NotImplementedError + +class CcnetSQLiteDB(object): + def __init__(self, ccnet_dir): + self.ccnet_dir = ccnet_dir + + def get_db(self, dbname): + dbs = ( + 'ccnet.db', + 'GroupMgr/groupmgr.db', + 'misc/config.db', + 'OrgMgr/orgmgr.db', + 'PeerMgr/usermgr.db', + ) + for db in dbs: + if os.path.splitext(os.path.basename(db))[0] == dbname: + return os.path.join(self.ccnet_dir, db) + +class SQLiteDBUpdater(DBUpdater): + def __init__(self, version): + DBUpdater.__init__(self, version, 'sqlite3') + + self.ccnet_db = CcnetSQLiteDB(env_mgr.ccnet_dir) + self.seafile_db = os.path.join(env_mgr.seafile_dir, 'seafile.db') + self.seahub_db = os.path.join(env_mgr.top_dir, 'seahub.db') + self.seafevents_db = os.path.join(env_mgr.seafevents_db_dir, 'seafevents.db') + + def update_db(self): + super(SQLiteDBUpdater, self).update_db() + for sql_path in glob.glob(os.path.join(self.sql_dir, 'ccnet', '*.sql')): + self.update_ccnet_sql(sql_path) + + def apply_sqls(self, db_path, sql_path): + with open(sql_path, 'r') as fp: + lines = fp.read().split(';') + + with sqlite3.connect(db_path) as conn: + for line in lines: + line = line.strip() + if not line: + continue + else: + conn.execute(line) + + def update_ccnet_sql(self, sql_path): + dbname = os.path.splitext(os.path.basename(sql_path))[0] + self.apply_sqls(self.ccnet_db.get_db(dbname), sql_path) + + def update_seafile_sql(self, sql_path): + self.apply_sqls(self.seafile_db, sql_path) + + def update_seahub_sql(self, sql_path): + self.apply_sqls(self.seahub_db, sql_path) + + def update_seafevents_sql(self, sql_path): + if self.is_pro: + Utils.info('seafevents do not support sqlite3 database') + + +class MySQLDBUpdater(DBUpdater): + def __init__(self, version, ccnet_db_info, seafile_db_info, seahub_db_info): + DBUpdater.__init__(self, version, 'mysql') + self.ccnet_db_info = ccnet_db_info + self.seafile_db_info = seafile_db_info + self.seahub_db_info = seahub_db_info + + def update_ccnet_sql(self, ccnet_sql): + self.apply_sqls(self.ccnet_db_info, ccnet_sql) + + def update_seafile_sql(self, seafile_sql): + self.apply_sqls(self.seafile_db_info, seafile_sql) + + def update_seahub_sql(self, seahub_sql): + self.apply_sqls(self.seahub_db_info, seahub_sql) + + def update_seafevents_sql(self, seafevents_sql): + if self.is_pro: + Utils.info('updating seafevents database...') + self.apply_sqls(self.seahub_db_info, seafevents_sql) + + def get_conn(self, info): + kw = dict( + user=info.username, + passwd=info.password, + db=info.db, + ) + if info.unix_socket: + kw['unix_socket'] = info.unix_socket + else: + kw['host'] = info.host + kw['port'] = info.port + try: + conn = pymysql.connect(**kw) + except Exception as e: + if isinstance(e, pymysql.OperationalError): + msg = str(e.args[1]) + else: + msg = str(e) + Utils.error('Failed to connect to mysql database %s: %s' % (info.db, msg)) + + return conn + + def execute_sql(self, conn, sql): + cursor = conn.cursor() + try: + cursor.execute(sql) + conn.commit() + except Exception as e: + msg = str(e) + Utils.warning('Failed to execute sql: %s' % msg) + + def apply_sqls(self, info, sql_path): + with open(sql_path, 'r') as fp: + lines = fp.read().split(';') + + conn = self.get_conn(info) + + for line in lines: + line = line.strip() + if not line: + continue + else: + self.execute_sql(conn, line) + + +def main(): + skipdb = os.environ.get('SEAFILE_SKIP_DB_UPGRADE', '').lower() + if skipdb in ('1', 'true', 'on'): + 
print('Database upgrade skipped because SEAFILE_SKIP_DB_UPGRADE=%s' % skipdb) + sys.exit() + version = sys.argv[1] + db_updater = DBUpdater.get_instance(version) + db_updater.update_db() + + return 0 + +if __name__ == '__main__': + main() diff --git a/scripts/upgrade/fix_mysql_user.py b/scripts/upgrade/fix_mysql_user.py new file mode 100644 index 0000000000..d7fd824b08 --- /dev/null +++ b/scripts/upgrade/fix_mysql_user.py @@ -0,0 +1,234 @@ +#!/usr/bin/env python3 + +import os +import sys +import re +import configparser +import getpass +from collections import namedtuple + +try: + import pymysql + HAS_PYMYSQL = True +except ImportError: + HAS_PYMYSQL = False + +MySQLDBInfo = namedtuple('MySQLDBInfo', 'host port username password db') + +class EnvManager(object): + def __init__(self): + self.upgrade_dir = os.path.abspath(os.path.dirname(__file__)) + self.install_path = os.path.dirname(self.upgrade_dir) + self.top_dir = os.path.dirname(self.install_path) + self.ccnet_dir = os.environ['CCNET_CONF_DIR'] + self.seafile_dir = os.environ['SEAFILE_CONF_DIR'] + +env_mgr = EnvManager() + +class Utils(object): + @staticmethod + def highlight(content, is_error=False): + '''Add ANSI color to content to get it highlighted on terminal''' + if is_error: + return '\x1b[1;31m%s\x1b[m' % content + else: + return '\x1b[1;32m%s\x1b[m' % content + + @staticmethod + def info(msg): + print(Utils.highlight('[INFO] ') + msg) + + @staticmethod + def error(msg): + print(Utils.highlight('[ERROR] ') + msg) + sys.exit(1) + + @staticmethod + def read_config(config_path, defaults): + cp = configparser.ConfigParser(defaults) + cp.read(config_path) + return cp + +def get_ccnet_mysql_info(): + ccnet_conf = os.path.join(env_mgr.ccnet_dir, 'ccnet.conf') + defaults = { + 'HOST': '127.0.0.1', + 'PORT': '3306', + } + + config = Utils.read_config(ccnet_conf, defaults) + db_section = 'Database' + + if not config.has_section(db_section): + return None + + type = config.get(db_section, 'ENGINE') + if type != 'mysql': + return None + + try: + host = config.get(db_section, 'HOST') + port = config.getint(db_section, 'PORT') + username = config.get(db_section, 'USER') + password = config.get(db_section, 'PASSWD') + db = config.get(db_section, 'DB') + except configparser.NoOptionError as e: + Utils.error('Database config in ccnet.conf is invalid: %s' % e) + + info = MySQLDBInfo(host, port, username, password, db) + return info + +def get_seafile_mysql_info(): + seafile_conf = os.path.join(env_mgr.seafile_dir, 'seafile.conf') + defaults = { + 'HOST': '127.0.0.1', + 'PORT': '3306', + } + config = Utils.read_config(seafile_conf, defaults) + db_section = 'database' + + if not config.has_section(db_section): + return None + + type = config.get(db_section, 'type') + if type != 'mysql': + return None + + try: + host = config.get(db_section, 'host') + port = config.getint(db_section, 'port') + username = config.get(db_section, 'user') + password = config.get(db_section, 'password') + db = config.get(db_section, 'db_name') + except configparser.NoOptionError as e: + Utils.error('Database config in seafile.conf is invalid: %s' % e) + + info = MySQLDBInfo(host, port, username, password, db) + return info + +def get_seahub_mysql_info(): + sys.path.insert(0, env_mgr.top_dir) + try: + import seahub_settings# pylint: disable=F0401 + except ImportError as e: + Utils.error('Failed to import seahub_settings.py: %s' % e) + + if not hasattr(seahub_settings, 'DATABASES'): + return None + + try: + d = seahub_settings.DATABASES['default'] + if d['ENGINE'] != 
'django.db.backends.mysql':
+            return None
+
+        host = d.get('HOST', '127.0.0.1')
+        port = int(d.get('PORT', 3306))
+        username = d['USER']
+        password = d['PASSWORD']
+        db = d['NAME']
+    except KeyError as e:
+        Utils.error('Database config in seahub_settings.py is invalid: %s' % e)
+
+    info = MySQLDBInfo(host, port, username, password, db)
+    return info
+
+def get_seafile_db_infos():
+    ccnet_db_info = get_ccnet_mysql_info()
+    seafile_db_info = get_seafile_mysql_info()
+    seahub_db_info = get_seahub_mysql_info()
+
+    infos = [ccnet_db_info, seafile_db_info, seahub_db_info]
+
+    for info in infos:
+        if info is None:
+            return None
+        if info.host not in ('localhost', '127.0.0.1'):
+            return None
+    return infos
+
+def ask_root_password(port):
+    while True:
+        desc = 'What is the root password for mysql? '
+        password = getpass.getpass(desc).strip()
+        if password:
+            try:
+                return check_mysql_user('root', password, port)
+            except InvalidAnswer as e:
+                print('\n%s\n' % e)
+                continue
+
+class InvalidAnswer(Exception):
+    def __init__(self, msg):
+        Exception.__init__(self)
+        self.msg = msg
+
+    def __str__(self):
+        return self.msg
+
+def check_mysql_user(user, password, port):
+    print('\nverifying password of root user %s ... ' % user, end=' ')
+    kwargs = dict(host='localhost',
+                  port=port,
+                  user=user,
+                  passwd=password)
+
+    try:
+        conn = pymysql.connect(**kwargs)
+    except Exception as e:
+        if isinstance(e, pymysql.err.OperationalError):
+            raise InvalidAnswer('Failed to connect to mysql server using user "%s" and password "***": %s'
+                                % (user, e.args[1]))
+        else:
+            raise InvalidAnswer('Failed to connect to mysql server using user "%s" and password "***": %s'
+                                % (user, e))
+
+    print('done')
+    return conn
+
+def apply_fix(root_conn, user, dbs):
+    for db in dbs:
+        grant_db_permission(root_conn, user, db)
+
+    cursor = root_conn.cursor()
+    sql = """
+    SELECT *
+    FROM mysql.user
+    WHERE Host = '%%'
+    AND password = ''
+    AND User = '%s'
+    """ % user
+    cursor.execute(sql)
+    if cursor.rowcount > 0:
+        sql = 'DROP USER `%s`@`%%`' % user
+        cursor.execute(sql)
+
+def grant_db_permission(conn, user, db):
+    cursor = conn.cursor()
+    sql = '''GRANT ALL PRIVILEGES ON `%s`.* to `%s`@localhost ''' \
+        % (db, user)
+
+    try:
+        cursor.execute(sql)
+    except Exception as e:
+        if isinstance(e, pymysql.err.OperationalError):
+            Utils.error('Failed to grant permission of database %s: %s' % (db, e.args[1]))
+        else:
+            Utils.error('Failed to grant permission of database %s: %s' % (db, e))
+
+    finally:
+        cursor.close()
+
+def main():
+    dbinfos = get_seafile_db_infos()
+    if not dbinfos:
+        return
+    if dbinfos[0].username == 'root':
+        return
+
+    if not HAS_PYMYSQL:
+        Utils.error('Python pymysql module is not found')
+    root_conn = ask_root_password(dbinfos[0].port)
+    apply_fix(root_conn, dbinfos[0].username, [info.db for info in dbinfos])
+
+if __name__ == '__main__':
+    main()
diff --git a/scripts/upgrade/minor-upgrade.sh b/scripts/upgrade/minor-upgrade.sh
new file mode 100755
index 0000000000..1b514a6821
--- /dev/null
+++ b/scripts/upgrade/minor-upgrade.sh
@@ -0,0 +1,158 @@
+#!/bin/bash
+
+SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh
+UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/
+INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/
+TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/
+
+echo
+echo "-------------------------------------------------------------"
+echo "This script will perform the minor upgrade for you."
+echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars +seafile_server_symlink=${TOPDIR}/seafile-server-latest +default_conf_dir=${TOPDIR}/conf +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +seahub_data_dir=${TOPDIR}/seahub-data +elasticsearch_config_file=${seafile_server_symlink}/pro/elasticsearch/config/jvm.options + +function migrate_avatars() { + echo + echo "------------------------------" + echo "migrating avatars ..." + echo + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + echo + echo "Error: avatars directory \"${dest_avatar_dir}\" does not exist" 2>&1 + echo + exit 1 + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo + echo "DONE" + echo "------------------------------" + echo +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +function update_latest_symlink() { + # update the symlink seafile-server to the new server version + echo + echo "updating seafile-server-latest symbolic link to ${INSTALLPATH} ..." + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi +} + +function move_old_elasticsearch_config_to_latest() { + # Move the elasticsearch's configuration file from the old version to the new version + echo + echo "Moving the elasticsearch's configuration file ..." + echo + if [[ -f ${elasticsearch_config_file} ]]; then + /bin/cp -avf ${elasticsearch_config_file} ${INSTALLPATH}/pro/elasticsearch/config/jvm.options + fi +} + +function validate_seafile_data_dir() { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi +} + +function rename_gunicorn_config() { + echo + echo "renaming the gunicorn.conf to gunicorn.conf.py ..." 
+    echo
+    if [[ -f "${default_conf_dir}/gunicorn.conf" ]]; then
+        mv "${default_conf_dir}/gunicorn.conf" "${default_conf_dir}/gunicorn.conf.py" 1>/dev/null
+    fi
+
+    if [[ -f "${default_conf_dir}/gunicorn.conf.py" ]]; then
+        echo 'Done'
+    else
+        echo "Failed to rename gunicorn.conf to gunicorn.conf.py."
+        exit 1
+    fi
+}
+
+validate_seafile_data_dir;
+rename_gunicorn_config;
+migrate_avatars;
+
+move_old_customdir_outside;
+make_media_custom_symlink;
+
+move_old_elasticsearch_config_to_latest;
+
+update_latest_symlink;
+
+
+echo "DONE"
+echo "------------------------------"
+echo
diff --git a/scripts/upgrade/regenerate_secret_key.sh b/scripts/upgrade/regenerate_secret_key.sh
new file mode 100755
index 0000000000..b59f44d182
--- /dev/null
+++ b/scripts/upgrade/regenerate_secret_key.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+SCRIPT=$(readlink -f "$0")
+UPGRADEDIR=$(dirname "${SCRIPT}")
+INSTALLPATH=$(dirname "${UPGRADEDIR}")
+TOPDIR=$(dirname "${INSTALLPATH}")
+
+seahub_secret_keygen=${INSTALLPATH}/seahub/tools/secret_key_generator.py
+seahub_settings_py=${TOPDIR}/seahub_settings.py
+
+line="SECRET_KEY = \"$(python $seahub_secret_keygen)\""
+
+sed -i -e "/SECRET_KEY/c\\$line" $seahub_settings_py
diff --git a/scripts/upgrade/sql/1.6.0/mysql/seahub.sql b/scripts/upgrade/sql/1.6.0/mysql/seahub.sql
new file mode 100644
index 0000000000..c870654949
--- /dev/null
+++ b/scripts/upgrade/sql/1.6.0/mysql/seahub.sql
@@ -0,0 +1,47 @@
+CREATE TABLE IF NOT EXISTS `wiki_groupwiki` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `group_id` int(11) NOT NULL,
+  `repo_id` varchar(36) NOT NULL,
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `group_id` (`group_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `wiki_personalwiki` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `username` varchar(255) NOT NULL,
+  `repo_id` varchar(36) NOT NULL,
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `username` (`username`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `group_publicgroup` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `group_id` int(11) NOT NULL,
+  PRIMARY KEY (`id`),
+  KEY `group_publicgroup_425ae3c4` (`group_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `base_filediscuss` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `group_message_id` int(11) NOT NULL,
+  `repo_id` varchar(36) NOT NULL,
+  `path` longtext NOT NULL,
+  `path_hash` varchar(12) NOT NULL,
+  PRIMARY KEY (`id`),
+  KEY `base_filediscuss_3c1a2584` (`group_message_id`),
+  KEY `base_filediscuss_6844bd5a` (`path_hash`),
+  CONSTRAINT `group_message_id_refs_id_2ade200f` FOREIGN KEY (`group_message_id`) REFERENCES `group_groupmessage` (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `base_filelastmodifiedinfo` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `repo_id` varchar(36) NOT NULL,
+  `file_id` varchar(40) NOT NULL,
+  `file_path` longtext NOT NULL,
+  `file_path_hash` varchar(12) NOT NULL,
+  `last_modified` bigint(20) NOT NULL,
+  `email` varchar(75) NOT NULL,
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `repo_id` (`repo_id`,`file_path_hash`),
+  KEY `base_filelastmodifiedinfo_359081cc` (`repo_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
\ No newline at end of file
diff --git a/scripts/upgrade/sql/1.6.0/sqlite3/seahub.sql b/scripts/upgrade/sql/1.6.0/sqlite3/seahub.sql
new file mode 100644
index 0000000000..b1a974401a
--- /dev/null
+++ b/scripts/upgrade/sql/1.6.0/sqlite3/seahub.sql
@@ -0,0 +1,39 @@
+CREATE TABLE IF NOT EXISTS "wiki_groupwiki" (
+    "id" integer NOT NULL PRIMARY KEY,
+    "group_id" integer NOT 
NULL UNIQUE, + "repo_id" varchar(36) NOT NULL +); + +CREATE TABLE IF NOT EXISTS "wiki_personalwiki" ( + "id" integer NOT NULL PRIMARY KEY, + "username" varchar(256) NOT NULL UNIQUE, + "repo_id" varchar(36) NOT NULL +); + +CREATE TABLE IF NOT EXISTS "group_publicgroup" ( + "id" integer NOT NULL PRIMARY KEY, + "group_id" integer NOT NULL +); +CREATE INDEX IF NOT EXISTS "group_publicgroup_bda51c3c" ON "group_publicgroup" ("group_id"); + +CREATE TABLE IF NOT EXISTS "base_filediscuss" ( + "id" integer NOT NULL PRIMARY KEY, + "group_message_id" integer NOT NULL REFERENCES "group_groupmessage" ("id"), + "repo_id" varchar(40) NOT NULL, + "path" text NOT NULL, + "path_hash" varchar(12) NOT NULL +); +CREATE INDEX IF NOT EXISTS "base_filediscuss_6844bd5a" ON "base_filediscuss" ("path_hash"); +CREATE INDEX IF NOT EXISTS "base_filediscuss_c3e5da7c" ON "base_filediscuss" ("group_message_id"); + +CREATE TABLE IF NOT EXISTS "base_filelastmodifiedinfo" ( + "id" integer NOT NULL PRIMARY KEY, + "repo_id" varchar(36) NOT NULL, + "file_id" varchar(40) NOT NULL, + "file_path" text NOT NULL, + "file_path_hash" varchar(12) NOT NULL, + "last_modified" bigint NOT NULL, + "email" varchar(75) NOT NULL, + UNIQUE ("repo_id", "file_path_hash") +); +CREATE INDEX IF NOT EXISTS "base_filelastmodifiedinfo_ca6f7e34" ON "base_filelastmodifiedinfo" ("repo_id"); \ No newline at end of file diff --git a/scripts/upgrade/sql/1.7.0/mysql/seafile.sql b/scripts/upgrade/sql/1.7.0/mysql/seafile.sql new file mode 100644 index 0000000000..5dfc278c04 --- /dev/null +++ b/scripts/upgrade/sql/1.7.0/mysql/seafile.sql @@ -0,0 +1 @@ +CREATE INDEX repousertoken_email on RepoUserToken(email); diff --git a/scripts/upgrade/sql/1.7.0/mysql/seahub.sql b/scripts/upgrade/sql/1.7.0/mysql/seahub.sql new file mode 100644 index 0000000000..6ad01f3366 --- /dev/null +++ b/scripts/upgrade/sql/1.7.0/mysql/seahub.sql @@ -0,0 +1,17 @@ +CREATE TABLE `message_usermessage` ( + `message_id` int(11) NOT NULL AUTO_INCREMENT, + `message` varchar(512) NOT NULL, + `from_email` varchar(75) NOT NULL, + `to_email` varchar(75) NOT NULL, + `timestamp` datetime NOT NULL, + `ifread` tinyint(1) NOT NULL, + PRIMARY KEY (`message_id`), + KEY `message_usermessage_8b1dd4eb` (`from_email`), + KEY `message_usermessage_590d1560` (`to_email`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `message_usermsglastcheck` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `check_time` datetime NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; \ No newline at end of file diff --git a/scripts/upgrade/sql/1.7.0/sqlite3/seafile.sql b/scripts/upgrade/sql/1.7.0/sqlite3/seafile.sql new file mode 100644 index 0000000000..9b5a1c32ee --- /dev/null +++ b/scripts/upgrade/sql/1.7.0/sqlite3/seafile.sql @@ -0,0 +1 @@ +CREATE INDEX IF NOT EXISTS repousertoken_email on RepoUserToken(email); \ No newline at end of file diff --git a/scripts/upgrade/sql/1.7.0/sqlite3/seahub.sql b/scripts/upgrade/sql/1.7.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..c21efddca5 --- /dev/null +++ b/scripts/upgrade/sql/1.7.0/sqlite3/seahub.sql @@ -0,0 +1,16 @@ +CREATE TABLE IF NOT EXISTS "message_usermessage" ( + "message_id" integer NOT NULL PRIMARY KEY, + "message" varchar(512) NOT NULL, + "from_email" varchar(75) NOT NULL, + "to_email" varchar(75) NOT NULL, + "timestamp" datetime NOT NULL, + "ifread" bool NOT NULL +) +; +CREATE TABLE IF NOT EXISTS "message_usermsglastcheck" ( + "id" integer NOT NULL PRIMARY KEY, + "check_time" datetime NOT NULL +) +; +CREATE INDEX IF NOT EXISTS 
"message_usermessage_8b1dd4eb" ON "message_usermessage" ("from_email"); +CREATE INDEX IF NOT EXISTS "message_usermessage_590d1560" ON "message_usermessage" ("to_email"); diff --git a/scripts/upgrade/sql/1.8.0/mysql/ccnet.sql b/scripts/upgrade/sql/1.8.0/mysql/ccnet.sql new file mode 100644 index 0000000000..5ee7e0cb05 --- /dev/null +++ b/scripts/upgrade/sql/1.8.0/mysql/ccnet.sql @@ -0,0 +1,2 @@ +-- ccnet +ALTER TABLE EmailUser MODIFY passwd varchar(64); diff --git a/scripts/upgrade/sql/1.8.0/mysql/seahub.sql b/scripts/upgrade/sql/1.8.0/mysql/seahub.sql new file mode 100644 index 0000000000..f1c79e791d --- /dev/null +++ b/scripts/upgrade/sql/1.8.0/mysql/seahub.sql @@ -0,0 +1,30 @@ +-- seahub +ALTER TABLE group_groupmessage MODIFY message varchar(2048); +ALTER TABLE group_messagereply MODIFY message varchar(2048); + +CREATE TABLE IF NOT EXISTS `share_privatefiledirshare` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `from_user` varchar(255) NOT NULL, + `to_user` varchar(255) NOT NULL, + `repo_id` varchar(36) NOT NULL, + `path` longtext NOT NULL, + `token` varchar(10) NOT NULL, + `permission` varchar(5) NOT NULL, + `s_type` varchar(5) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `token` (`token`), + KEY `share_privatefiledirshare_0e7efed3` (`from_user`), + KEY `share_privatefiledirshare_bc172800` (`to_user`), + KEY `share_privatefiledirshare_2059abe4` (`repo_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `message_usermsgattachment` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `user_msg_id` int(11) NOT NULL, + `priv_file_dir_share_id` int(11) DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `message_usermsgattachment_72f290f5` (`user_msg_id`), + KEY `message_usermsgattachment_cee41a9a` (`priv_file_dir_share_id`), + CONSTRAINT `priv_file_dir_share_id_refs_id_163f8f83` FOREIGN KEY (`priv_file_dir_share_id`) REFERENCES `share_privatefiledirshare` (`id`), + CONSTRAINT `user_msg_id_refs_message_id_debb82ad` FOREIGN KEY (`user_msg_id`) REFERENCES `message_usermessage` (`message_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; \ No newline at end of file diff --git a/scripts/upgrade/sql/1.8.0/sqlite3/seahub.sql b/scripts/upgrade/sql/1.8.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..52658083c1 --- /dev/null +++ b/scripts/upgrade/sql/1.8.0/sqlite3/seahub.sql @@ -0,0 +1,20 @@ +CREATE TABLE IF NOT EXISTS "share_privatefiledirshare" ( + "id" integer NOT NULL PRIMARY KEY, + "from_user" varchar(255) NOT NULL, + "to_user" varchar(255) NOT NULL, + "repo_id" varchar(36) NOT NULL, + "path" text NOT NULL, + "token" varchar(10) NOT NULL UNIQUE, + "permission" varchar(5) NOT NULL, + "s_type" varchar(5) NOT NULL +); + +CREATE TABLE IF NOT EXISTS "message_usermsgattachment" ( + "id" integer NOT NULL PRIMARY KEY, + "user_msg_id" integer NOT NULL REFERENCES "message_usermessage" ("message_id"), + "priv_file_dir_share_id" integer REFERENCES "share_privatefiledirshare" ("id") +); + +CREATE INDEX IF NOT EXISTS "share_privatefiledirshare_0e7efed3" ON "share_privatefiledirshare" ("from_user"); +CREATE INDEX IF NOT EXISTS "share_privatefiledirshare_2059abe4" ON "share_privatefiledirshare" ("repo_id"); +CREATE INDEX IF NOT EXISTS "share_privatefiledirshare_bc172800" ON "share_privatefiledirshare" ("to_user"); \ No newline at end of file diff --git a/scripts/upgrade/sql/2.0.0/mysql/seahub.sql b/scripts/upgrade/sql/2.0.0/mysql/seahub.sql new file mode 100644 index 0000000000..f7cb4ab722 --- /dev/null +++ b/scripts/upgrade/sql/2.0.0/mysql/seahub.sql @@ -0,0 +1,24 @@ +-- seahub +CREATE TABLE IF NOT EXISTS 
`base_groupenabledmodule` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `group_id` varchar(10) NOT NULL, + `module_name` varchar(20) NOT NULL, + PRIMARY KEY (`id`), + KEY `base_groupenabledmodule_dc00373b` (`group_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `base_userenabledmodule` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `username` varchar(255) NOT NULL, + `module_name` varchar(20) NOT NULL, + PRIMARY KEY (`id`), + KEY `base_userenabledmodule_ee0cafa2` (`username`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `base_userlastlogin` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `username` varchar(255) NOT NULL, + `last_login` datetime NOT NULL, + PRIMARY KEY (`id`), + KEY `base_userlastlogin_ee0cafa2` (`username`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/scripts/upgrade/sql/2.0.0/sqlite3/seahub.sql b/scripts/upgrade/sql/2.0.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..9d0dae3e45 --- /dev/null +++ b/scripts/upgrade/sql/2.0.0/sqlite3/seahub.sql @@ -0,0 +1,20 @@ +CREATE TABLE IF NOT EXISTS "base_groupenabledmodule" ( + "id" integer NOT NULL PRIMARY KEY, + "group_id" varchar(10) NOT NULL, + "module_name" varchar(20) NOT NULL +); + +CREATE TABLE IF NOT EXISTS "base_userenabledmodule" ( + "id" integer NOT NULL PRIMARY KEY, + "username" varchar(255) NOT NULL, + "module_name" varchar(20) NOT NULL +); + +CREATE TABLE IF NOT EXISTS "base_userlastlogin" ( + "id" integer NOT NULL PRIMARY KEY, + "username" varchar(255) NOT NULL, + "last_login" datetime NOT NULL +); + +CREATE INDEX IF NOT EXISTS "base_groupenabledmodule_dc00373b" ON "base_groupenabledmodule" ("group_id"); +CREATE INDEX IF NOT EXISTS "base_userenabledmodule_ee0cafa2" ON "base_userenabledmodule" ("username"); diff --git a/scripts/upgrade/sql/2.1.0/mysql/seahub.sql b/scripts/upgrade/sql/2.1.0/mysql/seahub.sql new file mode 100644 index 0000000000..391b688beb --- /dev/null +++ b/scripts/upgrade/sql/2.1.0/mysql/seahub.sql @@ -0,0 +1,53 @@ +CREATE TABLE IF NOT EXISTS `captcha_captchastore` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `challenge` varchar(32) NOT NULL, + `response` varchar(32) NOT NULL, + `hashkey` varchar(40) NOT NULL, + `expiration` datetime NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `hashkey` (`hashkey`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +DROP TABLE IF EXISTS `notifications_usernotification`; +CREATE TABLE IF NOT EXISTS `notifications_usernotification` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `to_user` varchar(255) NOT NULL, + `msg_type` varchar(30) NOT NULL, + `detail` longtext NOT NULL, + `timestamp` datetime NOT NULL, + `seen` tinyint(1) NOT NULL, + PRIMARY KEY (`id`), + KEY `notifications_usernotification_bc172800` (`to_user`), + KEY `notifications_usernotification_265e5521` (`msg_type`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `options_useroptions` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `email` varchar(255) NOT NULL, + `option_key` varchar(50) NOT NULL, + `option_val` varchar(50) NOT NULL, + PRIMARY KEY (`id`), + KEY `options_useroptions_830a6ccb` (`email`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `profile_detailedprofile` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `user` varchar(255) NOT NULL, + `department` varchar(512) NOT NULL, + `telephone` varchar(100) NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `share_uploadlinkshare` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `username` varchar(255) NOT NULL, + `repo_id` 
varchar(36) NOT NULL, + `path` longtext NOT NULL, + `token` varchar(10) NOT NULL, + `ctime` datetime NOT NULL, + `view_cnt` int(11) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `token` (`token`), + KEY `share_uploadlinkshare_ee0cafa2` (`username`), + KEY `share_uploadlinkshare_2059abe4` (`repo_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/scripts/upgrade/sql/2.1.0/sqlite3/seahub.sql b/scripts/upgrade/sql/2.1.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..f6b0c9c115 --- /dev/null +++ b/scripts/upgrade/sql/2.1.0/sqlite3/seahub.sql @@ -0,0 +1,48 @@ +CREATE TABLE IF NOT EXISTS "captcha_captchastore" ( + "id" integer NOT NULL PRIMARY KEY, + "challenge" varchar(32) NOT NULL, + "response" varchar(32) NOT NULL, + "hashkey" varchar(40) NOT NULL UNIQUE, + "expiration" datetime NOT NULL +); + +DROP TABLE IF EXISTS "notifications_usernotification"; +CREATE TABLE IF NOT EXISTS "notifications_usernotification" ( + "id" integer NOT NULL PRIMARY KEY, + "to_user" varchar(255) NOT NULL, + "msg_type" varchar(30) NOT NULL, + "detail" text NOT NULL, + "timestamp" datetime NOT NULL, + "seen" bool NOT NULL +); + +CREATE INDEX IF NOT EXISTS "notifications_usernotification_265e5521" ON "notifications_usernotification" ("msg_type"); +CREATE INDEX IF NOT EXISTS "notifications_usernotification_bc172800" ON "notifications_usernotification" ("to_user"); + +CREATE TABLE IF NOT EXISTS "options_useroptions" ( + "id" integer NOT NULL PRIMARY KEY, + "email" varchar(255) NOT NULL, + "option_key" varchar(50) NOT NULL, + "option_val" varchar(50) NOT NULL +); +CREATE INDEX IF NOT EXISTS "options_useroptions_830a6ccb" ON "options_useroptions" ("email"); + +CREATE TABLE IF NOT EXISTS "profile_detailedprofile" ( + "id" integer NOT NULL PRIMARY KEY, + "user" varchar(255) NOT NULL, + "department" varchar(512) NOT NULL, + "telephone" varchar(100) NOT NULL +); +CREATE INDEX IF NOT EXISTS "profile_detailedprofile_6340c63c" ON "profile_detailedprofile" ("user"); + +CREATE TABLE IF NOT EXISTS "share_uploadlinkshare" ( + "id" integer NOT NULL PRIMARY KEY, + "username" varchar(255) NOT NULL, + "repo_id" varchar(36) NOT NULL, + "path" text NOT NULL, + "token" varchar(10) NOT NULL UNIQUE, + "ctime" datetime NOT NULL, + "view_cnt" integer NOT NULL +); +CREATE INDEX IF NOT EXISTS "share_uploadlinkshare_2059abe4" ON "share_uploadlinkshare" ("repo_id"); +CREATE INDEX IF NOT EXISTS "share_uploadlinkshare_ee0cafa2" ON "share_uploadlinkshare" ("username"); diff --git a/scripts/upgrade/sql/2.2.0/mysql/ccnet.sql b/scripts/upgrade/sql/2.2.0/mysql/ccnet.sql new file mode 100644 index 0000000000..88385eedd6 --- /dev/null +++ b/scripts/upgrade/sql/2.2.0/mysql/ccnet.sql @@ -0,0 +1,2 @@ +ALTER TABLE EmailUser MODIFY passwd varchar(256); + diff --git a/scripts/upgrade/sql/3.0.0/mysql/seahub.sql b/scripts/upgrade/sql/3.0.0/mysql/seahub.sql new file mode 100644 index 0000000000..6ce79cde8b --- /dev/null +++ b/scripts/upgrade/sql/3.0.0/mysql/seahub.sql @@ -0,0 +1,23 @@ +CREATE TABLE IF NOT EXISTS `api2_tokenv2` ( + `key` varchar(40) NOT NULL, + `user` varchar(255) NOT NULL, + `platform` varchar(32) NOT NULL, + `device_id` varchar(40) NOT NULL, + `device_name` varchar(40) NOT NULL, + `platform_version` varchar(16) NOT NULL, + `client_version` varchar(16) NOT NULL, + `last_accessed` datetime NOT NULL, + `last_login_ip` char(39) DEFAULT NULL, + PRIMARY KEY (`key`), + UNIQUE KEY `user` (`user`,`platform`,`device_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `sysadmin_extra_userloginlog` ( + `id` int(11) NOT NULL 
AUTO_INCREMENT, + `username` varchar(255) NOT NULL, + `login_date` datetime NOT NULL, + `login_ip` varchar(20) NOT NULL, + PRIMARY KEY (`id`), + KEY `sysadmin_extra_userloginlog_ee0cafa2` (`username`), + KEY `sysadmin_extra_userloginlog_c8db99ec` (`login_date`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 diff --git a/scripts/upgrade/sql/3.0.0/sqlite3/seahub.sql b/scripts/upgrade/sql/3.0.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..161c9259da --- /dev/null +++ b/scripts/upgrade/sql/3.0.0/sqlite3/seahub.sql @@ -0,0 +1,21 @@ +CREATE TABLE IF NOT EXISTS "api2_tokenv2" ( + "key" varchar(40) NOT NULL PRIMARY KEY, + "user" varchar(255) NOT NULL, + "platform" varchar(32) NOT NULL, + "device_id" varchar(40) NOT NULL, + "device_name" varchar(40) NOT NULL, + "platform_version" varchar(16) NOT NULL, + "client_version" varchar(16) NOT NULL, + "last_accessed" datetime NOT NULL, + "last_login_ip" char(39), + UNIQUE ("user", "platform", "device_id") +); + +CREATE TABLE IF NOT EXISTS "sysadmin_extra_userloginlog" ( + "id" integer NOT NULL PRIMARY KEY, + "username" varchar(255) NOT NULL, + "login_date" datetime NOT NULL, + "login_ip" varchar(20) NOT NULL +); +CREATE INDEX IF NOT EXISTS "sysadmin_extra_userloginlog_c8db99ec" ON "sysadmin_extra_userloginlog" ("login_date"); +CREATE INDEX IF NOT EXISTS "sysadmin_extra_userloginlog_ee0cafa2" ON "sysadmin_extra_userloginlog" ("username"); diff --git a/scripts/upgrade/sql/3.1.0/mysql/seahub.sql b/scripts/upgrade/sql/3.1.0/mysql/seahub.sql new file mode 100644 index 0000000000..ad139cd160 --- /dev/null +++ b/scripts/upgrade/sql/3.1.0/mysql/seahub.sql @@ -0,0 +1,20 @@ +alter table message_usermessage add column sender_deleted_at datetime DEFAULT NULL; +alter table message_usermessage add column recipient_deleted_at datetime DEFAULT NULL; + +alter table share_fileshare add column password varchar(128); +alter table share_fileshare add column expire_date datetime; +alter table share_uploadlinkshare add column password varchar(128); +alter table share_uploadlinkshare add column expire_date datetime; +alter table profile_profile add column lang_code varchar(50) DEFAULT NULL; + +CREATE TABLE IF NOT EXISTS `share_orgfileshare` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `org_id` int(11) NOT NULL, + `file_share_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `file_share_id` (`file_share_id`), + KEY `share_orgfileshare_944dadb6` (`org_id`), + CONSTRAINT `file_share_id_refs_id_bd2fd9f8` FOREIGN KEY (`file_share_id`) REFERENCES `share_fileshare` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `base_userstarredfiles` ADD INDEX `base_userstarredfiles_email` (email); diff --git a/scripts/upgrade/sql/3.1.0/sqlite3/seahub.sql b/scripts/upgrade/sql/3.1.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..42846156a4 --- /dev/null +++ b/scripts/upgrade/sql/3.1.0/sqlite3/seahub.sql @@ -0,0 +1,16 @@ +alter table "message_usermessage" add column "sender_deleted_at" datetime; +alter table "message_usermessage" add column "recipient_deleted_at" datetime; +alter table "share_fileshare" add column "password" varchar(128); +alter table "share_fileshare" add column "expire_date" datetime; +alter table "share_uploadlinkshare" add column "password" varchar(128); +alter table "share_uploadlinkshare" add column "expire_date" datetime; +alter table "profile_profile" add column "lang_code" varchar(50); + +CREATE TABLE IF NOT EXISTS "share_orgfileshare" ( + "id" integer NOT NULL PRIMARY KEY, + "org_id" integer NOT NULL, + "file_share_id" integer NOT NULL UNIQUE 
REFERENCES "share_fileshare" ("id") +); +CREATE INDEX IF NOT EXISTS "share_orgfileshare_944dadb6" ON "share_orgfileshare" ("org_id"); + +CREATE INDEX IF NOT EXISTS "base_userstarredfiles_email" on "base_userstarredfiles" ("email"); diff --git a/scripts/upgrade/sql/4.1.0/mysql/ccnet.sql b/scripts/upgrade/sql/4.1.0/mysql/ccnet.sql new file mode 100644 index 0000000000..42e78881ea --- /dev/null +++ b/scripts/upgrade/sql/4.1.0/mysql/ccnet.sql @@ -0,0 +1 @@ +ALTER TABLE `Group` ADD type VARCHAR(32); diff --git a/scripts/upgrade/sql/4.1.0/mysql/seafile.sql b/scripts/upgrade/sql/4.1.0/mysql/seafile.sql new file mode 100644 index 0000000000..f82e4b75a4 --- /dev/null +++ b/scripts/upgrade/sql/4.1.0/mysql/seafile.sql @@ -0,0 +1,30 @@ +ALTER TABLE SharedRepo MODIFY from_email VARCHAR(255); +ALTER TABLE SharedRepo MODIFY to_email VARCHAR(255); +ALTER TABLE SharedRepo ADD INDEX (from_email); +ALTER TABLE SharedRepo ADD INDEX (to_email); + +CREATE TABLE IF NOT EXISTS OrgSharedRepo ( + id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, + org_id INT, + repo_id CHAR(37) , + from_email VARCHAR(255), + to_email VARCHAR(255), + permission CHAR(15), + INDEX (org_id, repo_id), + INDEX(from_email), + INDEX(to_email) +) ENGINE=INNODB; + +ALTER TABLE OrgSharedRepo MODIFY from_email VARCHAR(255); +ALTER TABLE OrgSharedRepo MODIFY to_email VARCHAR(255); + +CREATE TABLE IF NOT EXISTS RepoTrash ( + repo_id CHAR(36) PRIMARY KEY, + repo_name VARCHAR(255), + head_id CHAR(40), + owner_id VARCHAR(255), + size BIGINT(20), + org_id INTEGER, + INDEX(owner_id), + INDEX(org_id) +) ENGINE=INNODB; diff --git a/scripts/upgrade/sql/4.1.0/sqlite3/ccnet/groupmgr.sql b/scripts/upgrade/sql/4.1.0/sqlite3/ccnet/groupmgr.sql new file mode 100644 index 0000000000..42e78881ea --- /dev/null +++ b/scripts/upgrade/sql/4.1.0/sqlite3/ccnet/groupmgr.sql @@ -0,0 +1 @@ +ALTER TABLE `Group` ADD type VARCHAR(32); diff --git a/scripts/upgrade/sql/4.1.0/sqlite3/seafile.sql b/scripts/upgrade/sql/4.1.0/sqlite3/seafile.sql new file mode 100644 index 0000000000..71147f2416 --- /dev/null +++ b/scripts/upgrade/sql/4.1.0/sqlite3/seafile.sql @@ -0,0 +1,14 @@ +CREATE INDEX IF NOT EXISTS FromEmailIndex on SharedRepo (from_email); +CREATE INDEX IF NOT EXISTS ToEmailIndex on SharedRepo (to_email); + +CREATE TABLE IF NOT EXISTS RepoTrash ( + repo_id CHAR(36) PRIMARY KEY, + repo_name VARCHAR(255), + head_id CHAR(40), + owner_id VARCHAR(255), + size BIGINT UNSIGNED, + org_id INTEGER +); + +CREATE INDEX IF NOT EXISTS repotrash_owner_id_idx ON RepoTrash(owner_id); +CREATE INDEX IF NOT EXISTS repotrash_org_id_idx ON RepoTrash(org_id); diff --git a/scripts/upgrade/sql/4.2.0/mysql/seafile.sql b/scripts/upgrade/sql/4.2.0/mysql/seafile.sql new file mode 100644 index 0000000000..45a06dd949 --- /dev/null +++ b/scripts/upgrade/sql/4.2.0/mysql/seafile.sql @@ -0,0 +1 @@ +alter table RepoTrash add del_time BIGINT; diff --git a/scripts/upgrade/sql/4.2.0/mysql/seahub.sql b/scripts/upgrade/sql/4.2.0/mysql/seahub.sql new file mode 100644 index 0000000000..b62681844d --- /dev/null +++ b/scripts/upgrade/sql/4.2.0/mysql/seahub.sql @@ -0,0 +1,18 @@ +CREATE TABLE IF NOT EXISTS `base_clientlogintoken` ( + `token` varchar(32) NOT NULL, + `username` varchar(255) NOT NULL, + `timestamp` datetime NOT NULL, + PRIMARY KEY (`token`), + KEY `base_clientlogintoken_ee0cafa2` (`username`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `organizations_orgmemberquota` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `org_id` int(11) NOT NULL, + `quota` int(11) NOT NULL, + PRIMARY KEY 
(`id`), + KEY `organizations_orgmemberquota_944dadb6` (`org_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +REPLACE INTO django_content_type VALUES(44,'client login token','base','clientlogintoken'); +REPLACE INTO django_content_type VALUES(45,'org member quota','organizations','orgmemberquota'); diff --git a/scripts/upgrade/sql/4.2.0/sqlite3/seafile.sql b/scripts/upgrade/sql/4.2.0/sqlite3/seafile.sql new file mode 100644 index 0000000000..45a06dd949 --- /dev/null +++ b/scripts/upgrade/sql/4.2.0/sqlite3/seafile.sql @@ -0,0 +1 @@ +alter table RepoTrash add del_time BIGINT; diff --git a/scripts/upgrade/sql/4.2.0/sqlite3/seahub.sql b/scripts/upgrade/sql/4.2.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..6bd3b520aa --- /dev/null +++ b/scripts/upgrade/sql/4.2.0/sqlite3/seahub.sql @@ -0,0 +1,18 @@ +CREATE TABLE IF NOT EXISTS "base_clientlogintoken" ( + "token" varchar(32) NOT NULL PRIMARY KEY, + "username" varchar(255) NOT NULL, + "timestamp" datetime NOT NULL +); + +CREATE INDEX IF NOT EXISTS "base_clientlogintoken_ee0cafa2" ON "base_clientlogintoken" ("username"); + +CREATE TABLE IF NOT EXISTS "organizations_orgmemberquota" ( + "id" integer NOT NULL PRIMARY KEY, + "org_id" integer NOT NULL, + "quota" integer NOT NULL +); + +CREATE INDEX IF NOT EXISTS "organizations_orgmemberquota_944dadb6" ON "organizations_orgmemberquota" ("org_id"); + +REPLACE INTO "django_content_type" VALUES(44,'client login token','base','clientlogintoken'); +REPLACE INTO "django_content_type" VALUES(45,'org member quota','organizations','orgmemberquota'); diff --git a/scripts/upgrade/sql/4.3.0/mysql/.gitkeep b/scripts/upgrade/sql/4.3.0/mysql/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/upgrade/sql/4.3.0/sqlite3/.gitkeep b/scripts/upgrade/sql/4.3.0/sqlite3/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/upgrade/sql/4.4.0/mysql/.gitkeep b/scripts/upgrade/sql/4.4.0/mysql/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/upgrade/sql/4.4.0/sqlite3/.gitkeep b/scripts/upgrade/sql/4.4.0/sqlite3/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/upgrade/sql/5.0.0/mysql/seahub.sql b/scripts/upgrade/sql/5.0.0/mysql/seahub.sql new file mode 100644 index 0000000000..c51a14d698 --- /dev/null +++ b/scripts/upgrade/sql/5.0.0/mysql/seahub.sql @@ -0,0 +1,17 @@ +CREATE TABLE IF NOT EXISTS `constance_config` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `key` varchar(255) NOT NULL, + `value` longtext NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `key` (`key`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `profile_profile` ADD `login_id` varchar(225) DEFAULT NULL; +ALTER TABLE `profile_profile` ADD `contact_email` varchar(225) DEFAULT NULL; +ALTER TABLE `profile_profile` ADD `institution` varchar(225) DEFAULT NULL; + +ALTER TABLE `profile_profile` ADD UNIQUE INDEX (`login_id`); +ALTER TABLE `profile_profile` ADD INDEX (`contact_email`); +ALTER TABLE `profile_profile` ADD INDEX (`institution`); + + diff --git a/scripts/upgrade/sql/5.0.0/sqlite3/seahub.sql b/scripts/upgrade/sql/5.0.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..a6714968da --- /dev/null +++ b/scripts/upgrade/sql/5.0.0/sqlite3/seahub.sql @@ -0,0 +1,13 @@ +CREATE TABLE IF NOT EXISTS "constance_config" ( + "id" integer NOT NULL PRIMARY KEY, + "key" varchar(255) NOT NULL UNIQUE, + "value" text NOT NULL +); + +ALTER TABLE "profile_profile" ADD COLUMN "login_id" varchar(225); +ALTER TABLE "profile_profile" ADD COLUMN 
"contact_email" varchar(225); +ALTER TABLE "profile_profile" ADD COLUMN "institution" varchar(225); + +CREATE UNIQUE INDEX "profile_profile_1b43c217" ON "profile_profile" ("login_id"); +CREATE INDEX "profile_profile_3b46cb17" ON "profile_profile" ("contact_email"); +CREATE INDEX "profile_profile_71bbc151" ON "profile_profile" ("institution"); diff --git a/scripts/upgrade/sql/5.1.0/mysql/seafile.sql b/scripts/upgrade/sql/5.1.0/mysql/seafile.sql new file mode 100644 index 0000000000..2742df03eb --- /dev/null +++ b/scripts/upgrade/sql/5.1.0/mysql/seafile.sql @@ -0,0 +1 @@ +alter table RepoTokenPeerInfo add client_ver varchar(20); \ No newline at end of file diff --git a/scripts/upgrade/sql/5.1.0/mysql/seahub.sql b/scripts/upgrade/sql/5.1.0/mysql/seahub.sql new file mode 100644 index 0000000000..056fd8a3d0 --- /dev/null +++ b/scripts/upgrade/sql/5.1.0/mysql/seahub.sql @@ -0,0 +1,124 @@ +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; + + +CREATE TABLE IF NOT EXISTS `post_office_attachment` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `file` varchar(100) NOT NULL, + `name` varchar(255) NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `post_office_attachment_emails` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `attachment_id` int(11) NOT NULL, + `email_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `attachment_id` (`attachment_id`,`email_id`), + KEY `post_office_attachment_emails_4be595e7` (`attachment_id`), + KEY `post_office_attachment_emails_830a6ccb` (`email_id`), + CONSTRAINT `attachment_id_refs_id_2d59d8fc` FOREIGN KEY (`attachment_id`) REFERENCES `post_office_attachment` (`id`), + CONSTRAINT `email_id_refs_id_061d81d8` FOREIGN KEY (`email_id`) REFERENCES `post_office_email` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `post_office_emailtemplate` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `name` varchar(255) NOT NULL, + `description` longtext NOT NULL, + `created` datetime NOT NULL, + `last_updated` datetime NOT NULL, + `subject` varchar(255) NOT NULL, + `content` longtext NOT NULL, + `html_content` longtext NOT NULL, + `language` varchar(12) NOT NULL, + `default_template_id` int(11) DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `language` (`language`,`default_template_id`), + KEY `post_office_emailtemplate_84c7951d` (`default_template_id`), + CONSTRAINT `default_template_id_refs_id_a2bc649e` FOREIGN KEY (`default_template_id`) REFERENCES `post_office_emailtemplate` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `post_office_email` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `from_email` varchar(254) NOT NULL, + `to` longtext NOT NULL, + `cc` longtext NOT NULL, + `bcc` longtext NOT NULL, + `subject` varchar(255) NOT NULL, + `message` longtext NOT NULL, + `html_message` longtext NOT NULL, + `status` smallint(5) unsigned DEFAULT NULL, + `priority` smallint(5) unsigned DEFAULT NULL, + `created` datetime NOT NULL, + 
`last_updated` datetime NOT NULL, + `scheduled_time` datetime DEFAULT NULL, + `headers` longtext, + `template_id` int(11) DEFAULT NULL, + `context` longtext, + `backend_alias` varchar(64) NOT NULL, + PRIMARY KEY (`id`), + KEY `post_office_email_48fb58bb` (`status`), + KEY `post_office_email_63b5ea41` (`created`), + KEY `post_office_email_470d4868` (`last_updated`), + KEY `post_office_email_c83ff05e` (`scheduled_time`), + KEY `post_office_email_43d23afc` (`template_id`), + CONSTRAINT `template_id_refs_id_a5d97662` FOREIGN KEY (`template_id`) REFERENCES `post_office_emailtemplate` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +CREATE TABLE IF NOT EXISTS `post_office_log` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `email_id` int(11) NOT NULL, + `date` datetime NOT NULL, + `status` smallint(5) unsigned NOT NULL, + `exception_type` varchar(255) NOT NULL, + `message` longtext NOT NULL, + PRIMARY KEY (`id`), + KEY `post_office_log_830a6ccb` (`email_id`), + CONSTRAINT `email_id_refs_id_3d87f587` FOREIGN KEY (`email_id`) REFERENCES `post_office_email` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `institutions_institution` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `name` varchar(200) NOT NULL, + `create_time` datetime NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `institutions_institutionadmin` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `user` varchar(254) NOT NULL, + `institution_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + KEY `i_institution_id_5f792d6fe9a87ac9_fk_institutions_institution_id` (`institution_id`), + CONSTRAINT `i_institution_id_5f792d6fe9a87ac9_fk_institutions_institution_id` FOREIGN KEY (`institution_id`) REFERENCES `institutions_institution` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `sysadmin_extra_userloginlog` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `username` varchar(255) NOT NULL, + `login_date` datetime NOT NULL, + `login_ip` varchar(128) NOT NULL, + PRIMARY KEY (`id`), + KEY `sysadmin_extra_userloginlog_14c4b06b` (`username`), + KEY `sysadmin_extra_userloginlog_28ed1ef0` (`login_date`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +ALTER TABLE `sysadmin_extra_userloginlog` MODIFY `login_ip` VARCHAR(128); + +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; diff --git a/scripts/upgrade/sql/5.1.0/sqlite3/seafile.sql b/scripts/upgrade/sql/5.1.0/sqlite3/seafile.sql new file mode 100644 index 0000000000..2742df03eb --- /dev/null +++ b/scripts/upgrade/sql/5.1.0/sqlite3/seafile.sql @@ -0,0 +1 @@ +alter table RepoTokenPeerInfo add client_ver varchar(20); \ No newline at end of file diff --git a/scripts/upgrade/sql/5.1.0/sqlite3/seahub.sql b/scripts/upgrade/sql/5.1.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..6e68aa0609 --- /dev/null +++ b/scripts/upgrade/sql/5.1.0/sqlite3/seahub.sql @@ -0,0 +1,72 @@ +CREATE TABLE IF NOT EXISTS "post_office_attachment" ( + "id" integer NOT NULL PRIMARY KEY, + "file" varchar(100) NOT NULL, + "name" varchar(255) NOT NULL +); +CREATE TABLE IF NOT EXISTS "post_office_attachment_emails" ( + "id" integer NOT NULL PRIMARY KEY, + 
"attachment_id" integer NOT NULL, + "email_id" integer NOT NULL REFERENCES "post_office_email" ("id"), + UNIQUE ("attachment_id", "email_id") +); +CREATE TABLE IF NOT EXISTS "post_office_email" ( + "id" integer NOT NULL PRIMARY KEY, + "from_email" varchar(254) NOT NULL, + "to" text NOT NULL, + "cc" text NOT NULL, + "bcc" text NOT NULL, + "subject" varchar(255) NOT NULL, + "message" text NOT NULL, + "html_message" text NOT NULL, + "status" smallint unsigned, + "priority" smallint unsigned, + "created" datetime NOT NULL, + "last_updated" datetime NOT NULL, + "scheduled_time" datetime, + "headers" text, + "template_id" integer, + "context" text, + "backend_alias" varchar(64) NOT NULL +); +CREATE TABLE IF NOT EXISTS "post_office_emailtemplate" ( + "id" integer NOT NULL PRIMARY KEY, + "name" varchar(255) NOT NULL, + "description" text NOT NULL, + "created" datetime NOT NULL, + "last_updated" datetime NOT NULL, + "subject" varchar(255) NOT NULL, + "content" text NOT NULL, + "html_content" text NOT NULL, + "language" varchar(12) NOT NULL, + "default_template_id" integer, + UNIQUE ("language", "default_template_id") +); +CREATE TABLE IF NOT EXISTS "post_office_log" ( + "id" integer NOT NULL PRIMARY KEY, + "email_id" integer NOT NULL REFERENCES "post_office_email" ("id"), + "date" datetime NOT NULL, + "status" smallint unsigned NOT NULL, + "exception_type" varchar(255) NOT NULL, + "message" text NOT NULL +); +CREATE TABLE IF NOT EXISTS "institutions_institution" ( + "id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, + "name" varchar(200) NOT NULL, + "create_time" datetime NOT NULL +); +CREATE TABLE IF NOT EXISTS "institutions_institutionadmin" ( + "id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, + "user" varchar(254) NOT NULL, + "institution_id" integer NOT NULL REFERENCES "institutions_institution" ("id") +); + +CREATE INDEX IF NOT EXISTS "post_office_attachment_emails_4be595e7" ON "post_office_attachment_emails" ("attachment_id"); +CREATE INDEX IF NOT EXISTS "post_office_attachment_emails_830a6ccb" ON "post_office_attachment_emails" ("email_id"); +CREATE INDEX IF NOT EXISTS "post_office_email_43d23afc" ON "post_office_email" ("template_id"); +CREATE INDEX IF NOT EXISTS "post_office_email_470d4868" ON "post_office_email" ("last_updated"); +CREATE INDEX IF NOT EXISTS "post_office_email_48fb58bb" ON "post_office_email" ("status"); +CREATE INDEX IF NOT EXISTS "post_office_email_63b5ea41" ON "post_office_email" ("created"); +CREATE INDEX IF NOT EXISTS "post_office_email_c83ff05e" ON "post_office_email" ("scheduled_time"); +CREATE INDEX IF NOT EXISTS "post_office_emailtemplate_84c7951d" ON "post_office_emailtemplate" ("default_template_id"); +CREATE INDEX IF NOT EXISTS "post_office_log_830a6ccb" ON "post_office_log" ("email_id"); +CREATE INDEX "institutions_institutionadmin_a964baeb" ON "institutions_institutionadmin" ("institution_id"); diff --git a/scripts/upgrade/sql/6.0.0/mysql/seahub.sql b/scripts/upgrade/sql/6.0.0/mysql/seahub.sql new file mode 100644 index 0000000000..fe9516a1c6 --- /dev/null +++ b/scripts/upgrade/sql/6.0.0/mysql/seahub.sql @@ -0,0 +1,104 @@ +ALTER TABLE api2_tokenv2 ADD COLUMN wiped_at DATETIME DEFAULT NULL; +ALTER TABLE api2_tokenv2 ADD COLUMN created_at DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00"; + +CREATE TABLE IF NOT EXISTS `base_filecomment` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `parent_path` longtext NOT NULL, + `repo_id_parent_path_md5` varchar(100) NOT NULL, + `item_name` longtext NOT NULL, + `author` varchar(255) NOT NULL, + 
+ `comment` longtext NOT NULL,
+ `created_at` datetime NOT NULL,
+ `updated_at` datetime NOT NULL,
+ PRIMARY KEY (`id`),
+ KEY `base_filecomment_9a8c79bf` (`repo_id`),
+ KEY `base_filecomment_c5bf47d4` (`repo_id_parent_path_md5`),
+ KEY `base_filecomment_02bd92fa` (`author`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `termsandconditions_termsandconditions` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `slug` varchar(50) NOT NULL,
+ `name` longtext NOT NULL,
+ `version_number` decimal(6,2) NOT NULL,
+ `text` longtext,
+ `info` longtext,
+ `date_active` datetime DEFAULT NULL,
+ `date_created` datetime NOT NULL,
+ PRIMARY KEY (`id`),
+ KEY `termsandconditions_termsandconditions_2dbcba41` (`slug`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `termsandconditions_usertermsandconditions` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `username` varchar(255) NOT NULL,
+ `ip_address` char(39) DEFAULT NULL,
+ `date_accepted` datetime NOT NULL,
+ `terms_id` int(11) NOT NULL,
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `termsandconditions_usertermsandcon_username_f4ab54cafa29322_uniq` (`username`,`terms_id`),
+ KEY `e4da106203f3f13ff96409b55de6f515` (`terms_id`),
+ CONSTRAINT `e4da106203f3f13ff96409b55de6f515` FOREIGN KEY (`terms_id`) REFERENCES `termsandconditions_termsandconditions` (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `two_factor_totpdevice` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `user` varchar(255) NOT NULL,
+ `name` varchar(64) NOT NULL,
+ `confirmed` tinyint(1) NOT NULL,
+ `key` varchar(80) NOT NULL,
+ `step` smallint(5) unsigned NOT NULL,
+ `t0` bigint(20) NOT NULL,
+ `digits` smallint(5) unsigned NOT NULL,
+ `tolerance` smallint(5) unsigned NOT NULL,
+ `drift` smallint(6) NOT NULL,
+ `last_t` bigint(20) NOT NULL,
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `user` (`user`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `two_factor_phonedevice` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `user` varchar(255) NOT NULL,
+ `name` varchar(64) NOT NULL,
+ `confirmed` tinyint(1) NOT NULL,
+ `number` varchar(40) NOT NULL,
+ `key` varchar(40) NOT NULL,
+ `method` varchar(4) NOT NULL,
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `user` (`user`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `two_factor_staticdevice` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `user` varchar(255) NOT NULL,
+ `name` varchar(64) NOT NULL,
+ `confirmed` tinyint(1) NOT NULL,
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `user` (`user`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `two_factor_statictoken` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `token` varchar(16) NOT NULL,
+ `device_id` int(11) NOT NULL,
+ PRIMARY KEY (`id`),
+ KEY `two_fac_device_id_55a7b345293a7c6c_fk_two_factor_staticdevice_id` (`device_id`),
+ KEY `two_factor_statictoken_94a08da1` (`token`),
+ CONSTRAINT `two_fac_device_id_55a7b345293a7c6c_fk_two_factor_staticdevice_id` FOREIGN KEY (`device_id`) REFERENCES `two_factor_staticdevice` (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `invitations_invitation` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `token` varchar(40) NOT NULL,
+ `inviter` varchar(255) NOT NULL,
+ `accepter` varchar(255) NOT NULL,
+ `invite_time` datetime NOT NULL,
+ `accept_time` datetime DEFAULT NULL,
+ `invite_type` varchar(20) NOT NULL,
+ `expire_time` datetime NOT NULL,
+ PRIMARY KEY (`id`),
+ KEY `invitations_invitation_d5dd16f8` (`inviter`),
+ KEY `invitations_invitation_token_1961fbb98c05e5fd_uniq` (`token`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
diff --git a/scripts/upgrade/sql/6.0.0/sqlite3/seahub.sql b/scripts/upgrade/sql/6.0.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..46bb396d22 --- /dev/null +++ b/scripts/upgrade/sql/6.0.0/sqlite3/seahub.sql @@ -0,0 +1,24 @@
+CREATE TABLE IF NOT EXISTS "base_filecomment" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "parent_path" text NOT NULL, "repo_id_parent_path_md5" varchar(100) NOT NULL, "item_name" text NOT NULL, "author" varchar(255) NOT NULL, "comment" text NOT NULL, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL);
+CREATE INDEX IF NOT EXISTS "base_filecomment_02bd92fa" ON "base_filecomment" ("author");
+CREATE INDEX IF NOT EXISTS "base_filecomment_9a8c79bf" ON "base_filecomment" ("repo_id");
+CREATE INDEX IF NOT EXISTS "base_filecomment_c5bf47d4" ON "base_filecomment" ("repo_id_parent_path_md5");
+
+CREATE TABLE IF NOT EXISTS "termsandconditions_termsandconditions" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "slug" varchar(50) NOT NULL, "name" text NOT NULL, "version_number" decimal NOT NULL, "text" text NULL, "info" text NULL, "date_active" datetime NULL, "date_created" datetime NOT NULL);
+CREATE INDEX IF NOT EXISTS "termsandconditions_termsandconditions_2dbcba41" ON "termsandconditions_termsandconditions" ("slug");
+
+CREATE TABLE IF NOT EXISTS "termsandconditions_usertermsandconditions" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL, "ip_address" char(39) NULL, "date_accepted" datetime NOT NULL, "terms_id" integer NOT NULL REFERENCES "termsandconditions_termsandconditions" ("id"), UNIQUE ("username", "terms_id"));
+CREATE INDEX IF NOT EXISTS "termsandconditions_usertermsandconditions_2ab34720" ON "termsandconditions_usertermsandconditions" ("terms_id");
+
+CREATE TABLE IF NOT EXISTS "two_factor_phonedevice" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user" varchar(255) NOT NULL UNIQUE, "name" varchar(64) NOT NULL, "confirmed" bool NOT NULL, "number" varchar(40) NOT NULL, "key" varchar(40) NOT NULL, "method" varchar(4) NOT NULL);
+CREATE TABLE IF NOT EXISTS "two_factor_staticdevice" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user" varchar(255) NOT NULL UNIQUE, "name" varchar(64) NOT NULL, "confirmed" bool NOT NULL);
+CREATE TABLE IF NOT EXISTS "two_factor_statictoken" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "token" varchar(16) NOT NULL, "device_id" integer NOT NULL REFERENCES "two_factor_staticdevice" ("id"));
+CREATE TABLE IF NOT EXISTS "two_factor_totpdevice" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user" varchar(255) NOT NULL UNIQUE, "name" varchar(64) NOT NULL, "confirmed" bool NOT NULL, "key" varchar(80) NOT NULL, "step" smallint unsigned NOT NULL, "t0" bigint NOT NULL, "digits" smallint unsigned NOT NULL, "tolerance" smallint unsigned NOT NULL, "drift" smallint NOT NULL, "last_t" bigint NOT NULL);
+CREATE INDEX IF NOT EXISTS "two_factor_statictoken_94a08da1" ON "two_factor_statictoken" ("token");
+CREATE INDEX IF NOT EXISTS "two_factor_statictoken_9379346c" ON "two_factor_statictoken" ("device_id");
+
+CREATE TABLE IF NOT EXISTS "invitations_invitation" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "token" varchar(40) NOT NULL, "inviter" varchar(255) NOT NULL, "accepter" varchar(255) NOT NULL, "invite_time" datetime NOT NULL, "accept_time" datetime NULL, "invite_type" varchar(20) NOT NULL, "expire_time" datetime NOT NULL);
+CREATE INDEX IF NOT EXISTS "invitations_invitation_94a08da1" ON "invitations_invitation" ("token");
+CREATE INDEX IF NOT EXISTS "invitations_invitation_d5dd16f8" ON "invitations_invitation" ("inviter");
+
+ALTER TABLE api2_tokenv2 ADD COLUMN wiped_at datetime DEFAULT NULL;
+ALTER TABLE api2_tokenv2 ADD COLUMN created_at datetime NOT NULL DEFAULT '1970-01-01 00:00:00';
diff --git a/scripts/upgrade/sql/6.1.0/mysql/seahub.sql b/scripts/upgrade/sql/6.1.0/mysql/seahub.sql new file mode 100644 index 0000000000..4ffd944b7a --- /dev/null +++ b/scripts/upgrade/sql/6.1.0/mysql/seahub.sql @@ -0,0 +1,23 @@
+ALTER TABLE `share_fileshare` MODIFY token varchar(100);
+ALTER TABLE `share_fileshare` ADD COLUMN `permission` varchar(50) NOT NULL DEFAULT 'view_download';
+ALTER TABLE `share_uploadlinkshare` MODIFY token varchar(100);
+
+CREATE TABLE IF NOT EXISTS `institutions_institutionquota` (
+ `id` int(11) NOT
NULL AUTO_INCREMENT, + `name` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `name` (`name`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `revision_tag_revisiontags` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `path` longtext NOT NULL, + `revision_id` varchar(255) NOT NULL, + `tag_id` int(11) NOT NULL, + `username` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + KEY `revision_tag_rev_tag_id_37c2d76166c50597_fk_revision_tag_tags_id` (`tag_id`), + KEY `revision_tag_revisiontags_9a8c79bf` (`repo_id`), + KEY `revision_tag_revisiontags_5de09a8d` (`revision_id`), + KEY `revision_tag_revisiontags_14c4b06b` (`username`), + CONSTRAINT `revision_tag_rev_tag_id_37c2d76166c50597_fk_revision_tag_tags_id` FOREIGN KEY (`tag_id`) REFERENCES `revision_tag_tags` (`id`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `share_extrasharepermission` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `share_to` varchar(255) NOT NULL, + `permission` varchar(30) NOT NULL, + PRIMARY KEY (`id`), + KEY `share_extrasharepermission_9a8c79bf` (`repo_id`), + KEY `share_extrasharepermission_e4fb1dad` (`share_to`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `share_extragroupssharepermission` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `group_id` int(11) NOT NULL, + `permission` varchar(30) NOT NULL, + PRIMARY KEY (`id`), + KEY `share_extragroupssharepermission_9a8c79bf` (`repo_id`), + KEY `share_extragroupssharepermission_0e939a4f` (`group_id`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `tags_fileuuidmap` ( + `uuid` char(32) NOT NULL, + `repo_id` varchar(36) NOT NULL, + `repo_id_parent_path_md5` varchar(100) NOT NULL, + `parent_path` longtext NOT NULL, + `filename` varchar(1024) NOT NULL, + `is_dir` tinyint(1) NOT NULL, + PRIMARY KEY (`uuid`), + KEY `tags_fileuuidmap_9a8c79bf` (`repo_id`), + KEY `tags_fileuuidmap_c5bf47d4` (`repo_id_parent_path_md5`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `tags_tags` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `name` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `name` (`name`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `tags_filetag` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `uuid_id` char(32) NOT NULL, + `tag_id` int(11) NOT NULL, + `username` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + KEY `tags_filetag_uuid_id_5e2dc8ebbab85301_fk_tags_fileuuidmap_uuid` (`uuid_id`), + KEY `tags_filetag_tag_id_39c4746ee9d70b71_fk_tags_tags_id` (`tag_id`), + CONSTRAINT `tags_filetag_tag_id_39c4746ee9d70b71_fk_tags_tags_id` FOREIGN KEY (`tag_id`) REFERENCES `tags_tags` (`id`), + CONSTRAINT `tags_filetag_uuid_id_5e2dc8ebbab85301_fk_tags_fileuuidmap_uuid` FOREIGN KEY (`uuid_id`) REFERENCES `tags_fileuuidmap` (`uuid`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `role_permissions_adminrole` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `email` varchar(254) NOT NULL, + `role` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `email` (`email`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `sysadmin_extra_userloginlog` ADD COLUMN `login_success` tinyint(1) NOT NULL default 1; +ALTER TABLE `profile_profile` ADD COLUMN `list_in_address_book` tinyint(1) NOT NULL default 0; +ALTER TABLE `profile_profile` ADD INDEX `profile_profile_3d5d3631` (`list_in_address_book`); +ALTER TABLE `FileAudit` ADD INDEX 
`fileaudit_timestamp` (`timestamp`); +ALTER TABLE `Event` ADD INDEX `event_timestamp` (`timestamp`); +ALTER TABLE `UserTrafficStat` ADD INDEX `usertrafficstat_timestamp` (`month`); +ALTER TABLE `FileUpdate` ADD INDEX `fileupdate_timestamp` (`timestamp`); diff --git a/scripts/upgrade/sql/6.2.0/sqlite3/ccnet/usermgr.sql b/scripts/upgrade/sql/6.2.0/sqlite3/ccnet/usermgr.sql new file mode 100644 index 0000000000..e548e09d69 --- /dev/null +++ b/scripts/upgrade/sql/6.2.0/sqlite3/ccnet/usermgr.sql @@ -0,0 +1,4 @@ +alter table LDAPUsers add column reference_id VARCHAR(255); +alter table EmailUser add column reference_id VARCHAR(255); +CREATE UNIQUE INDEX IF NOT EXISTS reference_id_index on EmailUser (reference_id); +CREATE UNIQUE INDEX IF NOT EXISTS ldapusers_reference_id_index on LDAPUsers(reference_id); \ No newline at end of file diff --git a/scripts/upgrade/sql/6.2.0/sqlite3/seahub.sql b/scripts/upgrade/sql/6.2.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..8d60dbccc9 --- /dev/null +++ b/scripts/upgrade/sql/6.2.0/sqlite3/seahub.sql @@ -0,0 +1,24 @@ +alter table sysadmin_extra_userloginlog add column login_success bool not null default 1; +alter table profile_profile add column list_in_address_book bool not null default 0; + +CREATE TABLE IF NOT EXISTS "share_extragroupssharepermission" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "group_id" integer NOT NULL, "permission" varchar(30) NOT NULL); +CREATE TABLE IF NOT EXISTS "share_extrasharepermission" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "share_to" varchar(255) NOT NULL, "permission" varchar(30) NOT NULL); +CREATE TABLE IF NOT EXISTS "tags_fileuuidmap" ("uuid" char(32) NOT NULL PRIMARY KEY, "repo_id" varchar(36) NOT NULL, "repo_id_parent_path_md5" varchar(100) NOT NULL, "parent_path" text NOT NULL, "filename" varchar(1024) NOT NULL, "is_dir" bool NOT NULL); +CREATE TABLE IF NOT EXISTS "tags_tags" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "name" varchar(255) NOT NULL UNIQUE); +CREATE TABLE IF NOT EXISTS "tags_filetag" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "uuid_id" char(32) NOT NULL REFERENCES "tags_fileuuidmap" ("uuid"), "tag_id" integer NOT NULL REFERENCES "tags_tags" ("id"), "username" varchar(255) NOT NULL); +CREATE TABLE IF NOT EXISTS "revision_tag_tags" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "name" varchar(255) NOT NULL UNIQUE); +CREATE TABLE IF NOT EXISTS "revision_tag_revisiontags" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "path" text NOT NULL, "revision_id" varchar(255) NOT NULL, "tag_id" integer NOT NULL REFERENCES "revision_tag_tags" ("id"), "username" varchar(255) NOT NULL); +CREATE TABLE IF NOT EXISTS "role_permissions_adminrole" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "email" varchar(254) NOT NULL UNIQUE, "role" varchar(255) NOT NULL); +CREATE INDEX IF NOT EXISTS "share_extragroupssharepermission_9a8c79bf" ON "share_extragroupssharepermission" ("repo_id"); +CREATE INDEX IF NOT EXISTS "share_extragroupssharepermission_0e939a4f" ON "share_extragroupssharepermission" ("group_id"); +CREATE INDEX IF NOT EXISTS "share_extrasharepermission_9a8c79bf" ON "share_extrasharepermission" ("repo_id"); +CREATE INDEX IF NOT EXISTS "share_extrasharepermission_e4fb1dad" ON "share_extrasharepermission" ("share_to"); +CREATE INDEX IF NOT EXISTS "tags_fileuuidmap_9a8c79bf" ON "tags_fileuuidmap" ("repo_id"); +CREATE INDEX IF NOT EXISTS "tags_fileuuidmap_c5bf47d4" ON 
"tags_fileuuidmap" ("repo_id_parent_path_md5"); +CREATE INDEX IF NOT EXISTS "tags_filetag_10634818" ON "tags_filetag" ("uuid_id"); +CREATE INDEX IF NOT EXISTS "tags_filetag_76f094bc" ON "tags_filetag" ("tag_id"); +CREATE INDEX IF NOT EXISTS "revision_tag_revisiontags_9a8c79bf" ON "revision_tag_revisiontags" ("repo_id"); +CREATE INDEX IF NOT EXISTS "revision_tag_revisiontags_5de09a8d" ON "revision_tag_revisiontags" ("revision_id"); +CREATE INDEX IF NOT EXISTS "revision_tag_revisiontags_76f094bc" ON "revision_tag_revisiontags" ("tag_id"); +CREATE INDEX IF NOT EXISTS "revision_tag_revisiontags_14c4b06b" ON "revision_tag_revisiontags" ("username"); +CREATE INDEX IF NOT EXISTS "profile_profile_3d5d3631" ON "profile_profile" ("list_in_address_book"); \ No newline at end of file diff --git a/scripts/upgrade/sql/6.3.0/mysql/ccnet.sql b/scripts/upgrade/sql/6.3.0/mysql/ccnet.sql new file mode 100644 index 0000000000..58f187a587 --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/mysql/ccnet.sql @@ -0,0 +1,24 @@ +CREATE TABLE IF NOT EXISTS LDAPConfig (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS GroupStructure (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, group_id INTEGER, path VARCHAR(1024), UNIQUE INDEX(group_id))ENGINE=INNODB; + +alter table `Group` add column parent_group_id INTEGER default 0; -- Replace `Group` if you configured table `Group` to another name. + +ALTER TABLE Binding ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; + +ALTER TABLE LDAPConfig ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; + +ALTER TABLE OrgUser DROP primary key; +ALTER TABLE OrgUser ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE OrgUser ADD UNIQUE (org_id, email); + +ALTER TABLE OrgGroup DROP primary key; +ALTER TABLE OrgGroup ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE OrgGroup ADD UNIQUE (org_id, group_id); + +ALTER TABLE GroupUser DROP primary key; +ALTER TABLE GroupUser ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE GroupUser ADD UNIQUE (group_id, user_name); + +ALTER TABLE GroupDNPair ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; + diff --git a/scripts/upgrade/sql/6.3.0/mysql/seafevents.sql b/scripts/upgrade/sql/6.3.0/mysql/seafevents.sql new file mode 100644 index 0000000000..b41ed2c844 --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/mysql/seafevents.sql @@ -0,0 +1,4 @@ +ALTER TABLE Event ADD INDEX `ix_event_timestamp` (`timestamp`); +ALTER TABLE FileAudit ADD INDEX `ix_FileAudit_timestamp` (`timestamp`); +ALTER TABLE FileUpdate ADD INDEX `ix_FileUpdate_timestamp` (`timestamp`); +ALTER TABLE UserTrafficStat ADD INDEX `ix_UserTrafficStat_month` (`month`); diff --git a/scripts/upgrade/sql/6.3.0/mysql/seafile.sql b/scripts/upgrade/sql/6.3.0/mysql/seafile.sql new file mode 100644 index 0000000000..4405c44033 --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/mysql/seafile.sql @@ -0,0 +1,136 @@ +CREATE TABLE IF NOT EXISTS SeafileConf (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS RepoInfo (repo_id CHAR(36) PRIMARY KEY, name VARCHAR(255) NOT NULL, update_time BIGINT, version INTEGER, is_encrypted INTEGER, last_modifier VARCHAR(255)) ENGINE=INNODB; + +ALTER TABLE Repo DROP primary key; +ALTER TABLE Repo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE Repo ADD UNIQUE (repo_id); + +ALTER TABLE RepoOwner DROP 
primary key; +ALTER TABLE RepoOwner ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoOwner ADD UNIQUE (repo_id); + +ALTER TABLE RepoGroup DROP primary key; +ALTER TABLE RepoGroup ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoGroup ADD UNIQUE (group_id, repo_id); + +ALTER TABLE InnerPubRepo DROP primary key; +ALTER TABLE InnerPubRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE InnerPubRepo ADD UNIQUE (repo_id); + +ALTER TABLE RepoUserToken DROP primary key; +ALTER TABLE RepoUserToken ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoUserToken ADD UNIQUE (repo_id, token); + +ALTER TABLE RepoTokenPeerInfo DROP primary key; +ALTER TABLE RepoTokenPeerInfo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoTokenPeerInfo ADD UNIQUE (token); + +ALTER TABLE RepoHead DROP primary key; +ALTER TABLE RepoHead ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoHead ADD UNIQUE (repo_id); + +ALTER TABLE RepoSize DROP primary key; +ALTER TABLE RepoSize ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoSize ADD UNIQUE (repo_id); + +ALTER TABLE RepoHistoryLimit DROP primary key; +ALTER TABLE RepoHistoryLimit ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoHistoryLimit ADD UNIQUE (repo_id); + +ALTER TABLE RepoValidSince DROP primary key; +ALTER TABLE RepoValidSince ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoValidSince ADD UNIQUE (repo_id); + +ALTER TABLE WebAP DROP primary key; +ALTER TABLE WebAP ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE WebAP ADD UNIQUE (repo_id); + +ALTER TABLE VirtualRepo DROP primary key; +ALTER TABLE VirtualRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE VirtualRepo ADD UNIQUE (repo_id); + +ALTER TABLE GarbageRepos DROP primary key; +ALTER TABLE GarbageRepos ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE GarbageRepos ADD UNIQUE (repo_id); + +ALTER TABLE RepoTrash DROP primary key; +ALTER TABLE RepoTrash ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoTrash ADD UNIQUE (repo_id); + +ALTER TABLE RepoFileCount DROP primary key; +ALTER TABLE RepoFileCount ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoFileCount ADD UNIQUE (repo_id); + +ALTER TABLE RepoInfo DROP primary key; +ALTER TABLE RepoInfo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoInfo ADD UNIQUE (repo_id); + +ALTER TABLE UserQuota DROP primary key; +ALTER TABLE UserQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE UserQuota ADD UNIQUE (user); + +ALTER TABLE UserShareQuota DROP primary key; +ALTER TABLE UserShareQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE UserShareQuota ADD UNIQUE (user); + +ALTER TABLE OrgQuota DROP primary key; +ALTER TABLE OrgQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE OrgQuota ADD UNIQUE (org_id); + +ALTER TABLE OrgUserQuota DROP primary key; +ALTER TABLE OrgUserQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE OrgUserQuota ADD UNIQUE (org_id, user); + +ALTER TABLE Branch DROP primary key; +ALTER TABLE Branch ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE Branch ADD UNIQUE (repo_id, name); + +ALTER TABLE SeafileConf ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; + +ALTER TABLE 
FileLocks ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; + +ALTER TABLE OrgRepo DROP primary key; +ALTER TABLE OrgRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE OrgRepo ADD UNIQUE (org_id, repo_id); + +ALTER TABLE OrgGroupRepo DROP primary key; +ALTER TABLE OrgGroupRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE OrgGroupRepo ADD UNIQUE (org_id, group_id, repo_id); + +ALTER TABLE OrgInnerPubRepo DROP primary key; +ALTER TABLE OrgInnerPubRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE OrgInnerPubRepo ADD UNIQUE (org_id, repo_id); + +ALTER TABLE RepoSyncError DROP primary key; +ALTER TABLE RepoSyncError ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoSyncError ADD UNIQUE (token); + +ALTER TABLE GCID DROP primary key; +ALTER TABLE GCID ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE GCID ADD UNIQUE (repo_id); + +ALTER TABLE LastGCID DROP primary key; +ALTER TABLE LastGCID ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE LastGCID ADD UNIQUE (repo_id, client_id); + +ALTER TABLE FolderUserPerm ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; + +ALTER TABLE FolderGroupPerm ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; + +ALTER TABLE FolderPermTimestamp DROP primary key; +ALTER TABLE FolderPermTimestamp ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE FolderPermTimestamp ADD UNIQUE (repo_id); + +ALTER TABLE WebUploadTempFiles ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; + +ALTER TABLE RepoStorageId ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; + +ALTER TABLE RoleQuota DROP primary key; +ALTER TABLE RoleQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RoleQuota ADD UNIQUE (role); + +CREATE TABLE IF NOT EXISTS OrgSharedRepo (id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,org_id INT, repo_id CHAR(37) , from_email VARCHAR(255), to_email VARCHAR(255), permission CHAR(15), INDEX (org_id, repo_id), INDEX(from_email), INDEX(to_email)) ENGINE=INNODB; +ALTER TABLE OrgSharedRepo ADD INDEX(repo_id); + +ALTER TABLE OrgRepo ADD INDEX(user); diff --git a/scripts/upgrade/sql/6.3.0/mysql/seahub.sql b/scripts/upgrade/sql/6.3.0/mysql/seahub.sql new file mode 100644 index 0000000000..27d4b1fc49 --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/mysql/seahub.sql @@ -0,0 +1,175 @@ +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `auth_group` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `name` varchar(80) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `name` (`name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +/*!40000 ALTER TABLE `auth_group` DISABLE KEYS */; +/*!40000 ALTER TABLE `auth_group` ENABLE KEYS */; +/*!40101 SET @saved_cs_client = 
@@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `auth_group_permissions` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `group_id` int(11) NOT NULL, + `permission_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `auth_group_permissions_group_id_permission_id_0cd325b0_uniq` (`group_id`,`permission_id`), + KEY `auth_group_permissio_permission_id_84c5c92e_fk_auth_perm` (`permission_id`), + CONSTRAINT `auth_group_permissio_permission_id_84c5c92e_fk_auth_perm` FOREIGN KEY (`permission_id`) REFERENCES `auth_permission` (`id`), + CONSTRAINT `auth_group_permissions_group_id_b120cbf9_fk_auth_group_id` FOREIGN KEY (`group_id`) REFERENCES `auth_group` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +/*!40000 ALTER TABLE `auth_group_permissions` DISABLE KEYS */; +/*!40000 ALTER TABLE `auth_group_permissions` ENABLE KEYS */; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `auth_permission` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `name` varchar(255) NOT NULL, + `content_type_id` int(11) NOT NULL, + `codename` varchar(100) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `auth_permission_content_type_id_codename_01ab375a_uniq` (`content_type_id`,`codename`), + CONSTRAINT `auth_permission_content_type_id_2f476e4b_fk_django_co` FOREIGN KEY (`content_type_id`) REFERENCES `django_content_type` (`id`) +) ENGINE=InnoDB AUTO_INCREMENT=209 DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +/*!40000 ALTER TABLE `auth_permission` ENABLE KEYS */; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `auth_user` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `password` varchar(128) NOT NULL, + `last_login` datetime DEFAULT NULL, + `is_superuser` tinyint(1) NOT NULL, + `username` varchar(150) NOT NULL, + `first_name` varchar(30) NOT NULL, + `last_name` varchar(30) NOT NULL, + `email` varchar(254) NOT NULL, + `is_staff` tinyint(1) NOT NULL, + `is_active` tinyint(1) NOT NULL, + `date_joined` datetime NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `username` (`username`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +/*!40000 ALTER TABLE `auth_user` DISABLE KEYS */; +/*!40000 ALTER TABLE `auth_user` ENABLE KEYS */; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `auth_user_groups` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `user_id` int(11) NOT NULL, + `group_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `auth_user_groups_user_id_group_id_94350c0c_uniq` (`user_id`,`group_id`), + KEY `auth_user_groups_group_id_97559544_fk_auth_group_id` (`group_id`), + CONSTRAINT `auth_user_groups_group_id_97559544_fk_auth_group_id` FOREIGN KEY (`group_id`) REFERENCES `auth_group` (`id`), + CONSTRAINT `auth_user_groups_user_id_6a12ed8b_fk_auth_user_id` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +/*!40000 ALTER TABLE `auth_user_groups` DISABLE KEYS */; +/*!40000 ALTER TABLE `auth_user_groups` ENABLE KEYS */; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `auth_user_user_permissions` ( + `id` 
int(11) NOT NULL AUTO_INCREMENT, + `user_id` int(11) NOT NULL, + `permission_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `auth_user_user_permissions_user_id_permission_id_14a6b632_uniq` (`user_id`,`permission_id`), + KEY `auth_user_user_permi_permission_id_1fbb5f2c_fk_auth_perm` (`permission_id`), + CONSTRAINT `auth_user_user_permi_permission_id_1fbb5f2c_fk_auth_perm` FOREIGN KEY (`permission_id`) REFERENCES `auth_permission` (`id`), + CONSTRAINT `auth_user_user_permissions_user_id_a95ead1b_fk_auth_user_id` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + + +/*!40000 ALTER TABLE `wiki_personalwiki` DISABLE KEYS */; +/*!40000 ALTER TABLE `wiki_personalwiki` ENABLE KEYS */; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `wiki_wiki` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `username` varchar(255) NOT NULL, + `name` varchar(255) NOT NULL, + `slug` varchar(255) NOT NULL, + `repo_id` varchar(36) NOT NULL, + `permission` varchar(50) NOT NULL, + `created_at` datetime NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `slug` (`slug`), + UNIQUE KEY `wiki_wiki_username_3c0f83e1b93de663_uniq` (`username`,`repo_id`), + KEY `wiki_wiki_fde81f11` (`created_at`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +/*!40000 ALTER TABLE `wiki_wiki` DISABLE KEYS */; +/*!40000 ALTER TABLE `wiki_wiki` ENABLE KEYS */; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +CREATE TABLE IF NOT EXISTS `django_cas_ng_proxygrantingticket` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `session_key` varchar(255) DEFAULT NULL, + `pgtiou` varchar(255) DEFAULT NULL, + `pgt` varchar(255) DEFAULT NULL, + `date` datetime NOT NULL, + `user` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `django_cas_ng_proxygrant_session_key_user_id_4cd2ea19_uniq` (`session_key`,`user`), + KEY `django_cas_ng_proxyg_user_id_f833edd2_fk_auth_user` (`user`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +CREATE TABLE IF NOT EXISTS `django_cas_ng_sessionticket` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `session_key` varchar(255) NOT NULL, + `ticket` varchar(255) NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `organizations_orgmemberquota` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `org_id` int(11) NOT NULL, + `quota` int(11) NOT NULL, + PRIMARY KEY (`id`), + KEY `organizations_orgmemberquota_org_id_93dde51d` (`org_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +ALTER TABLE notifications_notification ADD INDEX `notifications_notification_386bba5a` (`primary`); + +ALTER TABLE institutions_institutionadmin ADD INDEX `institutions_institutionadmin_user_7560167c8413ff0e_uniq` (`user`); + +ALTER TABLE `post_office_attachment` add column `mimetype` varchar(255) NOT NULL; + +ALTER TABLE Event ADD INDEX `ix_event_timestamp` (`timestamp`); +ALTER TABLE FileAudit ADD INDEX `ix_FileAudit_timestamp` (`timestamp`); +ALTER 
TABLE FileUpdate ADD INDEX `ix_FileUpdate_timestamp` (`timestamp`); +ALTER TABLE UserTrafficStat ADD INDEX `ix_UserTrafficStat_month` (`month`); diff --git a/scripts/upgrade/sql/6.3.0/sqlite3/ccnet/groupmgr.sql b/scripts/upgrade/sql/6.3.0/sqlite3/ccnet/groupmgr.sql new file mode 100644 index 0000000000..e52ed988e7 --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/sqlite3/ccnet/groupmgr.sql @@ -0,0 +1,2 @@ +CREATE TABLE IF NOT EXISTS GroupStructure (group_id INTEGER PRIMARY KEY, path VARCHAR(1024)); +alter table `Group` add column parent_group_id INTEGER default 0; -- Replace `Group` if you configured table `Group` to another name. diff --git a/scripts/upgrade/sql/6.3.0/sqlite3/ccnet/usermgr.sql b/scripts/upgrade/sql/6.3.0/sqlite3/ccnet/usermgr.sql new file mode 100644 index 0000000000..830bfd3fbf --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/sqlite3/ccnet/usermgr.sql @@ -0,0 +1 @@ +CREATE TABLE IF NOT EXISTS LDAPConfig (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER); diff --git a/scripts/upgrade/sql/6.3.0/sqlite3/seafevents.sql b/scripts/upgrade/sql/6.3.0/sqlite3/seafevents.sql new file mode 100644 index 0000000000..9327a2206b --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/sqlite3/seafevents.sql @@ -0,0 +1,4 @@ +CREATE INDEX IF NOT EXISTS ix_event_timestamp ON Event (timestamp); +CREATE INDEX IF NOT EXISTS ix_FileAudit_timestamp ON FileAudit (timestamp); +CREATE INDEX IF NOT EXISTS ix_FileUpdate_timestamp ON FileUpdate (timestamp); +CREATE INDEX IF NOT EXISTS ix_UserTrafficStat_month ON UserTrafficStat (month); diff --git a/scripts/upgrade/sql/6.3.0/sqlite3/seafile.sql b/scripts/upgrade/sql/6.3.0/sqlite3/seafile.sql new file mode 100644 index 0000000000..b39a75cb12 --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/sqlite3/seafile.sql @@ -0,0 +1,3 @@ +CREATE TABLE IF NOT EXISTS SeafileConf (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER); + +CREATE TABLE IF NOT EXISTS RepoInfo (repo_id CHAR(36) PRIMARY KEY, name VARCHAR(255) NOT NULL, update_time INTEGER, version INTEGER, is_encrypted INTEGER, last_modifier VARCHAR(255)); diff --git a/scripts/upgrade/sql/6.3.0/sqlite3/seahub.sql b/scripts/upgrade/sql/6.3.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..ef01d251d6 --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/sqlite3/seahub.sql @@ -0,0 +1,39 @@ +CREATE TABLE IF NOT EXISTS "auth_group" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "name" varchar(80) NOT NULL UNIQUE); +CREATE TABLE IF NOT EXISTS "auth_group_permissions" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "group_id" integer NOT NULL REFERENCES "auth_group" ("id"), "permission_id" integer NOT NULL REFERENCES "auth_permission" ("id")); +CREATE TABLE IF NOT EXISTS "auth_user_groups" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user_id" integer NOT NULL REFERENCES "auth_user" ("id"), "group_id" integer NOT NULL REFERENCES "auth_group" ("id")); +CREATE TABLE IF NOT EXISTS "auth_user_user_permissions" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user_id" integer NOT NULL REFERENCES "auth_user" ("id"), "permission_id" integer NOT NULL REFERENCES "auth_permission" ("id")); +CREATE TABLE IF NOT EXISTS "auth_permission" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "content_type_id" integer NOT NULL REFERENCES "django_content_type" ("id"), "codename" varchar(100) NOT NULL, "name" varchar(255) NOT NULL); +CREATE TABLE IF NOT EXISTS "auth_user" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "password" 
varchar(128) NOT NULL, "last_login" datetime NULL, "is_superuser" bool NOT NULL, "first_name" varchar(30) NOT NULL, "last_name" varchar(30) NOT NULL, "email" varchar(254) NOT NULL, "is_staff" bool NOT NULL, "is_active" bool NOT NULL, "date_joined" datetime NOT NULL, "username" varchar(150) NOT NULL UNIQUE); + +CREATE TABLE IF NOT EXISTS "organizations_orgmemberquota" ( + "id" integer NOT NULL PRIMARY KEY, + "org_id" integer NOT NULL, + "quota" integer NOT NULL +); +CREATE TABLE IF NOT EXISTS "django_cas_ng_sessionticket" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "session_key" varchar(255) NOT NULL, "ticket" varchar(255) NOT NULL); +CREATE TABLE IF NOT EXISTS "django_cas_ng_proxygrantingticket" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "session_key" varchar(255) NULL, "pgtiou" varchar(255) NULL, "pgt" varchar(255) NULL, "date" datetime NOT NULL, "user" varchar(255) NOT NULL); + +CREATE UNIQUE INDEX IF NOT EXISTS "auth_group_permissions_group_id_permission_id_0cd325b0_uniq" ON "auth_group_permissions" ("group_id", "permission_id"); +CREATE INDEX IF NOT EXISTS "auth_group_permissions_group_id_b120cbf9" ON "auth_group_permissions" ("group_id"); +CREATE INDEX IF NOT EXISTS "auth_group_permissions_permission_id_84c5c92e" ON "auth_group_permissions" ("permission_id"); +CREATE UNIQUE INDEX IF NOT EXISTS "auth_user_groups_user_id_group_id_94350c0c_uniq" ON "auth_user_groups" ("user_id", "group_id"); +CREATE INDEX IF NOT EXISTS "auth_user_groups_user_id_6a12ed8b" ON "auth_user_groups" ("user_id"); +CREATE INDEX IF NOT EXISTS "auth_user_groups_group_id_97559544" ON "auth_user_groups" ("group_id"); +CREATE UNIQUE INDEX IF NOT EXISTS "auth_user_user_permissions_user_id_permission_id_14a6b632_uniq" ON "auth_user_user_permissions" ("user_id", "permission_id"); +CREATE INDEX IF NOT EXISTS "auth_user_user_permissions_user_id_a95ead1b" ON "auth_user_user_permissions" ("user_id"); +CREATE INDEX IF NOT EXISTS "auth_user_user_permissions_permission_id_1fbb5f2c" ON "auth_user_user_permissions" ("permission_id"); +CREATE UNIQUE INDEX IF NOT EXISTS "auth_permission_content_type_id_codename_01ab375a_uniq" ON "auth_permission" ("content_type_id", "codename"); +CREATE INDEX IF NOT EXISTS "auth_permission_content_type_id_2f476e4b" ON "auth_permission" ("content_type_id"); + +CREATE TABLE IF NOT EXISTS "wiki_wiki" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL, "name" varchar(255) NOT NULL, "slug" varchar(255) NOT NULL UNIQUE, "repo_id" varchar(36) NOT NULL, "permission" varchar(50) NOT NULL, "created_at" datetime NOT NULL, UNIQUE ("username", "repo_id")); + +CREATE INDEX IF NOT EXISTS "wiki_wiki_fde81f11" ON "wiki_wiki" ("created_at"); + +CREATE INDEX IF NOT EXISTS "notifications_notification_386bba5a" ON "notifications_notification" ("primary"); +CREATE INDEX IF NOT EXISTS "institutions_institutionadmin_ee11cbb1" ON "institutions_institutionadmin" ("user"); + +CREATE INDEX IF NOT EXISTS "organizations_orgmemberquota_944dadb6" ON "organizations_orgmemberquota" ("org_id"); +CREATE UNIQUE INDEX IF NOT EXISTS "django_cas_ng_proxygrantingticket_session_key_user_8a4ec2bc_uniq" ON "django_cas_ng_proxygrantingticket" ("session_key", "user"); +CREATE INDEX IF NOT EXISTS "django_cas_ng_proxygrantingticket_user_1f42619d" ON "django_cas_ng_proxygrantingticket" ("user"); + +ALTER TABLE "post_office_attachment" add column "mimetype" varchar(255); diff --git a/scripts/upgrade/sql/7.0.0/mysql/ccnet.sql b/scripts/upgrade/sql/7.0.0/mysql/ccnet.sql new file mode 100644 index 
0000000000..885c275b51 --- /dev/null +++ b/scripts/upgrade/sql/7.0.0/mysql/ccnet.sql @@ -0,0 +1 @@ +ALTER TABLE UserRole ADD COLUMN is_manual_set INTEGER DEFAULT 0; diff --git a/scripts/upgrade/sql/7.0.0/mysql/seafile.sql b/scripts/upgrade/sql/7.0.0/mysql/seafile.sql new file mode 100644 index 0000000000..4bae30fc6c --- /dev/null +++ b/scripts/upgrade/sql/7.0.0/mysql/seafile.sql @@ -0,0 +1,4 @@ +ALTER TABLE RepoInfo ADD COLUMN status INTEGER DEFAULT 0; +CREATE TABLE IF NOT EXISTS RepoSyncError (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, token CHAR(41), error_time BIGINT UNSIGNED, error_con VARCHAR(1024), UNIQUE INDEX(token)); +ALTER TABLE RepoSyncError MODIFY COLUMN error_con VARCHAR(1024); +CREATE TABLE IF NOT EXISTS WebUploadTempFiles (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(40) NOT NULL, file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL); diff --git a/scripts/upgrade/sql/7.0.0/mysql/seahub.sql b/scripts/upgrade/sql/7.0.0/mysql/seahub.sql new file mode 100644 index 0000000000..230b950d6b --- /dev/null +++ b/scripts/upgrade/sql/7.0.0/mysql/seahub.sql @@ -0,0 +1,125 @@ +CREATE TABLE IF NOT EXISTS `drafts_draft` ( + `id` int(11) NOT NULL, + `created_at` datetime(6) NOT NULL, + `updated_at` datetime(6) NOT NULL, + `username` varchar(255) NOT NULL, + `origin_repo_id` varchar(36) NOT NULL, + `origin_file_version` varchar(100) NOT NULL, + `draft_file_path` varchar(1024) NOT NULL, + `origin_file_uuid` char(32) NOT NULL, + `publish_file_version` varchar(100) DEFAULT NULL, + `status` varchar(20) NOT NULL, + PRIMARY KEY (`id`), + KEY `drafts_draft_origin_file_uuid_id_f150319e_fk_tags_file` (`origin_file_uuid`), + KEY `drafts_draft_created_at_e9f4523f` (`created_at`), + KEY `drafts_draft_updated_at_0a144b05` (`updated_at`), + KEY `drafts_draft_username_73e6738b` (`username`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `drafts_draftreviewer` ( + `id` int(11) NOT NULL, + `reviewer` varchar(255) NOT NULL, + `draft_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + KEY `drafts_draftreviewer_reviewer_e4c777ac` (`reviewer`), + KEY `drafts_draftreviewer_draft_id_4ea59775_fk_drafts_draft_id` (`draft_id`), + CONSTRAINT `drafts_draftreviewer_draft_id_4ea59775_fk_drafts_draft_id` FOREIGN KEY (`draft_id`) REFERENCES `drafts_draft` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `options_useroptions` ADD INDEX `options_useroptions_option_key_7bf7ae4b` (`option_key`); + +ALTER TABLE TotalStorageStat DROP primary key; +ALTER TABLE TotalStorageStat ADD `id` BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE TotalStorageStat ADD `org_id` INT NOT NULL DEFAULT -1; +ALTER TABLE TotalStorageStat ADD INDEX `idx_storage_time_org` (`timestamp`, `org_id`); + +ALTER TABLE FileOpsStat ADD `org_id` INT NOT NULL DEFAULT -1; +ALTER TABLE FileOpsStat ADD INDEX `idx_file_ops_time_org` (`timestamp`, `org_id`); + +ALTER TABLE UserActivityStat DROP primary key; +ALTER TABLE UserActivityStat ADD `id` BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE UserActivityStat ADD UNIQUE (name_time_md5); +ALTER TABLE UserActivityStat ADD `org_id` INT NOT NULL DEFAULT -1; +ALTER TABLE UserActivityStat ADD INDEX `idx_activity_time_org` (`timestamp`, `org_id`); + +DROP TABLE UserTrafficStat; + + + +CREATE TABLE IF NOT EXISTS `repo_tags_repotags` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `name` varchar(255) NOT NULL, + `color` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + KEY `repo_tags_repotags_repo_id_1163a48f` (`repo_id`), + 
KEY `repo_tags_repotags_name_3f4c9027` (`name`), + KEY `repo_tags_repotags_color_1292b6c1` (`color`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + + +CREATE TABLE IF NOT EXISTS `file_tags_filetags` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `file_uuid_id` char(32) NOT NULL, + `repo_tag_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + KEY `file_tags_filetags_file_uuid_id_e30f0ec8_fk_tags_file` (`file_uuid_id`), + KEY `file_tags_filetags_repo_tag_id_c39660cb_fk_repo_tags_repotags_id` (`repo_tag_id`), + CONSTRAINT `file_tags_filetags_file_uuid_id_e30f0ec8_fk_tags_file` FOREIGN KEY (`file_uuid_id`) REFERENCES `tags_fileuuidmap` (`uuid`), + CONSTRAINT `file_tags_filetags_repo_tag_id_c39660cb_fk_repo_tags_repotags_id` FOREIGN KEY (`repo_tag_id`) REFERENCES `repo_tags_repotags` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + + +CREATE TABLE IF NOT EXISTS `related_files_relatedfiles` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `o_uuid_id` char(32) NOT NULL, + `r_uuid_id` char(32) NOT NULL, + PRIMARY KEY (`id`), + KEY `related_files_relate_o_uuid_id_aaa8e613_fk_tags_file` (`o_uuid_id`), + KEY `related_files_relate_r_uuid_id_031751df_fk_tags_file` (`r_uuid_id`), + CONSTRAINT `related_files_relate_o_uuid_id_aaa8e613_fk_tags_file` FOREIGN KEY (`o_uuid_id`) REFERENCES `tags_fileuuidmap` (`uuid`), + CONSTRAINT `related_files_relate_r_uuid_id_031751df_fk_tags_file` FOREIGN KEY (`r_uuid_id`) REFERENCES `tags_fileuuidmap` (`uuid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + + +CREATE TABLE IF NOT EXISTS `organizations_orgsettings` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `org_id` int(11) NOT NULL, + `role` varchar(100) DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `organizations_orgsettings_org_id_630f6843_uniq` (`org_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +DROP INDEX `profile_profile_contact_email_0975e4bf_uniq` ON `profile_profile`; +ALTER TABLE `profile_profile` ADD CONSTRAINT `profile_profile_contact_email_0975e4bf_uniq` UNIQUE (`contact_email`); + +CREATE TABLE IF NOT EXISTS `social_auth_usersocialauth` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `username` varchar(255) NOT NULL, + `provider` varchar(32) NOT NULL, + `uid` varchar(150) NOT NULL, + `extra_data` longtext NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `social_auth_usersocialauth_provider_uid_e6b5e668_uniq` (`provider`,`uid`), + KEY `social_auth_usersocialauth_username_3f06b5cf` (`username`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + + +ALTER TABLE `base_filecomment` ADD `detail` LONGTEXT DEFAULT NULL; +ALTER TABLE `base_filecomment` ADD `resolved` TINYINT(1) NOT NULL DEFAULT 0; +ALTER TABLE `base_filecomment` ADD INDEX `resolved` (`resolved`); + + + +CREATE TABLE IF NOT EXISTS `base_reposecretkey` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `secret_key` varchar(44) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `repo_id` (`repo_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + diff --git a/scripts/upgrade/sql/7.0.0/sqlite3/ccnet/usermgr.sql b/scripts/upgrade/sql/7.0.0/sqlite3/ccnet/usermgr.sql new file mode 100644 index 0000000000..885c275b51 --- /dev/null +++ b/scripts/upgrade/sql/7.0.0/sqlite3/ccnet/usermgr.sql @@ -0,0 +1 @@ +ALTER TABLE UserRole ADD COLUMN is_manual_set INTEGER DEFAULT 0; diff --git a/scripts/upgrade/sql/7.0.0/sqlite3/seafile.sql b/scripts/upgrade/sql/7.0.0/sqlite3/seafile.sql new file mode 100644 index 0000000000..7c82724772 --- /dev/null +++ b/scripts/upgrade/sql/7.0.0/sqlite3/seafile.sql @@ -0,0 +1,7 @@ +ALTER TABLE RepoInfo ADD COLUMN status INTEGER DEFAULT 0; +CREATE TABLE IF NOT 
EXISTS RepoSyncError (token CHAR(41) PRIMARY KEY, error_time BIGINT, error_con VARCHAR(1024)); +ALTER TABLE RepoSyncError RENAME TO TmpRepoSyncError; +CREATE TABLE RepoSyncError (token CHAR(41) PRIMARY KEY, error_time BIGINT, error_con VARCHAR(1024)); +INSERT INTO RepoSyncError SELECT * FROM TmpRepoSyncError; +DROP TABLE TmpRepoSyncError; +CREATE TABLE IF NOT EXISTS WebUploadTempFiles (repo_id CHAR(40) NOT NULL, file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL); diff --git a/scripts/upgrade/sql/7.0.0/sqlite3/seahub.sql b/scripts/upgrade/sql/7.0.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..a893b5b6a4 --- /dev/null +++ b/scripts/upgrade/sql/7.0.0/sqlite3/seahub.sql @@ -0,0 +1,40 @@ +CREATE TABLE IF NOT EXISTS "drafts_draft" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL, "username" varchar(255) NOT NULL, "origin_repo_id" varchar(36) NOT NULL, "origin_file_version" varchar(100) NOT NULL, "draft_file_path" varchar(1024) NOT NULL, "publish_file_version" varchar(100) NULL, "status" varchar(20) NOT NULL, "origin_file_uuid" char(32) NOT NULL); +CREATE INDEX IF NOT EXISTS "drafts_draft_created_at_e9f4523f" ON "drafts_draft" ("created_at"); +CREATE INDEX IF NOT EXISTS "drafts_draft_updated_at_0a144b05" ON "drafts_draft" ("updated_at"); +CREATE INDEX IF NOT EXISTS "drafts_draft_username_73e6738b" ON "drafts_draft" ("username"); +CREATE INDEX IF NOT EXISTS "drafts_draft_origin_file_uuid_7c003c98" ON "drafts_draft" ("origin_file_uuid"); + +CREATE TABLE IF NOT EXISTS "drafts_draftreviewer" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "reviewer" varchar(255) NOT NULL, "draft_id" integer NOT NULL REFERENCES "drafts_draft" ("id")); +CREATE INDEX IF NOT EXISTS "drafts_draftreviewer_reviewer_e4c777ac" ON "drafts_draftreviewer" ("reviewer"); +CREATE INDEX IF NOT EXISTS "drafts_draftreviewer_draft_id_4ea59775" ON "drafts_draftreviewer" ("draft_id"); + +CREATE TABLE IF NOT EXISTS "social_auth_association" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "server_url" varchar(255) NOT NULL, "handle" varchar(255) NOT NULL, "secret" varchar(255) NOT NULL, "issued" integer NOT NULL, "lifetime" integer NOT NULL, "assoc_type" varchar(64) NOT NULL); +CREATE TABLE IF NOT EXISTS "social_auth_code" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "email" varchar(254) NOT NULL, "code" varchar(32) NOT NULL, "verified" bool NOT NULL, "timestamp" datetime NOT NULL); +CREATE TABLE IF NOT EXISTS "social_auth_nonce" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "server_url" varchar(255) NOT NULL, "timestamp" integer NOT NULL, "salt" varchar(65) NOT NULL); +CREATE TABLE IF NOT EXISTS "social_auth_partial" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "token" varchar(32) NOT NULL, "next_step" smallint unsigned NOT NULL, "backend" varchar(32) NOT NULL, "data" text NOT NULL, "timestamp" datetime NOT NULL); +CREATE TABLE IF NOT EXISTS "social_auth_usersocialauth" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL, "provider" varchar(32) NOT NULL, "uid" varchar(255) NOT NULL, "extra_data" text NOT NULL); + + +CREATE TABLE IF NOT EXISTS "repo_tags_repotags" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "name" varchar(255) NOT NULL, "color" varchar(255) NOT NULL); +CREATE INDEX IF NOT EXISTS "repo_tags_repotags_repo_id_1163a48f" ON "repo_tags_repotags" ("repo_id"); +CREATE INDEX IF NOT EXISTS "repo_tags_repotags_name_3f4c9027" ON "repo_tags_repotags" ("name"); +CREATE 
INDEX IF NOT EXISTS "repo_tags_repotags_color_1292b6c1" ON "repo_tags_repotags" ("color"); + + +CREATE TABLE IF NOT EXISTS "file_tags_filetags" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "file_uuid_id" char(32) NOT NULL REFERENCES "tags_fileuuidmap" ("uuid"), "repo_tag_id" integer NOT NULL REFERENCES "repo_tags_repotags" ("id")); +CREATE INDEX IF NOT EXISTS "file_tags_filetags_file_uuid_id_e30f0ec8" ON "file_tags_filetags" ("file_uuid_id"); +CREATE INDEX IF NOT EXISTS "file_tags_filetags_repo_tag_id_c39660cb" ON "file_tags_filetags" ("repo_tag_id"); + + +CREATE TABLE IF NOT EXISTS "related_files_relatedfiles" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "o_uuid_id" char(32) NOT NULL REFERENCES "tags_fileuuidmap" ("uuid"), "r_uuid_id" char(32) NOT NULL REFERENCES "tags_fileuuidmap" ("uuid")); +CREATE INDEX IF NOT EXISTS "related_files_relatedfiles_o_uuid_id_aaa8e613" ON "related_files_relatedfiles" ("o_uuid_id"); +CREATE INDEX IF NOT EXISTS "related_files_relatedfiles_r_uuid_id_031751df" ON "related_files_relatedfiles" ("r_uuid_id"); + + +ALTER TABLE "base_filecomment" ADD COLUMN "detail" text DEFAULT NULL; +ALTER TABLE "base_filecomment" ADD COLUMN "resolved" bool NOT NULL DEFAULT 0; +CREATE INDEX IF NOT EXISTS "base_filecomment_resolved_e0717eca" ON "base_filecomment" ("resolved"); + + +CREATE TABLE IF NOT EXISTS "base_reposecretkey" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL UNIQUE, "secret_key" varchar(44) NOT NULL); + diff --git a/scripts/upgrade/sql/7.1.0/mysql/seahub.sql b/scripts/upgrade/sql/7.1.0/mysql/seahub.sql new file mode 100644 index 0000000000..c6bb448dc1 --- /dev/null +++ b/scripts/upgrade/sql/7.1.0/mysql/seahub.sql @@ -0,0 +1,73 @@ +CREATE TABLE IF NOT EXISTS `base_reposecretkey` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `secret_key` varchar(44) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `repo_id` (`repo_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +ALTER TABLE `constance_config` MODIFY `value` longtext DEFAULT NULL; +ALTER TABLE `constance_config` CHANGE `key` `constance_key` varchar(255) NOT NULL; + +DROP INDEX `drafts_draft_origin_file_uuid_7c003c98_uniq` ON `drafts_draft`; +ALTER TABLE `drafts_draft` ADD CONSTRAINT `drafts_draft_origin_file_uuid_7c003c98_uniq` UNIQUE (`origin_file_uuid`); +CREATE INDEX `drafts_draft_origin_repo_id_8978ca2c` ON `drafts_draft` (`origin_repo_id`); + + +CREATE TABLE IF NOT EXISTS `file_participants_fileparticipant` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `username` varchar(255) NOT NULL, + `uuid_id` char(32) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `file_participants_fileparticipant_uuid_id_username_c747dd36_uniq` (`uuid_id`,`username`), + CONSTRAINT `file_participants_fi_uuid_id_861b7339_fk_tags_file` FOREIGN KEY (`uuid_id`) REFERENCES `tags_fileuuidmap` (`uuid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +CREATE TABLE IF NOT EXISTS `repo_api_tokens` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `app_name` varchar(255) NOT NULL, + `token` varchar(40) NOT NULL, + `generated_at` datetime NOT NULL, + `generated_by` varchar(255) NOT NULL, + `last_access` datetime NOT NULL, + `permission` varchar(15) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `token` (`token`), + KEY `repo_api_tokens_repo_id_47a50fef` (`repo_id`), + KEY `repo_api_tokens_app_name_7c395c31` (`app_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +CREATE TABLE IF NOT EXISTS `abuse_reports_abusereport` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + 
`reporter` longtext DEFAULT NULL, + `repo_id` varchar(36) NOT NULL, + `repo_name` varchar(255) NOT NULL, + `file_path` longtext DEFAULT NULL, + `abuse_type` varchar(255) NOT NULL, + `description` longtext DEFAULT NULL, + `handled` tinyint(1) NOT NULL, + `time` datetime(6) NOT NULL, + PRIMARY KEY (`id`), + KEY `abuse_reports_abusereport_abuse_type_703d5335` (`abuse_type`), + KEY `abuse_reports_abusereport_handled_94b8304c` (`handled`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +CREATE TABLE IF NOT EXISTS `repo_share_invitation` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `path` longtext NOT NULL, + `permission` varchar(50) NOT NULL, + `invitation_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + KEY `repo_share_invitatio_invitation_id_b71effd2_fk_invitatio` (`invitation_id`), + KEY `repo_share_invitation_repo_id_7bcf84fa` (`repo_id`), + CONSTRAINT `repo_share_invitatio_invitation_id_b71effd2_fk_invitatio` FOREIGN KEY (`invitation_id`) REFERENCES `invitations_invitation` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `post_office_attachment` add column `headers` longtext DEFAULT NULL; + diff --git a/scripts/upgrade/sql/7.1.0/sqlite3/seahub.sql b/scripts/upgrade/sql/7.1.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..4af2aad3a0 --- /dev/null +++ b/scripts/upgrade/sql/7.1.0/sqlite3/seahub.sql @@ -0,0 +1,43 @@ +CREATE TABLE IF NOT EXISTS "base_reposecretkey" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL UNIQUE, "secret_key" varchar(44) NOT NULL); + + +DROP TABLE IF EXISTS "constance_config_old"; +ALTER TABLE "constance_config" RENAME TO "constance_config_old"; +CREATE TABLE IF NOT EXISTS "constance_config" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "constance_key" varchar(255) NOT NULL UNIQUE, "value" text NULL); +INSERT INTO "constance_config" ("id", "constance_key", "value") SELECT "id", "key", "value" FROM "constance_config_old"; +DROP TABLE "constance_config_old"; + + + +DROP TABLE IF EXISTS "drafts_draft_old"; +ALTER TABLE "drafts_draft" RENAME TO "drafts_draft_old"; +CREATE TABLE IF NOT EXISTS "drafts_draft" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL, "username" varchar(255) NOT NULL, "origin_file_version" varchar(100) NOT NULL, "draft_file_path" varchar(1024) NOT NULL, "origin_file_uuid" char(32) NOT NULL UNIQUE, "publish_file_version" varchar(100) NULL, "status" varchar(20) NOT NULL, "origin_repo_id" varchar(36) NOT NULL); +INSERT INTO "drafts_draft" ("id", "created_at", "updated_at", "username", "origin_file_version", "draft_file_path", "origin_file_uuid", "publish_file_version", "status", "origin_repo_id") SELECT "id", "created_at", "updated_at", "username", "origin_file_version", "draft_file_path", "origin_file_uuid", "publish_file_version", "status", "origin_repo_id" FROM "drafts_draft_old"; +DROP TABLE "drafts_draft_old"; + +CREATE INDEX IF NOT EXISTS "drafts_draft_created_at_e9f4523f" ON "drafts_draft" ("created_at"); +CREATE INDEX IF NOT EXISTS "drafts_draft_origin_repo_id_8978ca2c" ON "drafts_draft" ("origin_repo_id"); +CREATE INDEX IF NOT EXISTS "drafts_draft_updated_at_0a144b05" ON "drafts_draft" ("updated_at"); +CREATE INDEX IF NOT EXISTS "drafts_draft_username_73e6738b" ON "drafts_draft" ("username"); + + +CREATE TABLE IF NOT EXISTS "abuse_reports_abusereport" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "reporter" text NULL, "repo_id" varchar(36) NOT NULL, "repo_name" varchar(255) NOT NULL, "file_path" 
text NULL, "abuse_type" varchar(255) NOT NULL, "description" text NULL, "handled" bool NOT NULL, "time" datetime NOT NULL); +CREATE INDEX IF NOT EXISTS "abuse_reports_abusereport_abuse_type_703d5335" ON "abuse_reports_abusereport" ("abuse_type"); +CREATE INDEX IF NOT EXISTS "abuse_reports_abusereport_handled_94b8304c" ON "abuse_reports_abusereport" ("handled"); + + +CREATE TABLE IF NOT EXISTS "file_participants_fileparticipant" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL, "uuid_id" char(32) NOT NULL REFERENCES "tags_fileuuidmap" ("uuid")); +CREATE UNIQUE INDEX IF NOT EXISTS "file_participants_fileparticipant_uuid_id_username_c747dd36_uniq" ON "file_participants_fileparticipant" ("uuid_id", "username"); +CREATE INDEX IF NOT EXISTS "file_participants_fileparticipant_uuid_id_861b7339" ON "file_participants_fileparticipant" ("uuid_id"); + + +CREATE TABLE IF NOT EXISTS "repo_share_invitation" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "path" text NOT NULL, "permission" varchar(50) NOT NULL, "invitation_id" integer NOT NULL REFERENCES "invitations_invitation" ("id")); +CREATE INDEX IF NOT EXISTS "repo_share_invitation_repo_id_7bcf84fa" ON "repo_share_invitation" ("repo_id"); +CREATE INDEX IF NOT EXISTS "repo_share_invitation_invitation_id_b71effd2" ON "repo_share_invitation" ("invitation_id"); + +CREATE TABLE IF NOT EXISTS "repo_api_tokens" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "app_name" varchar(255) NOT NULL, "token" varchar(40) NOT NULL UNIQUE, "generated_at" datetime NOT NULL, "generated_by" varchar(255) NOT NULL, "last_access" datetime NOT NULL, "permission" varchar(15) NOT NULL); +CREATE INDEX IF NOT EXISTS "repo_api_tokens_repo_id_47a50fef" ON "repo_api_tokens" ("repo_id"); +CREATE INDEX IF NOT EXISTS "repo_api_tokens_app_name_7c395c31" ON "repo_api_tokens" ("app_name"); + +ALTER TABLE "post_office_attachment" add column "headers" text DEFAULT NULL; + diff --git a/scripts/upgrade/sql/8.0.0/mysql/seafevents.sql b/scripts/upgrade/sql/8.0.0/mysql/seafevents.sql new file mode 100644 index 0000000000..57611397d1 --- /dev/null +++ b/scripts/upgrade/sql/8.0.0/mysql/seafevents.sql @@ -0,0 +1,4 @@ +ALTER TABLE `VirusFile` ADD COLUMN `has_ignored` TINYINT(1) NOT NULL DEFAULT 0; +ALTER TABLE `VirusFile` CHANGE `has_handle` `has_deleted` TINYINT(1); +ALTER TABLE `VirusFile` ADD INDEX `has_deleted` (`has_deleted`); +ALTER TABLE `VirusFile` ADD INDEX `has_ignored` (`has_ignored`); diff --git a/scripts/upgrade/sql/8.0.0/mysql/seahub.sql b/scripts/upgrade/sql/8.0.0/mysql/seahub.sql new file mode 100644 index 0000000000..2c4a48ae10 --- /dev/null +++ b/scripts/upgrade/sql/8.0.0/mysql/seahub.sql @@ -0,0 +1,57 @@ +CREATE TABLE IF NOT EXISTS `ocm_share` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `shared_secret` varchar(36) NOT NULL, + `from_user` varchar(255) NOT NULL, + `to_user` varchar(255) NOT NULL, + `to_server_url` varchar(200) NOT NULL, + `repo_id` varchar(36) NOT NULL, + `repo_name` varchar(255) NOT NULL, + `permission` varchar(50) NOT NULL, + `path` longtext NOT NULL, + `ctime` datetime(6) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `shared_secret` (`shared_secret`), + KEY `ocm_share_from_user_7fbb7bb6` (`from_user`), + KEY `ocm_share_to_user_4e255523` (`to_user`), + KEY `ocm_share_to_server_url_43f0e89b` (`to_server_url`), + KEY `ocm_share_repo_id_51937581` (`repo_id`) +) ENGINE = InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `ocm_share_received` ( + `id` int(11) 
NOT NULL AUTO_INCREMENT, + `shared_secret` varchar(36) NOT NULL, + `from_user` varchar(255) NOT NULL, + `to_user` varchar(255) NOT NULL, + `from_server_url` varchar(200) NOT NULL, + `repo_id` varchar(36) NOT NULL, + `repo_name` varchar(255) NOT NULL, + `permission` varchar(50) NOT NULL, + `path` longtext NOT NULL, + `provider_id` varchar(40) NOT NULL, + `ctime` datetime(6) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `shared_secret` (`shared_secret`), + KEY `ocm_share_received_from_user_8137d8eb` (`from_user`), + KEY `ocm_share_received_to_user_0921d09a` (`to_user`), + KEY `ocm_share_received_from_server_url_10527b80` (`from_server_url`), + KEY `ocm_share_received_repo_id_9e77a1b9` (`repo_id`), + KEY `ocm_share_received_provider_id_60c873e0` (`provider_id`) +) ENGINE = InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `repo_auto_delete` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `days` int(11) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `repo_id` (`repo_id`) +) ENGINE = InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `external_department` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `group_id` int(11) NOT NULL, + `provider` varchar(32) NOT NULL, + `outer_id` bigint(20) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `group_id` (`group_id`), + UNIQUE KEY `external_department_provider_outer_id_8dns6vkw_uniq` (`provider`,`outer_id`) +) ENGINE = InnoDB DEFAULT CHARSET=utf8; diff --git a/scripts/upgrade/sql/8.0.0/sqlite3/seafevents.sql b/scripts/upgrade/sql/8.0.0/sqlite3/seafevents.sql new file mode 100644 index 0000000000..2bc16950e1 --- /dev/null +++ b/scripts/upgrade/sql/8.0.0/sqlite3/seafevents.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS "VirusFile_old"; +ALTER TABLE "VirusFile" RENAME TO "VirusFile_old"; +CREATE TABLE IF NOT EXISTS "VirusFile" ("vid" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "commit_id" varchar(40) NOT NULL, "file_path" text NOT NULL, "has_deleted" tinyint(1) NOT NULL, "has_ignored" TINYINT(1) NOT NULL DEFAULT 0); +INSERT INTO "VirusFile" ("vid", "repo_id", "commit_id", "file_path", "has_deleted") SELECT "vid", "repo_id", "commit_id", "file_path", "has_handle" FROM "VirusFile_old"; +DROP TABLE "VirusFile_old"; + +CREATE INDEX IF NOT EXISTS "VirusFile_repo_id_yewnci4gd" ON "VirusFile" ("repo_id"); +CREATE INDEX IF NOT EXISTS "VirusFile_has_deleted_834ndyts" ON "VirusFile" ("has_deleted"); +CREATE INDEX IF NOT EXISTS "VirusFile_has_ignored_d84tvuwg" ON "VirusFile" ("has_ignored"); diff --git a/scripts/upgrade/sql/8.0.0/sqlite3/seahub.sql b/scripts/upgrade/sql/8.0.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..0d6a52b24e --- /dev/null +++ b/scripts/upgrade/sql/8.0.0/sqlite3/seahub.sql @@ -0,0 +1,17 @@ +CREATE TABLE IF NOT EXISTS "ocm_share" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "shared_secret" varchar(36) NOT NULL UNIQUE, "from_user" varchar(255) NOT NULL, "to_user" varchar(255) NOT NULL, "to_server_url" varchar(200) NOT NULL, "repo_id" varchar(36) NOT NULL, "repo_name" varchar(255) NOT NULL, "permission" varchar(50) NOT NULL, "path" text NOT NULL, "ctime" datetime(6) NOT NULL); +CREATE INDEX IF NOT EXISTS "ocm_share_from_user_7fbb7bb6" ON "ocm_share" ("from_user"); +CREATE INDEX IF NOT EXISTS "ocm_share_to_user_4e255523" ON "ocm_share" ("to_user"); +CREATE INDEX IF NOT EXISTS "ocm_share_to_server_url_43f0e89b" ON "ocm_share" ("to_server_url"); +CREATE INDEX IF NOT EXISTS "ocm_share_repo_id_51937581" ON "ocm_share" ("repo_id"); + +CREATE TABLE IF NOT EXISTS 
"ocm_share_received" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "shared_secret" varchar(36) NOT NULL UNIQUE, "from_user" varchar(255) NOT NULL, "to_user" varchar(255) NOT NULL, "from_server_url" varchar(200) NOT NULL, "repo_id" varchar(36) NOT NULL, "repo_name" varchar(255) NOT NULL, "permission" varchar(50) NOT NULL, "path" text NOT NULL, "provider_id" varchar(40) NOT NULL, "ctime" datetime(6) NOT NULL); +CREATE INDEX IF NOT EXISTS "ocm_share_received_from_user_8137d8eb" ON "ocm_share_received" ("from_user"); +CREATE INDEX IF NOT EXISTS "ocm_share_received_to_user_0921d09a" ON "ocm_share_received" ("to_user"); +CREATE INDEX IF NOT EXISTS "ocm_share_received_from_server_url_10527b80" ON "ocm_share_received" ("from_server_url"); +CREATE INDEX IF NOT EXISTS "ocm_share_received_repo_id_9e77a1b9" ON "ocm_share_received" ("repo_id"); +CREATE INDEX IF NOT EXISTS "ocm_share_received_provider_id_60c873e0" ON "ocm_share_received" ("provider_id"); + +CREATE TABLE IF NOT EXISTS "repo_auto_delete" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL UNIQUE, "days" integer NOT NULL); + +CREATE TABLE IF NOT EXISTS "external_department" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "group_id" integer NOT NULL UNIQUE, "provider" varchar(32) NOT NULL, "outer_id" bigint NOT NULL); +CREATE UNIQUE INDEX IF NOT EXISTS "external_department_provider_outer_id_8dns6vkw_uniq" ON "external_department" (`provider`,`outer_id`); diff --git a/scripts/upgrade/sql/9.0.0/mysql/seafevents.sql b/scripts/upgrade/sql/9.0.0/mysql/seafevents.sql new file mode 100644 index 0000000000..2d9ef35863 --- /dev/null +++ b/scripts/upgrade/sql/9.0.0/mysql/seafevents.sql @@ -0,0 +1,2 @@ +ALTER TABLE `FileAudit` ADD INDEX `ix_FileAudit_user` (`user`); +ALTER TABLE `FileAudit` ADD INDEX `ix_FileAudit_repo_id` (`repo_id`); diff --git a/scripts/upgrade/sql/9.0.0/mysql/seafile.sql b/scripts/upgrade/sql/9.0.0/mysql/seafile.sql new file mode 100644 index 0000000000..aed07d16d2 --- /dev/null +++ b/scripts/upgrade/sql/9.0.0/mysql/seafile.sql @@ -0,0 +1,2 @@ +ALTER TABLE `RepoUserToken` ADD INDEX `RepoUserToken_token` (`token`); +ALTER TABLE `RepoTokenPeerInfo` ADD INDEX `RepoTokenPeerInfo_peer_id` (`peer_id`); diff --git a/scripts/upgrade/sql/9.0.0/mysql/seahub.sql b/scripts/upgrade/sql/9.0.0/mysql/seahub.sql new file mode 100644 index 0000000000..a74835d22a --- /dev/null +++ b/scripts/upgrade/sql/9.0.0/mysql/seahub.sql @@ -0,0 +1,51 @@ +ALTER TABLE `api2_tokenv2` CHANGE COLUMN `device_name` `device_name` varchar(40) CHARACTER SET 'utf8mb4' COLLATE utf8mb4_unicode_ci NOT NULL; + +CREATE TABLE `custom_share_permission` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `name` varchar(255) NOT NULL, + `description` varchar(500) NOT NULL, + `permission` longtext NOT NULL, + PRIMARY KEY (`id`), + KEY `custom_share_permission_repo_id_578fe49f` (`repo_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `ocm_via_webdav_received_shares` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `description` varchar(255) DEFAULT NULL, + `name` varchar(255) NOT NULL, + `owner` varchar(255) NOT NULL, + `owner_display_name` varchar(255) DEFAULT NULL, + `protocol_name` varchar(255) NOT NULL, + `shared_secret` varchar(255) NOT NULL, + `permissions` varchar(255) NOT NULL, + `provider_id` varchar(255) NOT NULL, + `resource_type` varchar(255) NOT NULL, + `share_type` varchar(255) NOT NULL, + `share_with` varchar(255) NOT NULL, + `shared_by` varchar(255) NOT NULL, + `shared_by_display_name` varchar(255) 
DEFAULT NULL, + `ctime` datetime(6) NOT NULL, + `is_dir` tinyint(1) NOT NULL, + PRIMARY KEY (`id`), + KEY `ocm_via_webdav_share_received_owner_261eaa70` (`owner`), + KEY `ocm_via_webdav_share_received_shared_secret_fbb6be5a` (`shared_secret`), + KEY `ocm_via_webdav_share_received_provider_id_a55680e9` (`provider_id`), + KEY `ocm_via_webdav_share_received_resource_type_a3c71b57` (`resource_type`), + KEY `ocm_via_webdav_share_received_share_type_7615aaab` (`share_type`), + KEY `ocm_via_webdav_share_received_share_with_5a23eb17` (`share_with`), + KEY `ocm_via_webdav_share_received_shared_by_1786d580` (`shared_by`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `onlyoffice_onlyofficedockey` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `doc_key` varchar(36) NOT NULL, + `username` varchar(255) NOT NULL, + `repo_id` varchar(36) NOT NULL, + `file_path` longtext NOT NULL, + `repo_id_file_path_md5` varchar(100) NOT NULL, + `created_time` datetime(6) NOT NULL, + PRIMARY KEY (`id`), + KEY `onlyoffice_onlyofficedockey_doc_key_edba1352` (`doc_key`), + KEY `onlyoffice_onlyofficedockey_repo_id_file_path_md5_52002073` (`repo_id_file_path_md5`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/scripts/upgrade/sql/9.0.0/sqlite3/seafevents.sql b/scripts/upgrade/sql/9.0.0/sqlite3/seafevents.sql new file mode 100644 index 0000000000..5e67711757 --- /dev/null +++ b/scripts/upgrade/sql/9.0.0/sqlite3/seafevents.sql @@ -0,0 +1,2 @@ +CREATE INDEX IF NOT EXISTS "ix_FileAudit_user" ON "FileAudit" ("user"); +CREATE INDEX IF NOT EXISTS "ix_FileAudit_repo_id" ON "FileAudit" ("repo_id"); diff --git a/scripts/upgrade/sql/9.0.0/sqlite3/seafile.sql b/scripts/upgrade/sql/9.0.0/sqlite3/seafile.sql new file mode 100644 index 0000000000..ed5e0fbcad --- /dev/null +++ b/scripts/upgrade/sql/9.0.0/sqlite3/seafile.sql @@ -0,0 +1,2 @@ +CREATE INDEX IF NOT EXISTS "RepoUserToken_token" ON "RepoUserToken" ("token"); +CREATE INDEX IF NOT EXISTS "RepoTokenPeerInfo_peer_id" ON "RepoTokenPeerInfo" ("peer_id"); diff --git a/scripts/upgrade/sql/9.0.0/sqlite3/seahub.sql b/scripts/upgrade/sql/9.0.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..fcec8940a7 --- /dev/null +++ b/scripts/upgrade/sql/9.0.0/sqlite3/seahub.sql @@ -0,0 +1,15 @@ +CREATE TABLE IF NOT EXISTS "custom_share_permission" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "name" varchar(255) NOT NULL, "description" varchar(500) NOT NULL, "permission" text NOT NULL); +CREATE INDEX IF NOT EXISTS "custom_share_permission_repo_id_578fe49f" ON "custom_share_permission" ("repo_id"); + +CREATE TABLE IF NOT EXISTS "ocm_via_webdav_received_shares" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "description" varchar(255) NULL, "name" varchar(255) NOT NULL, "owner" varchar(255) NOT NULL, "owner_display_name" varchar(255) NULL, "protocol_name" varchar(255) NOT NULL, "shared_secret" varchar(255) NOT NULL, "permissions" varchar(255) NOT NULL, "provider_id" varchar(255) NOT NULL, "resource_type" varchar(255) NOT NULL, "share_type" varchar(255) NOT NULL, "share_with" varchar(255) NOT NULL, "shared_by" varchar(255) NOT NULL, "shared_by_display_name" varchar(255) NOT NULL, "ctime" datetime NOT NULL, "is_dir" bool NOT NULL); +CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_owner_261eaa70" ON "ocm_via_webdav_received_shares" ("owner"); +CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_shared_secret_fbb6be5a" ON "ocm_via_webdav_received_shares" ("shared_secret"); +CREATE INDEX IF NOT EXISTS 
"ocm_via_webdav_share_received_provider_id_a55680e9" ON "ocm_via_webdav_received_shares" ("provider_id"); +CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_resource_type_a3c71b57" ON "ocm_via_webdav_received_shares" ("resource_type"); +CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_share_type_7615aaab" ON "ocm_via_webdav_received_shares" ("share_type"); +CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_share_with_5a23eb17" ON "ocm_via_webdav_received_shares" ("share_with"); +CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_shared_by_1786d580" ON "ocm_via_webdav_received_shares" ("shared_by"); + +CREATE TABLE IF NOT EXISTS "onlyoffice_onlyofficedockey" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "doc_key" varchar(36) NOT NULL, "username" varchar(255) NOT NULL, "repo_id" varchar(36) NULL, "file_path" TEXT NOT NULL, "repo_id_file_path_md5" varchar(100) NOT NULL, "created_time" datetime NOT NULL); +CREATE INDEX IF NOT EXISTS "onlyoffice_onlyofficedockey_doc_key_edba1352" ON "onlyoffice_onlyofficedockey" ("doc_key"); +CREATE INDEX IF NOT EXISTS "onlyoffice_onlyofficedockey_repo_id_file_path_md5_52002073" ON "onlyoffice_onlyofficedockey" ("repo_id_file_path_md5"); diff --git a/scripts/upgrade/upgrade_1.2_1.3.sh b/scripts/upgrade/upgrade_1.2_1.3.sh new file mode 100755 index 0000000000..3bceb1e363 --- /dev/null +++ b/scripts/upgrade/upgrade_1.2_1.3.sh @@ -0,0 +1,121 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH + +prev_version=1.2.0 +current_version=1.3.0 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! 
-d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +check_python_executable +read_seafile_data_dir + +export SEAFILE_CONF_DIR=$seafile_data_dir + +# test whether seafile server has been stopped. +if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 +elif pgrep -f "manage.py run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +fi + +# run django syncdb command +echo "------------------------------" +echo "updating seahub database ... " +echo +manage_py=${INSTALLPATH}/seahub/manage.py +pushd "${INSTALLPATH}/seahub" 2>/dev/null 1>&2 +if ! $PYTHON manage.py syncdb 2>/dev/null 1>&2; then + echo "failed" + exit -1 +fi +popd 2>/dev/null 1>&2 + +echo "DONE" +echo "------------------------------" +echo + +echo "------------------------------" +echo "migrating avatars ..." +echo +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars + +# move "media/avatars" directory outside +if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars ${media_dir} + +elif [[ ! -L ${orig_avatar_dir} ]]; then + mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars ${media_dir} +fi + +echo "DONE" +echo "------------------------------" +echo \ No newline at end of file diff --git a/scripts/upgrade/upgrade_1.3_1.4.sh b/scripts/upgrade/upgrade_1.3_1.4.sh new file mode 100755 index 0000000000..cbdc24646a --- /dev/null +++ b/scripts/upgrade/upgrade_1.3_1.4.sh @@ -0,0 +1,119 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH + +prev_version=1.3 +current_version=1.4.0 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find 
a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +check_python_executable +read_seafile_data_dir + +export SEAFILE_CONF_DIR=$seafile_data_dir + +# test whether seafile server has been stopped. +if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 +elif pgrep -f "manage.py run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +fi + +echo "------------------------------" +echo "migrating avatars ..." +echo +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars + +# move "media/avatars" directory outside +if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars ${media_dir} + +elif [[ ! -L ${orig_avatar_dir} ]]; then + mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars ${media_dir} +fi + +echo "DONE" +echo "------------------------------" +echo + +# update database +echo "------------------------------" +echo "updating seahub database ... " +echo + +db_update_py=$UPGRADE_DIR/db_update_1.3_1.4.py +if ! 
$PYTHON $db_update_py $default_seahub_db 1>/dev/null; then + echo "failed" +fi + +echo "DONE" +echo "------------------------------" +echo diff --git a/scripts/upgrade/upgrade_1.4_1.5.sh b/scripts/upgrade/upgrade_1.4_1.5.sh new file mode 100755 index 0000000000..7cc7ab550f --- /dev/null +++ b/scripts/upgrade/upgrade_1.4_1.5.sh @@ -0,0 +1,106 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH + +prev_version=1.4 +current_version=1.5 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +check_python_executable +read_seafile_data_dir + +export SEAFILE_CONF_DIR=$seafile_data_dir + +# test whether seafile server has been stopped. +if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 +elif pgrep -f "manage.py run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +fi + +echo +echo "------------------------------" +echo "migrating avatars ..." +echo +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars + +# move "media/avatars" directory outside +if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars ${media_dir} + +elif [[ ! 
-L ${orig_avatar_dir} ]]; then + mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars ${media_dir} +fi + +echo "DONE" +echo "------------------------------" +echo \ No newline at end of file diff --git a/scripts/upgrade/upgrade_1.5_1.6.sh b/scripts/upgrade/upgrade_1.5_1.6.sh new file mode 100755 index 0000000000..9028108281 --- /dev/null +++ b/scripts/upgrade/upgrade_1.5_1.6.sh @@ -0,0 +1,122 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH + +prev_version=1.5 +current_version=1.6 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +check_python_executable +read_seafile_data_dir + +export SEAFILE_CONF_DIR=$seafile_data_dir + +# test whether seafile server has been stopped. +if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 +elif pgrep -f "manage.py run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +fi + +echo +echo "------------------------------" +echo "migrating avatars ..." +echo +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars + +# move "media/avatars" directory outside +if [[ ! 
-d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars ${media_dir} + +elif [[ ! -L ${orig_avatar_dir} ]]; then + mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars ${media_dir} +fi + +echo "DONE" +echo "------------------------------" +echo + +echo +echo "------------------------------" +echo "Updating seahub database ..." +echo + +seahub_db=${TOPDIR}/seahub.db +seahub_sql=${UPGRADE_DIR}/sql/1.6.0/sqlite3/seahub.sql +if ! sqlite3 "${seahub_db}" < "${seahub_sql}"; then + echo "Failed to update seahub database" + exit 1 +fi + +echo "DONE" +echo "------------------------------" +echo \ No newline at end of file diff --git a/scripts/upgrade/upgrade_1.6_1.7.sh b/scripts/upgrade/upgrade_1.6_1.7.sh new file mode 100755 index 0000000000..b9d1dc0395 --- /dev/null +++ b/scripts/upgrade/upgrade_1.6_1.7.sh @@ -0,0 +1,137 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH + +prev_version=1.6 +current_version=1.7 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +check_python_executable +read_seafile_data_dir + +export SEAFILE_CONF_DIR=$seafile_data_dir + +# test whether seafile server has been stopped. +if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" 
+ echo "stop it using scripts before upgrade." + echo + exit 1 +elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +elif pgrep -f "${manage_py} run_fcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +fi + +echo +echo "------------------------------" +echo "migrating avatars ..." +echo +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars + +# move "media/avatars" directory outside +if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars ${media_dir} + +elif [[ ! -L ${orig_avatar_dir} ]]; then + mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars ${media_dir} +fi + +echo "DONE" +echo "------------------------------" +echo + +echo +echo "------------------------------" +echo "Updating seafile/seahub database ..." +echo + +seahub_db=${TOPDIR}/seahub.db +seahub_sql=${UPGRADE_DIR}/sql/1.7.0/sqlite3/seahub.sql +if ! sqlite3 "${seahub_db}" < "${seahub_sql}"; then + echo "Failed to update seahub database" + exit 1 +fi + +seafile_db=${seafile_data_dir}/seafile.db +seafile_sql=${UPGRADE_DIR}/sql/1.7.0/sqlite3/seafile.sql +if ! sqlite3 "${seafile_db}" < "${seafile_sql}"; then + echo "Failed to update seafile database" + exit 1 +fi + +echo "DONE" +echo "------------------------------" +echo diff --git a/scripts/upgrade/upgrade_1.7_1.8.sh b/scripts/upgrade/upgrade_1.7_1.8.sh new file mode 100755 index 0000000000..f2c94c0e75 --- /dev/null +++ b/scripts/upgrade/upgrade_1.7_1.8.sh @@ -0,0 +1,130 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH + +prev_version=1.7 +current_version=1.8 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ 
before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +check_python_executable +read_seafile_data_dir + +export SEAFILE_CONF_DIR=$seafile_data_dir + +# test whether seafile server has been stopped. +if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 +elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +elif pgrep -f "${manage_py} run_fcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +fi + +echo +echo "------------------------------" +echo "migrating avatars ..." +echo +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars + +# move "media/avatars" directory outside +if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars ${media_dir} + +elif [[ ! -L ${orig_avatar_dir} ]]; then + mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars ${media_dir} +fi + +echo "DONE" +echo "------------------------------" +echo + +echo +echo "------------------------------" +echo "Updating seafile/seahub database ..." +echo + +seahub_db=${TOPDIR}/seahub.db +seahub_sql=${UPGRADE_DIR}/sql/1.8.0/sqlite3/seahub.sql +if ! 
sqlite3 "${seahub_db}" < "${seahub_sql}"; then + echo "Failed to update seahub database" + exit 1 +fi + +echo "DONE" +echo "------------------------------" +echo diff --git a/scripts/upgrade/upgrade_1.8_2.0.sh b/scripts/upgrade/upgrade_1.8_2.0.sh new file mode 100755 index 0000000000..c5cdbafe6a --- /dev/null +++ b/scripts/upgrade/upgrade_1.8_2.0.sh @@ -0,0 +1,137 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH + +prev_version=1.8 +current_version=2.0 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +check_python_executable +read_seafile_data_dir + +export SEAFILE_CONF_DIR=$seafile_data_dir + +# test whether seafile server has been stopped. +if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 +elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +elif pgrep -f "${manage_py} run_fcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +fi + +echo +echo "------------------------------" +echo "migrating avatars ..." 
+echo +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars + +# move "media/avatars" directory outside +if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars ${media_dir} + +elif [[ ! -L ${orig_avatar_dir} ]]; then + mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars ${media_dir} +fi + +echo "DONE" +echo "------------------------------" +echo + +echo +echo "------------------------------" +echo "Updating seafile/seahub database ..." +echo + +seahub_db=${TOPDIR}/seahub.db +seahub_sql=${UPGRADE_DIR}/sql/2.0.0/sqlite3/seahub.sql +if ! sqlite3 "${seahub_db}" < "${seahub_sql}"; then + echo "Failed to update seahub database" + exit 1 +fi + +add_collate_script=${UPGRADE_DIR}/add_collate.sh +echo "fix seafile database case issues..." +if ! ${add_collate_script} ${default_ccnet_conf_dir} ${seafile_data_dir} ${seahub_db}; then + echo "Failed." + exit 1 +fi + +echo "DONE" +echo "------------------------------" +echo diff --git a/scripts/upgrade/upgrade_2.0_2.1.sh b/scripts/upgrade/upgrade_2.0_2.1.sh new file mode 100755 index 0000000000..5f5dbb4eb0 --- /dev/null +++ b/scripts/upgrade/upgrade_2.0_2.1.sh @@ -0,0 +1,206 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH + +prev_version=2.0 +current_version=2.1 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! 
-d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_fcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." + echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 2.1.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + seafile_server_symlink=${TOPDIR}/seafile-server-latest + if [[ -L "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function gen_seafdav_conf() { + echo + echo "generating seafdav.conf ..." + echo + seafdav_conf=${default_conf_dir}/seafdav.conf + mkdir -p "${default_conf_dir}" + if ! $(cat > "${seafdav_conf}" </dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." 
+ echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_fcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." + echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 2.2.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + seafile_server_symlink=${TOPDIR}/seafile-server-latest + if [[ -L "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +export SEAFILE_CONF_DIR=$seafile_data_dir + +migrate_avatars; + +update_database; + +upgrade_seafile_server_latest_symlink; + + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_2.2_3.0.sh b/scripts/upgrade/upgrade_2.2_3.0.sh new file mode 100755 index 0000000000..7cf6ab2872 --- /dev/null +++ b/scripts/upgrade/upgrade_2.2_3.0.sh @@ -0,0 +1,211 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=2.2 +current_version=3.0 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_fcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 3.0.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + seafile_server_symlink=${TOPDIR}/seafile-server-latest + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[32m${seafile_server_symlink}\033[m symbolic link to \033[32m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function show_notice_for_s3_ceph_user() { + echo "-----------------------------------------------------------------" + echo "Important: You are using ${backend} storage, please follow the following " + echo "upgrade notice to migrate your data to 3.0 format" + echo + echo " http://seacloud.cc/group/180/wiki/seafile-pro-3.0-upgrade-notice/" + echo "-----------------------------------------------------------------" + echo + echo +} + +check_backend_py=${UPGRADE_DIR}/check_backend.py +backend= +function migrate_seafile_data_format() { + backend=$($PYTHON ${check_backend_py}) + if [[ "${backend}" == "s3" || "${backend}" == "ceph" ]]; then + return + fi + seaf_migrate=${INSTALLPATH}/seafile/bin/seaf-migrate + echo + echo "Now migrating your seafile data to 3.0 format. It may take a while." + echo + if ! LD_LIBRARY_PATH=${SEAFILE_LD_LIBRARY_PATH} ${seaf_migrate} \ + -c "${default_ccnet_conf_dir}" -d "${seafile_data_dir}"; then + echo + echo "Failed to migrate seafile data to 3.0 format" + echo + exit 1; + fi + echo + echo "Successfully migrated seafile data to 3.0 format" + echo +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +export SEAFILE_CONF_DIR=$seafile_data_dir + +migrate_seafile_data_format; + +migrate_avatars; + +update_database; + +upgrade_seafile_server_latest_symlink; + +if [[ "${backend}" == "s3" || "${backend}" == "ceph" ]]; then + show_notice_for_s3_ceph_user; +else + echo + echo "-----------------------------------------------------------------" + echo "Upgraded your seafile server successfully." 
+ echo "-----------------------------------------------------------------" + echo +fi diff --git a/scripts/upgrade/upgrade_3.0_3.1.sh b/scripts/upgrade/upgrade_3.0_3.1.sh new file mode 100755 index 0000000000..d3b7f4f2f8 --- /dev/null +++ b/scripts/upgrade/upgrade_3.0_3.1.sh @@ -0,0 +1,215 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=3.0 +current_version=3.1 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 3.1.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +migrate_avatars; + +update_database; + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_3.1_4.0.sh b/scripts/upgrade/upgrade_3.1_4.0.sh new file mode 100755 index 0000000000..6a6bc9a9e3 --- /dev/null +++ b/scripts/upgrade/upgrade_3.1_4.0.sh @@ -0,0 +1,215 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=3.1 +current_version=4.0 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 4.0.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +migrate_avatars; + +update_database; + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
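+# Ordering note: move_old_customdir_outside has to run before
+# upgrade_seafile_server_latest_symlink, because it resolves seafile-server-latest to
+# locate the *previous* install's seahub/media/custom directory.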
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_4.0_4.1.sh b/scripts/upgrade/upgrade_4.0_4.1.sh new file mode 100755 index 0000000000..e4ee7e04f0 --- /dev/null +++ b/scripts/upgrade/upgrade_4.0_4.1.sh @@ -0,0 +1,235 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=4.0 +current_version=4.1 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 4.1.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function fix_mysql_user() { + + fix_script=${UPGRADE_DIR}/fix_mysql_user.py + if ! $PYTHON "${fix_script}"; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +fix_mysql_user; +update_database; + +migrate_avatars; + + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + +chmod 0600 "$seahub_settings_py" +chmod 0700 "$seafile_data_dir" +chmod 0700 "$default_ccnet_conf_dir" +chmod 0700 "$default_conf_dir" +chmod 0700 "$TOPDIR"/pro-data + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
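+# A quick way to double-check the tightened permissions afterwards (sketch, assumes GNU stat):
+#   stat -c '%a %n' "$seahub_settings_py" "$seafile_data_dir" "$default_ccnet_conf_dir" "$default_conf_dir"
+# Expected output: 600 for seahub_settings.py and 700 for the directories.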
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_4.1_4.2.sh b/scripts/upgrade/upgrade_4.1_4.2.sh new file mode 100755 index 0000000000..d0ee40514d --- /dev/null +++ b/scripts/upgrade/upgrade_4.1_4.2.sh @@ -0,0 +1,226 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=4.1 +current_version=4.2 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 4.2.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +function remove_es_index() { + local es_data_dir=$TOPDIR/pro-data/search/data + echo -n "Removing old search index ... " + rm -rf $es_data_dir && mkdir -p $es_data_dir + echo "Done" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +update_database; + +# We changed elasticsearch index settings in 4.2.0, need to recreate the index. +remove_es_index; + +migrate_avatars; + + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
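+# Note: remove_es_index only wipes the on-disk index data under ${TOPDIR}/pro-data/search/data
+# and recreates the empty directory so the new 4.2.0 index settings can take effect; the
+# search index itself should be rebuilt by the indexing task once the server is started again.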
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_4.2_4.3.sh b/scripts/upgrade/upgrade_4.2_4.3.sh new file mode 100755 index 0000000000..51b5213534 --- /dev/null +++ b/scripts/upgrade/upgrade_4.2_4.3.sh @@ -0,0 +1,241 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=4.2 +current_version=4.3 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 4.3.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +function regenerate_secret_key() { + regenerate_secret_key_script=$UPGRADE_DIR/regenerate_secret_key.sh + if ! $regenerate_secret_key_script ; then + echo "Failed to regenerate the seahub secret key" + exit 1 + fi +} + +function remove_es_index() { + local es_data_dir=$TOPDIR/pro-data/search/data + echo -n "Removing old search index ... " + rm -rf $es_data_dir && mkdir -p $es_data_dir + echo "Done" +} + +function remove_office_files() { + rm -rf /tmp/seafile-office-output/html/* +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +regenerate_secret_key; + +update_database; + +# We changed elasticsearch index settings in 4.3.0, need to recreate the index. 
+remove_es_index; +remove_office_files; + +migrate_avatars; + + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." +echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_4.3_4.4.sh b/scripts/upgrade/upgrade_4.3_4.4.sh new file mode 100755 index 0000000000..c92b5b37d8 --- /dev/null +++ b/scripts/upgrade/upgrade_4.3_4.4.sh @@ -0,0 +1,216 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=4.3 +current_version=4.4 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." 
+ echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." + echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 4.4.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +update_database; + +migrate_avatars; + + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
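+# For reference, update_database above boils down to invoking the helper with the target
+# schema version in the environment this script sets up, i.e.:
+#   $PYTHON "${UPGRADE_DIR}/db_update_helper.py" 4.4.0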
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_4.4_5.0.sh b/scripts/upgrade/upgrade_4.4_5.0.sh new file mode 100755 index 0000000000..cad8c7b662 --- /dev/null +++ b/scripts/upgrade/upgrade_4.4_5.0.sh @@ -0,0 +1,245 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py +pro_data_dir=${TOPDIR}/pro-data + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=4.4 +current_version=5.0 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 5.0.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +function regenerate_secret_key() { + regenerate_secret_key_script=$UPGRADE_DIR/regenerate_secret_key.sh + if ! $regenerate_secret_key_script ; then + echo "Failed to regenerate the seahub secret key" + exit 1 + fi +} + +# copy ccnet.conf/seafile.conf etc. to conf/ dir, and make the original files read-only +function copy_confs_to_central_conf_dir() { + local confs=( + $default_ccnet_conf_dir/ccnet.conf + $seafile_data_dir/seafile.conf + $seahub_settings_py + $pro_data_dir/seafevents.conf + ) + for conffile in ${confs[*]}; do + if grep -q "This file has been moved" $conffile; then + continue + fi + cp $conffile $conffile.seafile-5.0.0-bak + cp -av $conffile $default_conf_dir/ + cat >$conffile</dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + else + echo + echo "Can't find a python executable of version 2.7 or above in PATH" + echo "Install python 2.7+ before continue." 
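+        # (For example -- interpreter path and script name are only placeholders:
+        #      PYTHON=/usr/local/bin/python2.7 ./upgrade_X.Y_X.Z.sh
+        #  as the next message suggests.)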
+ echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." + echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 5.1.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! 
-e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +function regenerate_secret_key() { + regenerate_secret_key_script=$UPGRADE_DIR/regenerate_secret_key.sh + if ! $regenerate_secret_key_script ; then + echo "Failed to regenerate the seahub secret key" + exit 1 + fi +} + +# copy ccnet.conf/seafile.conf etc. to conf/ dir, and make the original files read-only +function copy_confs_to_central_conf_dir() { + local confs=( + $default_ccnet_conf_dir/ccnet.conf + $seafile_data_dir/seafile.conf + $seahub_settings_py + ) + for conffile in ${confs[*]}; do + if grep -q "This file has been moved" $conffile; then + continue + fi + cp $conffile $conffile.seafile-5.0.0-bak + cp -av $conffile $default_conf_dir/ + cat >$conffile</dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + else + echo + echo "Can't find a python executable of version 2.7 or above in PATH" + echo "Install python 2.7+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." + echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! 
-L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 6.0.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +update_database; +migrate_avatars; + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
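+# After this step seafile-server-latest is a relative symlink pointing at the new install
+# directory, e.g. (version number is only an example):
+#   haiwen/seafile-server-latest -> seafile-server-6.0.0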
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_6.0_6.1.sh b/scripts/upgrade/upgrade_6.0_6.1.sh new file mode 100755 index 0000000000..4545092311 --- /dev/null +++ b/scripts/upgrade/upgrade_6.0_6.1.sh @@ -0,0 +1,210 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=6.0 +current_version=6.1 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + else + echo + echo "Can't find a python executable of version 2.7 or above in PATH" + echo "Install python 2.7+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 6.1.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +update_database; +migrate_avatars; + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
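+# To check by hand that nothing is left running before upgrading, the same process
+# patterns this script tests for can be inspected manually, e.g.:
+#   pgrep -fl seaf-server
+#   pgrep -fl "seahub.wsgi:application"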
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_6.1_6.2.sh b/scripts/upgrade/upgrade_6.1_6.2.sh new file mode 100755 index 0000000000..b963b3ad3b --- /dev/null +++ b/scripts/upgrade/upgrade_6.1_6.2.sh @@ -0,0 +1,210 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=6.1 +current_version=6.2 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + else + echo + echo "Can't find a python executable of version 2.7 or above in PATH" + echo "Install python 2.7+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 6.2.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +update_database; +migrate_avatars; + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_6.2_6.3.sh b/scripts/upgrade/upgrade_6.2_6.3.sh new file mode 100755 index 0000000000..1a0a167643 --- /dev/null +++ b/scripts/upgrade/upgrade_6.2_6.3.sh @@ -0,0 +1,239 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_conf_dir=${TOPDIR}/conf +default_pids_dir=${TOPDIR}/pids +default_logs_dir=${TOPDIR}/logs +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=6.2 +current_version=6.3 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + else + echo + echo "Can't find a python executable of version 2.7 or above in PATH" + echo "Install python 2.7+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 6.3.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +function add_gunicorn_conf() { + gunicorn_conf=${default_conf_dir}/gunicorn.conf + if ! $(cat > ${gunicorn_conf} </dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + else + echo + echo "Can't find a python executable of version 2.7 or above in PATH" + echo "Install python 2.7+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." 
+ echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." + echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 7.0.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +function add_gunicorn_conf() { + gunicorn_conf=${default_conf_dir}/gunicorn.conf + if ! 
$(cat > ${gunicorn_conf} </dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + else + if [[ ${seafile_data_dir} != ${TOPDIR}/seafile-data ]]; then + if [[ ! -L ${TOPDIR}/seafile-data ]]; then + ln -s ${seafile_data_dir} ${TOPDIR}/seafile-data + echo "Created the symlink ${TOPDIR}/seafile-data for ${seafile_data_dir}." + fi + fi + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function rename_gunicorn_config() { + echo + echo "renaming the gunicorn.conf to gunicorn.conf.py ..." + echo + if [[ -f "${default_conf_dir}/gunicorn.conf" ]]; then + mv "${default_conf_dir}/gunicorn.conf" "${default_conf_dir}/gunicorn.conf.py" 1>/dev/null + fi + + if [[ -f "${default_conf_dir}/gunicorn.conf.py" ]]; then + echo 'Done' + else + echo "Failed to renamed the gunicorn.conf to gunicorn.conf.py." + exit 1 + fi +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." + echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 7.1.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! 
-e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +rename_gunicorn_config; +ensure_server_not_running; + +update_database; +migrate_avatars; + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_7.1_8.0.sh b/scripts/upgrade/upgrade_7.1_8.0.sh new file mode 100755 index 0000000000..793dac162a --- /dev/null +++ b/scripts/upgrade/upgrade_7.1_8.0.sh @@ -0,0 +1,212 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_conf_dir=${TOPDIR}/conf +default_pids_dir=${TOPDIR}/pids +default_logs_dir=${TOPDIR}/logs +default_seafile_data_dir=${TOPDIR}/seafile-data +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export SEAFILE_CONF_DIR=${default_seafile_data_dir} +export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3.6/site-packages:${INSTALLPATH}/seafile/lib64/python3.6/site-packages:${INSTALLPATH}/seahub:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=7.1 +current_version=8.0 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function check_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${default_seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 8.0.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +check_seafile_data_dir; +ensure_server_not_running; + +update_database; +migrate_avatars; + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_8.0_9.0.sh b/scripts/upgrade/upgrade_8.0_9.0.sh new file mode 100755 index 0000000000..3d09b11189 --- /dev/null +++ b/scripts/upgrade/upgrade_8.0_9.0.sh @@ -0,0 +1,220 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_conf_dir=${TOPDIR}/conf +default_pids_dir=${TOPDIR}/pids +default_logs_dir=${TOPDIR}/logs +default_seafile_data_dir=${TOPDIR}/seafile-data +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export SEAFILE_CONF_DIR=${default_seafile_data_dir} +export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=8.0 +current_version=9.0 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function check_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${default_seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 9.0.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +function update_seahub_settings () { + service_url=`awk -F '=' '/\[General\]/{a=1}a==1&&$1~/SERVICE_URL/{print $2;exit}' ${default_conf_dir}/ccnet.conf` + service_url=$(echo $service_url) + echo "SERVICE_URL = '${service_url}'">>${default_conf_dir}/seahub_settings.py +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +check_seafile_data_dir; +ensure_server_not_running; + +update_database; +migrate_avatars; + +update_seahub_settings; + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." +echo "-----------------------------------------------------------------" +echo
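The upgrade scripts added above all follow the same pattern: refuse to run while seaf-server or seahub is still up, run db_update_helper.py with the target schema version, migrate media/avatars and media/custom into seahub-data/, and repoint the seafile-server-latest symlink. They are meant to be run one hop at a time, in ascending order, from the upgrade/ directory of the newly extracted release. A minimal usage sketch, assuming the haiwen/ top-level layout shown in the scripts' comments and the usual seafile.sh / seahub.sh control scripts shipped in the server tarball (the /opt/haiwen path, the tarball name, and those control scripts do not appear in this patch, so treat them as placeholders):

    # stop the old server first; each upgrade script aborts if it is still running
    cd /opt/haiwen
    ./seafile-server-latest/seahub.sh stop
    ./seafile-server-latest/seafile.sh stop

    # unpack the new release next to the old one and run every hop between
    # the installed version and the target, in order
    tar xzf seafile-server_9.0.x_x86-64.tar.gz
    cd seafile-server-9.0.x/upgrade
    ./upgrade_7.1_8.0.sh
    ./upgrade_8.0_9.0.sh      # also repoints seafile-server-latest to 9.0.x

    # start the upgraded server through the refreshed symlink
    cd ../../seafile-server-latest
    ./seafile.sh start
    ./seahub.sh start

Because each hop both migrates data and updates the seafile-server-latest symlink, skipping a hop or running them out of order can leave the symlink and the database schema pointing at different versions.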