diff --git a/scripts/build/build-server.py b/scripts/build/build-server.py new file mode 100755 index 0000000000..dbe8355abc --- /dev/null +++ b/scripts/build/build-server.py @@ -0,0 +1,859 @@ +#!/usr/bin/env python +# coding: UTF-8 + +'''This script builds the seafile server tarball. + +Some notes: + +1. The working directory is always the 'builddir'. 'os.chdir' is only called +to change to the 'builddir'. We make use of the 'cwd' argument in +'subprocess.Popen' to run a command in a specific directory. + +2. django/djangorestframework/djblets/gunicorn/flup must be easy_install-ed to +a directory before running this script. That directory is passed in as the +'--thirdpartdir' arguments. + +''' +import sys +import os +import glob +import subprocess +import tempfile +import shutil +import re +import subprocess +import optparse +import atexit +import platform + +#################### +### Global variables +#################### + +# command line configuartion +conf = {} + +# key names in the conf dictionary. +CONF_VERSION = 'version' +CONF_SEAFILE_VERSION = 'seafile_version' +CONF_LIBSEARPC_VERSION = 'libsearpc_version' +CONF_SRCDIR = 'srcdir' +CONF_KEEP = 'keep' +CONF_BUILDDIR = 'builddir' +CONF_OUTPUTDIR = 'outputdir' +CONF_THIRDPARTDIR = 'thirdpartdir' +CONF_NO_STRIP = 'nostrip' +CONF_ENABLE_S3 = 's3' +CONF_YES = 'yes' +CONF_JOBS = 'jobs' +CONF_MYSQL_CONFIG = 'mysql_config' + +#################### +### Common helper functions +#################### +def highlight(content, is_error=False): + '''Add ANSI color to content to get it highlighted on terminal''' + if is_error: + return '\x1b[1;31m%s\x1b[m' % content + else: + return '\x1b[1;32m%s\x1b[m' % content + +def info(msg): + print(highlight('[INFO] ') + msg) + +def find_in_path(prog): + '''Find a file in system path''' + dirs = os.environ['PATH'].split(':') + for d in dirs: + if d == '': + continue + path = os.path.join(d, prog) + if os.path.exists(path): + return path + + return None + +def error(msg=None, usage=None): + if msg: + print(highlight('[ERROR] ') + msg) + if usage: + print(usage) + sys.exit(1) + +def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): + '''Run a program and wait for it to finish, and return its exit code. The + standard output of this program is supressed. + + ''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(argv, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env) + return proc.wait() + +def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): + '''Like run_argv but specify a command line string instead of argv''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(cmdline, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env, + shell=True) + return proc.wait() + +def must_mkdir(path): + '''Create a directory, exit on failure''' + try: + os.mkdir(path) + except OSError as e: + error('failed to create directory %s:%s' % (path, e)) + +def must_copy(src, dst): + '''Copy src to dst, exit on failure''' + try: + shutil.copy(src, dst) + except Exception as e: + error('failed to copy %s to %s: %s' % (src, dst, e)) + +class Project(object): + '''Base class for a project''' + # Project name, i.e. 
libseaprc/seafile/seahub + name = '' + + # A list of shell commands to configure/build the project + build_commands = [] + + def __init__(self): + # the path to pass to --prefix=/ + self.prefix = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seafile') + self.version = self.get_version() + self.src_tarball = os.path.join(conf[CONF_SRCDIR], + '%s-%s.tar.gz' % (self.name, self.version)) + # project dir, like /seafile-1.2.2/ + self.projdir = os.path.join(conf[CONF_BUILDDIR], '%s-%s' % (self.name, self.version)) + + def get_version(self): + # libsearpc can have different versions from seafile. + raise NotImplementedError + + def uncompress(self): + '''Uncompress the source from the tarball''' + info('Uncompressing %s' % self.name) + + if run('tar xf %s' % self.src_tarball) < 0: + error('failed to uncompress source of %s' % self.name) + + def build(self): + '''Build the source''' + info('Building %s' % self.name) + for cmd in self.build_commands: + if run(cmd, cwd=self.projdir) != 0: + error('error when running command:\n\t%s\n' % cmd) + +class Libsearpc(Project): + name = 'libsearpc' + + def __init__(self): + Project.__init__(self) + self.build_commands = [ + './configure --prefix=%s' % self.prefix, + 'make -j%s' % conf[CONF_JOBS], + 'make install' + ] + + def get_version(self): + return conf[CONF_LIBSEARPC_VERSION] + +class Seafile(Project): + name = 'seafile' + def __init__(self): + Project.__init__(self) + s3_support = '' + if conf[CONF_ENABLE_S3]: + s3_support = '--enable-s3' + + configure_command = './configure --prefix=%s %s --enable-ldap' % (self.prefix, s3_support) + if conf[CONF_MYSQL_CONFIG]: + configure_command += ' --with-mysql=%s' % conf[CONF_MYSQL_CONFIG] + + self.build_commands = [ + configure_command, + 'make -j%s' % conf[CONF_JOBS], + 'make install' + ] + + def get_version(self): + return conf[CONF_SEAFILE_VERSION] + +class Seahub(Project): + name = 'seahub' + def __init__(self): + Project.__init__(self) + # nothing to do for seahub + self.build_commands = [ + ] + + def get_version(self): + return conf[CONF_SEAFILE_VERSION] + + def build(self): + self.write_version_to_settings_py() + + Project.build(self) + + def write_version_to_settings_py(self): + '''Write the version of current seafile server to seahub''' + settings_py = os.path.join(self.projdir, 'seahub', 'settings.py') + + line = '\nSEAFILE_VERSION = "%s"\n' % conf[CONF_VERSION] + with open(settings_py, 'a+') as fp: + fp.write(line) + + +def check_seahub_thirdpart(thirdpartdir): + '''The ${thirdpartdir} must have django/djblets/gunicorn pre-installed. 
So + we can copy it to seahub/thirdpart + + ''' + thirdpart_libs = [ + 'Django', + # 'Djblets', + 'gunicorn', + #'flup', + 'chardet', + 'python_dateutil', + #'django_picklefield', + #'django_constance', + # 'SQLAlchemy', + # 'python_daemon', + # 'lockfile', + # 'six', + ] + def check_thirdpart_lib(name): + name += '*' + if not glob.glob(os.path.join(thirdpartdir, name)): + error('%s not found in %s' % (name, thirdpartdir)) + + for lib in thirdpart_libs: + check_thirdpart_lib(lib) + +def check_targz_src(proj, version, srcdir): + src_tarball = os.path.join(srcdir, '%s-%s.tar.gz' % (proj, version)) + if not os.path.exists(src_tarball): + error('%s not exists' % src_tarball) + +def check_targz_src_no_version(proj, srcdir): + src_tarball = os.path.join(srcdir, '%s.tar.gz' % proj) + if not os.path.exists(src_tarball): + error('%s not exists' % src_tarball) + +def check_pdf2htmlEX(): + pdf2htmlEX_executable = find_in_path('pdf2htmlEX') + if pdf2htmlEX_executable is None: + error('pdf2htmlEX not found') + +def validate_args(usage, options): + required_args = [ + CONF_VERSION, + CONF_LIBSEARPC_VERSION, + CONF_SEAFILE_VERSION, + CONF_SRCDIR, + CONF_THIRDPARTDIR, + ] + + # fist check required args + for optname in required_args: + if getattr(options, optname, None) == None: + error('%s must be specified' % optname, usage=usage) + + def get_option(optname): + return getattr(options, optname) + + # [ version ] + def check_project_version(version): + '''A valid version must be like 1.2.2, 1.3''' + if not re.match('^[0-9]+(\.([0-9])+)+$', version): + error('%s is not a valid version' % version, usage=usage) + + version = get_option(CONF_VERSION) + seafile_version = get_option(CONF_SEAFILE_VERSION) + libsearpc_version = get_option(CONF_LIBSEARPC_VERSION) + + check_project_version(version) + check_project_version(libsearpc_version) + check_project_version(seafile_version) + + # [ srcdir ] + srcdir = get_option(CONF_SRCDIR) + check_targz_src('libsearpc', libsearpc_version, srcdir) + check_targz_src('seafile', seafile_version, srcdir) + check_targz_src('seahub', seafile_version, srcdir) + check_targz_src_no_version('seafdav', srcdir) + check_targz_src_no_version('seafobj', srcdir) + + # check_pdf2htmlEX() + + # [ builddir ] + builddir = get_option(CONF_BUILDDIR) + if not os.path.exists(builddir): + error('%s does not exist' % builddir, usage=usage) + + builddir = os.path.join(builddir, 'seafile-server-build') + + # [ thirdpartdir ] + thirdpartdir = get_option(CONF_THIRDPARTDIR) + check_seahub_thirdpart(thirdpartdir) + + # [ outputdir ] + outputdir = get_option(CONF_OUTPUTDIR) + if outputdir: + if not os.path.exists(outputdir): + error('outputdir %s does not exist' % outputdir, usage=usage) + else: + outputdir = os.getcwd() + + # [ yes ] + yes = get_option(CONF_YES) + + # [ jobs ] + jobs = get_option(CONF_JOBS) + + # [ keep ] + keep = get_option(CONF_KEEP) + + # [ no strip] + nostrip = get_option(CONF_NO_STRIP) + + # [ s3 ] + s3 = get_option(CONF_ENABLE_S3) + + mysql_config_path = get_option(CONF_MYSQL_CONFIG) + + conf[CONF_VERSION] = version + conf[CONF_LIBSEARPC_VERSION] = libsearpc_version + conf[CONF_SEAFILE_VERSION] = seafile_version + + conf[CONF_BUILDDIR] = builddir + conf[CONF_SRCDIR] = srcdir + conf[CONF_OUTPUTDIR] = outputdir + conf[CONF_KEEP] = keep + conf[CONF_THIRDPARTDIR] = thirdpartdir + conf[CONF_NO_STRIP] = nostrip + conf[CONF_ENABLE_S3] = s3 + conf[CONF_YES] = yes + conf[CONF_JOBS] = jobs + conf[CONF_MYSQL_CONFIG] = mysql_config_path + + prepare_builddir(builddir) + show_build_info() + +def 
show_build_info(): + '''Print all conf information. Confirm before continue.''' + info('------------------------------------------') + info('Seafile server %s: BUILD INFO' % conf[CONF_VERSION]) + info('------------------------------------------') + info('seafile: %s' % conf[CONF_SEAFILE_VERSION]) + info('libsearpc: %s' % conf[CONF_LIBSEARPC_VERSION]) + info('builddir: %s' % conf[CONF_BUILDDIR]) + info('outputdir: %s' % conf[CONF_OUTPUTDIR]) + info('source dir: %s' % conf[CONF_SRCDIR]) + info('strip symbols: %s' % (not conf[CONF_NO_STRIP])) + info('s3 support: %s' % (conf[CONF_ENABLE_S3])) + info('clean on exit: %s' % (not conf[CONF_KEEP])) + if conf[CONF_YES]: + return + info('------------------------------------------') + info('press any key to continue ') + info('------------------------------------------') + input() + +def prepare_builddir(builddir): + must_mkdir(builddir) + + if not conf[CONF_KEEP]: + def remove_builddir(): + '''Remove the builddir when exit''' + info('remove builddir before exit') + shutil.rmtree(builddir, ignore_errors=True) + atexit.register(remove_builddir) + + os.chdir(builddir) + + must_mkdir(os.path.join(builddir, 'seafile-server')) + must_mkdir(os.path.join(builddir, 'seafile-server', 'seafile')) + +def parse_args(): + parser = optparse.OptionParser() + def long_opt(opt): + return '--' + opt + + parser.add_option(long_opt(CONF_YES), + dest=CONF_YES, + action='store_true') + + parser.add_option(long_opt(CONF_JOBS), + dest=CONF_JOBS, + default=2, + type=int) + + parser.add_option(long_opt(CONF_THIRDPARTDIR), + dest=CONF_THIRDPARTDIR, + nargs=1, + help='where to find the thirdpart libs for seahub') + + parser.add_option(long_opt(CONF_VERSION), + dest=CONF_VERSION, + nargs=1, + help='the version to build. Must be digits delimited by dots, like 1.3.0') + + parser.add_option(long_opt(CONF_SEAFILE_VERSION), + dest=CONF_SEAFILE_VERSION, + nargs=1, + help='the version of seafile as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0') + + parser.add_option(long_opt(CONF_LIBSEARPC_VERSION), + dest=CONF_LIBSEARPC_VERSION, + nargs=1, + help='the version of libsearpc as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0') + + parser.add_option(long_opt(CONF_BUILDDIR), + dest=CONF_BUILDDIR, + nargs=1, + help='the directory to build the source. Defaults to /tmp', + default=tempfile.gettempdir()) + + parser.add_option(long_opt(CONF_OUTPUTDIR), + dest=CONF_OUTPUTDIR, + nargs=1, + help='the output directory to put the generated server tarball. Defaults to the current directory.', + default=os.getcwd()) + + parser.add_option(long_opt(CONF_SRCDIR), + dest=CONF_SRCDIR, + nargs=1, + help='''Source tarballs must be placed in this directory.''') + + parser.add_option(long_opt(CONF_KEEP), + dest=CONF_KEEP, + action='store_true', + help='''keep the build directory after the script exits. 
By default, the script would delete the build directory at exit.''') + + parser.add_option(long_opt(CONF_NO_STRIP), + dest=CONF_NO_STRIP, + action='store_true', + help='''do not strip debug symbols''') + + parser.add_option(long_opt(CONF_ENABLE_S3), + dest=CONF_ENABLE_S3, + action='store_true', + help='''enable amazon s3 support''') + + parser.add_option(long_opt(CONF_MYSQL_CONFIG), + dest=CONF_MYSQL_CONFIG, + nargs=1, + help='''Absolute path to mysql_config or mariadb_config program.''') + + usage = parser.format_help() + options, remain = parser.parse_args() + if remain: + error(usage=usage) + + validate_args(usage, options) + +def setup_build_env(): + '''Setup environment variables, such as export PATH=$BUILDDDIR/bin:$PATH''' + prefix = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seafile') + def prepend_env_value(name, value, seperator=':'): + '''append a new value to a list''' + try: + current_value = os.environ[name] + except KeyError: + current_value = '' + + new_value = value + if current_value: + new_value += seperator + current_value + + os.environ[name] = new_value + + prepend_env_value('CPPFLAGS', + '-I%s' % os.path.join(prefix, 'include'), + seperator=' ') + + prepend_env_value('CPPFLAGS', + '-DLIBICONV_PLUG', + seperator=' ') + + if conf[CONF_NO_STRIP]: + prepend_env_value('CPPFLAGS', + '-g -O0', + seperator=' ') + + prepend_env_value('CFLAGS', + '-g -O0', + seperator=' ') + + prepend_env_value('LDFLAGS', + '-L%s' % os.path.join(prefix, 'lib'), + seperator=' ') + + prepend_env_value('LDFLAGS', + '-L%s' % os.path.join(prefix, 'lib64'), + seperator=' ') + + prepend_env_value('PATH', os.path.join(prefix, 'bin')) + prepend_env_value('PKG_CONFIG_PATH', os.path.join(prefix, 'lib', 'pkgconfig')) + prepend_env_value('PKG_CONFIG_PATH', os.path.join(prefix, 'lib64', 'pkgconfig')) + +def copy_user_manuals(): + builddir = conf[CONF_BUILDDIR] + # src_pattern = os.path.join(builddir, Seafile().projdir, 'doc', '*.doc') + src_pattern = os.path.join(builddir, Seafile().projdir, 'doc', 'seafile-tutorial.doc') + dst_dir = os.path.join(builddir, 'seafile-server', 'seafile', 'docs') + + must_mkdir(dst_dir) + + for path in glob.glob(src_pattern): + must_copy(path, dst_dir) + +def copy_seafdav(): + dst_dir = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seahub', 'thirdpart') + tarball = os.path.join(conf[CONF_SRCDIR], 'seafdav.tar.gz') + if run('tar xf %s -C %s' % (tarball, dst_dir)) != 0: + error('failed to uncompress %s' % tarball) + + dst_dir = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seahub', 'thirdpart') + tarball = os.path.join(conf[CONF_SRCDIR], 'seafobj.tar.gz') + if run('tar xf %s -C %s' % (tarball, dst_dir)) != 0: + error('failed to uncompress %s' % tarball) + +def copy_scripts_and_libs(): + '''Copy server release scripts and shared libs, as well as seahub + thirdpart libs + + ''' + builddir = conf[CONF_BUILDDIR] + scripts_srcdir = os.path.join(builddir, Seafile().projdir, 'scripts') + serverdir = os.path.join(builddir, 'seafile-server') + + must_copy(os.path.join(scripts_srcdir, 'setup-seafile.sh'), + serverdir) + must_copy(os.path.join(scripts_srcdir, 'setup-seafile-mysql.sh'), + serverdir) + must_copy(os.path.join(scripts_srcdir, 'setup-seafile-mysql.py'), + serverdir) + must_copy(os.path.join(scripts_srcdir, 'seafile.sh'), + serverdir) + must_copy(os.path.join(scripts_srcdir, 'seahub.sh'), + serverdir) + must_copy(os.path.join(scripts_srcdir, 'reset-admin.sh'), + serverdir) + must_copy(os.path.join(scripts_srcdir, 'seaf-fuse.sh'), + serverdir) + 
must_copy(os.path.join(scripts_srcdir, 'check_init_admin.py'), + serverdir) + must_copy(os.path.join(scripts_srcdir, 'seaf-gc.sh'), + serverdir) + must_copy(os.path.join(scripts_srcdir, 'seaf-fsck.sh'), + serverdir) + + # copy update scripts + update_scriptsdir = os.path.join(scripts_srcdir, 'upgrade') + dst_update_scriptsdir = os.path.join(serverdir, 'upgrade') + try: + shutil.copytree(update_scriptsdir, dst_update_scriptsdir) + except Exception as e: + error('failed to copy upgrade scripts: %s' % e) + + # copy sql scripts + sql_scriptsdir = os.path.join(scripts_srcdir, 'sql') + dst_sql_scriptsdir = os.path.join(serverdir, 'sql') + try: + shutil.copytree(sql_scriptsdir, dst_sql_scriptsdir) + except Exception as e: + error('failed to copy sql scripts: %s' % e) + + # copy runtime/seahub.conf + runtimedir = os.path.join(serverdir, 'runtime') + must_mkdir(runtimedir) + must_copy(os.path.join(scripts_srcdir, 'seahub.conf'), + runtimedir) + + # move seahub to seafile-server/seahub + src_seahubdir = Seahub().projdir + dst_seahubdir = os.path.join(serverdir, 'seahub') + try: + shutil.move(src_seahubdir, dst_seahubdir) + except Exception as e: + error('failed to move seahub to seafile-server/seahub: %s' % e) + + # copy seahub thirdpart libs + seahub_thirdpart = os.path.join(dst_seahubdir, 'thirdpart') + copy_seahub_thirdpart_libs(seahub_thirdpart) + copy_seafdav() + + + # copy_pdf2htmlex() + + # copy shared c libs + copy_shared_libs() + copy_user_manuals() + +def copy_pdf2htmlex(): + '''Copy pdf2htmlEX exectuable and its dependent libs''' + pdf2htmlEX_executable = find_in_path('pdf2htmlEX') + libs = get_dependent_libs(pdf2htmlEX_executable) + + builddir = conf[CONF_BUILDDIR] + dst_lib_dir = os.path.join(builddir, + 'seafile-server', + 'seafile', + 'lib') + + dst_bin_dir = os.path.join(builddir, + 'seafile-server', + 'seafile', + 'bin') + + for lib in libs: + dst_file = os.path.join(dst_lib_dir, os.path.basename(lib)) + if os.path.exists(dst_file): + continue + info('Copying %s' % lib) + must_copy(lib, dst_lib_dir) + + must_copy(pdf2htmlEX_executable, dst_bin_dir) + +def get_dependent_libs(executable): + syslibs = ['libsearpc', 'libseafile', 'libpthread.so', 'libc.so', 'libm.so', 'librt.so', 'libdl.so', 'libselinux.so', 'libresolv.so' ] + def is_syslib(lib): + for syslib in syslibs: + if syslib in lib: + return True + return False + + ldd_output = subprocess.getoutput('ldd %s' % executable) + ret = set() + for line in ldd_output.splitlines(): + tokens = line.split() + if len(tokens) != 4: + continue + if is_syslib(tokens[0]): + continue + + ret.add(tokens[2]) + + return ret + +def copy_shared_libs(): + '''copy shared c libs, such as libevent, glib, libmysqlclient''' + builddir = conf[CONF_BUILDDIR] + + dst_dir = os.path.join(builddir, + 'seafile-server', + 'seafile', + 'lib') + + seafile_path = os.path.join(builddir, + 'seafile-server', + 'seafile', + 'bin', + 'seaf-server') + + seaf_fuse_path = os.path.join(builddir, + 'seafile-server', + 'seafile', + 'bin', + 'seaf-fuse') + + libs = set() + libs.update(get_dependent_libs(seafile_path)) + libs.update(get_dependent_libs(seaf_fuse_path)) + + for lib in libs: + dst_file = os.path.join(dst_dir, os.path.basename(lib)) + if os.path.exists(dst_file): + continue + info('Copying %s' % lib) + shutil.copy(lib, dst_dir) + +def copy_seahub_thirdpart_libs(seahub_thirdpart): + '''copy python third-party libraries from ${thirdpartdir} to + seahub/thirdpart + + ''' + src = conf[CONF_THIRDPARTDIR] + dst = seahub_thirdpart + + try: + for name in os.listdir(src): + 
src_path = os.path.join(src, name) + target_path = os.path.join(dst, name) + if os.path.isdir(src_path): + shutil.copytree(src_path, target_path) + else: + shutil.copy(src_path, target_path) + except Exception as e: + error('failed to copy seahub thirdpart libs: %s' % e) + +def strip_symbols(): + def do_strip(fn): + run('chmod u+w %s' % fn) + info('stripping: %s' % fn) + run('strip "%s"' % fn) + + def remove_static_lib(fn): + info('removing: %s' % fn) + os.remove(fn) + + for parent, dnames, fnames in os.walk('seafile-server/seafile'): + dummy = dnames # avoid pylint 'unused' warning + for fname in fnames: + fn = os.path.join(parent, fname) + if os.path.isdir(fn): + continue + + if fn.endswith(".a") or fn.endswith(".la"): + remove_static_lib(fn) + continue + + if os.path.islink(fn): + continue + + finfo = subprocess.getoutput('file "%s"' % fn) + + if 'not stripped' in finfo: + do_strip(fn) + +def create_tarball(tarball_name): + '''call tar command to generate a tarball''' + version = conf[CONF_VERSION] + + serverdir = 'seafile-server' + versioned_serverdir = 'seafile-server-' + version + + # move seafile-server to seafile-server-${version} + try: + shutil.move(serverdir, versioned_serverdir) + except Exception as e: + error('failed to move %s to %s: %s' % (serverdir, versioned_serverdir, e)) + + ignored_patterns = [ + # common ignored files + '*.pyc', + '*~', + '*#', + + # seahub + os.path.join(versioned_serverdir, 'seahub', '.git*'), + os.path.join(versioned_serverdir, 'seahub', 'media', 'flexpaper*'), + os.path.join(versioned_serverdir, 'seahub', 'avatar', 'testdata*'), + + # seafile + os.path.join(versioned_serverdir, 'seafile', 'share*'), + os.path.join(versioned_serverdir, 'seafile', 'include*'), + os.path.join(versioned_serverdir, 'seafile', 'lib', 'pkgconfig*'), + os.path.join(versioned_serverdir, 'seafile', 'lib64', 'pkgconfig*'), + os.path.join(versioned_serverdir, 'seafile', 'bin', 'searpc-codegen.py'), + os.path.join(versioned_serverdir, 'seafile', 'bin', 'seafile-admin'), + os.path.join(versioned_serverdir, 'seafile', 'bin', 'seafile'), + ] + + excludes_list = [ '--exclude=%s' % pattern for pattern in ignored_patterns ] + excludes = ' '.join(excludes_list) + + tar_cmd = 'tar czf %(tarball_name)s %(versioned_serverdir)s %(excludes)s' \ + % dict(tarball_name=tarball_name, + versioned_serverdir=versioned_serverdir, + excludes=excludes) + + if run(tar_cmd) < 0: + error('failed to generate the tarball') + +def gen_tarball(): + # strip symbols of libraries to reduce size + if not conf[CONF_NO_STRIP]: + try: + strip_symbols() + except Exception as e: + error('failed to strip symbols: %s' % e) + + # determine the output name + # 64-bit: seafile-server_1.2.2_x86-64.tar.gz + # 32-bit: seafile-server_1.2.2_i386.tar.gz + version = conf[CONF_VERSION] + arch = os.uname()[-1].replace('_', '-') + if 'arm' in platform.machine(): + arch = 'pi' + elif arch != 'x86-64': + arch = 'i386' + + dbg = '' + if conf[CONF_NO_STRIP]: + dbg = '.dbg' + + tarball_name = 'seafile-server_%(version)s_%(arch)s%(dbg)s.tar.gz' \ + % dict(version=version, arch=arch, dbg=dbg) + dst_tarball = os.path.join(conf[CONF_OUTPUTDIR], tarball_name) + + # generate the tarball + try: + create_tarball(tarball_name) + except Exception as e: + error('failed to generate tarball: %s' % e) + + # move tarball to outputdir + try: + shutil.copy(tarball_name, dst_tarball) + except Exception as e: + error('failed to copy %s to %s: %s' % (tarball_name, dst_tarball, e)) + + print('---------------------------------------------') + print('The 
build is successful. Output is:\t%s' % dst_tarball) + print('---------------------------------------------') + +def main(): + parse_args() + setup_build_env() + + libsearpc = Libsearpc() + seafile = Seafile() + seahub = Seahub() + + libsearpc.uncompress() + libsearpc.build() + + seafile.uncompress() + seafile.build() + + seahub.uncompress() + seahub.build() + + copy_scripts_and_libs() + gen_tarball() + +if __name__ == '__main__': + main() diff --git a/scripts/check_init_admin.py b/scripts/check_init_admin.py new file mode 100644 index 0000000000..cc7bcd37ae --- /dev/null +++ b/scripts/check_init_admin.py @@ -0,0 +1,368 @@ +#coding: UTF-8 + +'''This script would check if there is admin, and prompt the user to create a new one if non exist''' +import json +import sys +import os +import time +import re +import shutil +import glob +import subprocess +import hashlib +import getpass +import uuid +import warnings + +from configparser import ConfigParser + +from seaserv import ccnet_api + +try: + import readline # pylint: disable=W0611 +except ImportError: + pass + + +SERVER_MANUAL_HTTP = 'https://download.seafile.com/published/seafile-manual/home.md' + +class Utils(object): + '''Groups all helper functions here''' + @staticmethod + def welcome(): + '''Show welcome message''' + welcome_msg = '''\ +----------------------------------------------------------------- +This script will guide you to setup your seafile server using MySQL. +Make sure you have read seafile server manual at + + %s + +Press ENTER to continue +-----------------------------------------------------------------''' % SERVER_MANUAL_HTTP + print(welcome_msg) + input() + + @staticmethod + def highlight(content): + '''Add ANSI color to content to get it highlighted on terminal''' + return '\x1b[33m%s\x1b[m' % content + + @staticmethod + def info(msg): + print(msg) + + @staticmethod + def error(msg): + '''Print error and exit''' + print() + print('Error: ' + msg) + sys.exit(1) + + @staticmethod + def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): + '''Run a program and wait it to finish, and return its exit code. The + standard output of this program is supressed. 
+ + ''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(argv, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env) + return proc.wait() + + @staticmethod + def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): + '''Like run_argv but specify a command line string instead of argv''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(cmdline, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env, + shell=True) + return proc.wait() + + @staticmethod + def prepend_env_value(name, value, env=None, seperator=':'): + '''prepend a new value to a list''' + if env is None: + env = os.environ + + try: + current_value = env[name] + except KeyError: + current_value = '' + + new_value = value + if current_value: + new_value += seperator + current_value + + env[name] = new_value + + @staticmethod + def must_mkdir(path): + '''Create a directory, exit on failure''' + try: + os.mkdir(path) + except OSError as e: + Utils.error('failed to create directory %s:%s' % (path, e)) + + @staticmethod + def must_copy(src, dst): + '''Copy src to dst, exit on failure''' + try: + shutil.copy(src, dst) + except Exception as e: + Utils.error('failed to copy %s to %s: %s' % (src, dst, e)) + + @staticmethod + def find_in_path(prog): + if 'win32' in sys.platform: + sep = ';' + else: + sep = ':' + + dirs = os.environ['PATH'].split(sep) + for d in dirs: + d = d.strip() + if d == '': + continue + path = os.path.join(d, prog) + if os.path.exists(path): + return path + + return None + + @staticmethod + def get_python_executable(): + '''Return the python executable. This should be the PYTHON environment + variable which is set in setup-seafile-mysql.sh + + ''' + return os.environ['PYTHON'] + + @staticmethod + def read_config(fn): + '''Return a case sensitive ConfigParser by reading the file "fn"''' + cp = ConfigParser() + cp.optionxform = str + cp.read(fn) + + return cp + + @staticmethod + def write_config(cp, fn): + '''Return a case sensitive ConfigParser by reading the file "fn"''' + with open(fn, 'w') as fp: + cp.write(fp) + + @staticmethod + def ask_question(desc, + key=None, + note=None, + default=None, + validate=None, + yes_or_no=False, + password=False): + '''Ask a question, return the answer. + @desc description, e.g. "What is the port of ccnet?" + + @key a name to represent the target of the question, e.g. "port for + ccnet server" + + @note additional information for the question, e.g. "Must be a valid + port number" + + @default the default value of the question. If the default value is + not None, when the user enter nothing and press [ENTER], the default + value would be returned + + @validate a function that takes the user input as the only parameter + and validate it. It should return a validated value, or throws an + "InvalidAnswer" exception if the input is not valid. 
+ + @yes_or_no If true, the user must answer "yes" or "no", and a boolean + value would be returned + + @password If true, the user input would not be echoed to the + console + + ''' + assert key or yes_or_no + # Format description + print() + if note: + desc += '\n' + note + + desc += '\n' + if yes_or_no: + desc += '[ yes or no ]' + else: + if default: + desc += '[ default "%s" ]' % default + else: + desc += '[ %s ]' % key + + desc += ' ' + while True: + # prompt for user input + if password: + answer = getpass.getpass(desc).strip() + else: + answer = input(desc).strip() + + # No user input: use default + if not answer: + if default: + answer = default + else: + continue + + # Have user input: validate answer + if yes_or_no: + if answer not in ['yes', 'no']: + print(Utils.highlight('\nPlease answer yes or no\n')) + continue + else: + return answer == 'yes' + else: + if validate: + try: + return validate(answer) + except InvalidAnswer as e: + print(Utils.highlight('\n%s\n' % e)) + continue + else: + return answer + + @staticmethod + def validate_port(port): + try: + port = int(port) + except ValueError: + raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port)) + + if port <= 0 or port > 65535: + raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port)) + + return port + + +class InvalidAnswer(Exception): + def __init__(self, msg): + Exception.__init__(self) + self.msg = msg + def __str__(self): + return self.msg + +### END of Utils +#################### + +def need_create_admin(): + users = ccnet_api.get_emailusers('DB', 0, 1) + return len(users) == 0 + +def create_admin(email, passwd): + if ccnet_api.add_emailuser(email, passwd, 1, 1) < 0: + raise Exception('failed to create admin') + else: + print('\n\n') + print('----------------------------------------') + print('Successfully created seafile admin') + print('----------------------------------------') + print('\n\n') + +def ask_admin_email(): + print() + print('----------------------------------------') + print('It\'s the first time you start the seafile server. Now let\'s create the admin account') + print('----------------------------------------') + def validate(email): + # whitespace is not allowed + if re.match(r'[\s]', email): + raise InvalidAnswer('%s is not a valid email address' % Utils.highlight(email)) + # must be a valid email address + if not re.match(r'^.+@.*\..+$', email): + raise InvalidAnswer('%s is not a valid email address' % Utils.highlight(email)) + + return email + + key = 'admin email' + question = 'What is the ' + Utils.highlight('email') + ' for the admin account?' + return Utils.ask_question(question, + key=key, + validate=validate) + +def ask_admin_password(): + def validate(password): + key = 'admin password again' + question = 'Enter the ' + Utils.highlight('password again:') + password_again = Utils.ask_question(question, + key=key, + password=True) + + if password_again != password: + raise InvalidAnswer('password mismatch') + + return password + + key = 'admin password' + question = 'What is the ' + Utils.highlight('password') + ' for the admin account?' 
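+    # The nested validate() above prompts for the password a second time and
+    # raises InvalidAnswer on a mismatch; Utils.ask_question() catches that
+    # exception and re-prompts, so the call below only returns once both
+    # entries match.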
+ return Utils.ask_question(question, + key=key, + password=True, + validate=validate) + + +def main(): + if not need_create_admin(): + return + + password_file = os.path.join(os.environ['SEAFILE_CENTRAL_CONF_DIR'], 'admin.txt') + if os.path.exists(password_file): + with open(password_file, 'r') as fp: + pwinfo = json.load(fp) + email = pwinfo['email'] + passwd = pwinfo['password'] + os.unlink(password_file) + else: + email = ask_admin_email() + passwd = ask_admin_password() + + create_admin(email, passwd) + +if __name__ == '__main__': + try: + main() + except KeyboardInterrupt: + print('\n\n\n') + print(Utils.highlight('Aborted.')) + print() + sys.exit(1) + except Exception as e: + print() + print(Utils.highlight('Error happened during creating seafile admin.')) + print() diff --git a/scripts/gc.bat b/scripts/gc.bat new file mode 100644 index 0000000000..84770301f1 --- /dev/null +++ b/scripts/gc.bat @@ -0,0 +1,4 @@ +@echo off +cd /d %~dp0 +set PYTHONPATH=%PYTHONPATH%;%~dp0\seahub\thirdpart +start python upgrade/py/gc.py diff --git a/scripts/reset-admin.sh b/scripts/reset-admin.sh new file mode 100755 index 0000000000..e6a9ad5075 --- /dev/null +++ b/scripts/reset-admin.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +central_config_dir=${TOPDIR}/conf + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi +} + +function prepare_seahub_log_dir() { + logdir=${TOPDIR}/logs + if ! [[ -d ${logsdir} ]]; then + if ! 
mkdir -p "${logdir}"; then + echo "ERROR: failed to create logs dir \"${logdir}\"" + exit 1 + fi + fi + export SEAHUB_LOG_DIR=${logdir} +} + +check_python_executable; +validate_seafile_data_dir; +prepare_seahub_log_dir; + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export SEAFILE_CONF_DIR=${default_seafile_data_dir} +export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export SEAFILE_RPC_PIPE_PATH=${INSTALLPATH}/runtime + +manage_py=${INSTALLPATH}/seahub/manage.py +exec "$PYTHON" "$manage_py" createsuperuser diff --git a/scripts/seaf-fsck.sh b/scripts/seaf-fsck.sh new file mode 100755 index 0000000000..6c08be517b --- /dev/null +++ b/scripts/seaf-fsck.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +echo "" + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_conf_dir=${TOPDIR}/conf +seaf_fsck=${INSTALLPATH}/seafile/bin/seaf-fsck + +export PATH=${INSTALLPATH}/seafile/bin:$PATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +script_name=$0 +function usage () { + echo "usage : " + echo "$(basename ${script_name}) [-h/--help] [-r/--repair] [-E/--export path_to_export] [repo_id_1 [repo_id_2 ...]]" + echo "" +} + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi +} + +function run_seaf_fsck () { + validate_seafile_data_dir; + + echo "Starting seaf-fsck, please wait ..." + echo + + LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_fsck} \ + -c "${default_ccnet_conf_dir}" -d "${default_seafile_data_dir}" \ + -F "${default_conf_dir}" \ + ${seaf_fsck_opts} + + echo "seaf-fsck run done" + echo +} + +if [ $# -gt 0 ]; +then + for param in $@; + do + if [ ${param} = "-h" -o ${param} = "--help" ]; + then + usage; + exit 1; + fi + done +fi + +seaf_fsck_opts=$@ +run_seaf_fsck; + +echo "Done." diff --git a/scripts/seaf-fuse.sh b/scripts/seaf-fuse.sh new file mode 100755 index 0000000000..aad6c5d699 --- /dev/null +++ b/scripts/seaf-fuse.sh @@ -0,0 +1,122 @@ +#!/bin/bash + +echo "" + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_conf_dir=${TOPDIR}/conf +seaf_fuse=${INSTALLPATH}/seafile/bin/seaf-fuse + +export PATH=${INSTALLPATH}/seafile/bin:$PATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +script_name=$0 +function usage () { + echo "usage : " + echo "$(basename ${script_name}) { start | stop | restart } " + echo "" +} + +# check args +if [[ "$1" != "start" && "$1" != "stop" && "$1" != "restart" ]]; then + usage; + exit 1; +fi + +if [[ ($1 == "start" || $1 == "restart" ) && $# -lt 2 ]]; then + usage; + exit 1 +fi + +if [[ $1 == "stop" && $# != 1 ]]; then + usage; + exit 1 +fi + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" 
+ echo "" + exit 1; + fi +} + +function validate_already_running () { + if pid=$(pgrep -f "seaf-fuse -c ${default_ccnet_conf_dir}" 2>/dev/null); then + echo "seaf-fuse is already running, pid $pid" + echo + exit 1; + fi +} + +function warning_if_seafile_not_running () { + if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then + echo + echo "Warning: seafile-controller not running. Have you run \"./seafile.sh start\" ?" + echo + fi +} + +function start_seaf_fuse () { + validate_already_running; + warning_if_seafile_not_running; + validate_seafile_data_dir; + + echo "Starting seaf-fuse, please wait ..." + + logfile=${TOPDIR}/logs/seaf-fuse.log + + LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_fuse} \ + -c "${default_ccnet_conf_dir}" \ + -d "${default_seafile_data_dir}" \ + -F "${default_conf_dir}" \ + -l "${logfile}" \ + "$@" + + sleep 2 + + # check if seaf-fuse started successfully + if ! pgrep -f "seaf-fuse -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then + echo "Failed to start seaf-fuse" + exit 1; + fi + + echo "seaf-fuse started" + echo +} + +function stop_seaf_fuse() { + if ! pgrep -f "seaf-fuse -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then + echo "seaf-fuse not running yet" + return 1; + fi + + echo "Stopping seaf-fuse ..." + pkill -SIGTERM -f "seaf-fuse -c ${default_ccnet_conf_dir}" + return 0 +} + +function restart_seaf_fuse () { + stop_seaf_fuse + sleep 2 + start_seaf_fuse $@ +} + +case $1 in + "start" ) + shift + start_seaf_fuse $@; + ;; + "stop" ) + stop_seaf_fuse; + ;; + "restart" ) + shift + restart_seaf_fuse $@; +esac + +echo "Done." diff --git a/scripts/seaf-gc.sh b/scripts/seaf-gc.sh new file mode 100755 index 0000000000..e8f01560d3 --- /dev/null +++ b/scripts/seaf-gc.sh @@ -0,0 +1,91 @@ +#!/bin/bash + +echo "" + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_conf_dir=${TOPDIR}/conf +seaf_gc=${INSTALLPATH}/seafile/bin/seafserv-gc +seaf_gc_opts="" + +export PATH=${INSTALLPATH}/seafile/bin:$PATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +script_name=$0 +function usage () { + echo "usage : " + echo "$(basename ${script_name}) [--dry-run | -D] [--rm-deleted | -r] [repo-id1] [repo-id2]" + echo "" +} + + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi +} + +function check_component_running() { + name=$1 + cmd=$2 + if pid=$(pgrep -f "$cmd" 2>/dev/null); then + echo "[$name] is running, pid $pid. You can stop it by: " + echo + echo " kill $pid" + echo + echo "Stop it and try again." + echo + exit + fi +} + +function validate_already_running () { + if pid=$(pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null); then + echo "seafile server is still running, stop it by \"seafile.sh stop\"" + echo + exit 1; + fi + + check_component_running "seaf-server" "seaf-server -c ${default_ccnet_conf_dir}" + check_component_running "fileserver" "fileserver -c ${default_ccnet_conf_dir}" + check_component_running "seafdav" "wsgidav.server.server_cli" +} + +function run_seaf_gc () { + validate_already_running; + validate_seafile_data_dir; + + echo "Starting seafserv-gc, please wait ..." 
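+    # validate_already_running() above refuses to start GC while seaf-server,
+    # fileserver or seafdav are still up, so nothing else is writing to the
+    # data directory here.  ${seaf_gc_opts} is left unquoted on purpose so that
+    # options such as --dry-run and any repo ids are passed as separate words.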
+ + LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_gc} \ + -c "${default_ccnet_conf_dir}" \ + -d "${default_seafile_data_dir}" \ + -F "${default_conf_dir}" \ + ${seaf_gc_opts} + + echo "seafserv-gc run done" + echo +} + +if [ $# -gt 0 ]; +then + for param in $@; + do + if [ ${param} = "-h" -o ${param} = "--help" ]; + then + usage; + exit 1; + fi + done +fi + +seaf_gc_opts=$@ +run_seaf_gc; + +echo "Done." diff --git a/scripts/seafile.sh b/scripts/seafile.sh new file mode 100755 index 0000000000..2585e4eaac --- /dev/null +++ b/scripts/seafile.sh @@ -0,0 +1,153 @@ +#!/bin/bash + +### BEGIN INIT INFO +# Provides: seafile +# Required-Start: $local_fs $remote_fs $network +# Required-Stop: $local_fs +# Default-Start: 1 2 3 4 5 +# Default-Stop: +# Short-Description: Starts Seafile Server +# Description: starts Seafile Server +### END INIT INFO + +echo "" + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +central_config_dir=${TOPDIR}/conf +seaf_controller="${INSTALLPATH}/seafile/bin/seafile-controller" + +export PATH=${INSTALLPATH}/seafile/bin:$PATH +export ORIG_LD_LIBRARY_PATH=${LD_LIBRARY_PATH} +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +script_name=$0 +function usage () { + echo "usage : " + echo "$(basename ${script_name}) { start | stop | restart } " + echo "" +} + +# check args +if [[ $# != 1 || ( "$1" != "start" && "$1" != "stop" && "$1" != "restart" ) ]]; then + usage; + exit 1; +fi + +function validate_running_user () { + real_data_dir=`readlink -f ${default_seafile_data_dir}` + running_user=`id -un` + data_dir_owner=`stat -c %U ${real_data_dir}` + + if [[ "${running_user}" != "${data_dir_owner}" ]]; then + echo "Error: the user running the script (\"${running_user}\") is not the owner of \"${real_data_dir}\" folder, you should use the user \"${data_dir_owner}\" to run the script." + exit -1; + fi +} + +function validate_central_conf_dir () { + if [[ ! -d ${central_config_dir} ]]; then + echo "Error: there is no conf/ directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit -1; + fi +} + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi +} + +function check_component_running() { + name=$1 + cmd=$2 + if pid=$(pgrep -f "$cmd" 2>/dev/null); then + echo "[$name] is running, pid $pid. You can stop it by: " + echo + echo " kill $pid" + echo + echo "Stop it and try again." + echo + exit + fi +} + +function validate_already_running () { + if pid=$(pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null); then + echo "Seafile controller is already running, pid $pid" + echo + exit 1; + fi + + check_component_running "seaf-server" "seaf-server -c ${default_ccnet_conf_dir}" + check_component_running "fileserver" "fileserver -c ${default_ccnet_conf_dir}" + check_component_running "seafdav" "wsgidav.server.server_cli" +} + +function start_seafile_server () { + validate_already_running; + validate_central_conf_dir; + validate_seafile_data_dir; + validate_running_user; + + echo "Starting seafile server, please wait ..." 
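+    # seafile-controller backgrounds itself and brings up the other seafile
+    # daemons; the sleep/pgrep check below only confirms the controller is
+    # still alive, it does not verify that each child component started cleanly.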
+ + mkdir -p $TOPDIR/logs + LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_controller} \ + -c "${default_ccnet_conf_dir}" \ + -d "${default_seafile_data_dir}" \ + -F "${central_config_dir}" + + sleep 3 + + # check if seafile server started successfully + if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then + echo "Failed to start seafile server" + exit 1; + fi + + echo "Seafile server started" + echo +} + +function stop_seafile_server () { + if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then + echo "seafile server not running yet" + return 1; + fi + + echo "Stopping seafile server ..." + pkill -SIGTERM -f "seafile-controller -c ${default_ccnet_conf_dir}" + pkill -f "seaf-server -c ${default_ccnet_conf_dir}" + pkill -f "fileserver -c ${default_ccnet_conf_dir}" + pkill -f "soffice.*--invisible --nocrashreport" + pkill -f "wsgidav.server.server_cli" + return 0 +} + +function restart_seafile_server () { + stop_seafile_server; + sleep 2 + start_seafile_server; +} + +case $1 in + "start" ) + start_seafile_server; + ;; + "stop" ) + stop_seafile_server; + ;; + "restart" ) + restart_seafile_server; +esac + +echo "Done." diff --git a/scripts/seafobj_migrate.py b/scripts/seafobj_migrate.py new file mode 100755 index 0000000000..0e92830bc4 --- /dev/null +++ b/scripts/seafobj_migrate.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python +#coding: utf-8 + +import os +import sys +import logging +from threading import Thread +import queue +import rados + +from seafobj.objstore_factory import SeafObjStoreFactory + +logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO) + +class Worker(Thread): + def __init__(self, do_work, task_queue): + Thread.__init__(self) + self.do_work = do_work + self.task_queue = task_queue + + def run(self): + while True: + try: + task = self.task_queue.get() + if task is None: + break + self.do_work(task) + except Exception as e: + logging.warning('Failed to execute task: %s' % e) + finally: + self.task_queue.task_done() + +class ThreadPool(object): + def __init__(self, do_work, nworker=20): + self.do_work = do_work + self.nworker = nworker + self.task_queue = queue.Queue() + + def start(self): + for i in range(self.nworker): + Worker(self.do_work, self.task_queue).start() + + def put_task(self, task): + self.task_queue.put(task) + + def join(self): + self.task_queue.join() + # notify all thread to stop + for i in range(self.nworker): + self.task_queue.put(None) + +class Task(object): + def __init__(self, repo_id, repo_version, obj_id): + self.repo_id = repo_id + self.repo_version = repo_version + self.obj_id = obj_id + +class ObjMigrateWorker(Thread): + def __init__(self, orig_obj_factory, dest_obj_factory, dtype): + Thread.__init__(self) + self.dtype = dtype + self.orig_store = orig_obj_factory.get_obj_store(dtype) + self.dest_store = dest_obj_factory.get_obj_store(dtype) + self.thread_pool = ThreadPool(self.do_work) + + def run(self): + logging.info('Start to migrate [%s] object' % self.dtype) + self.thread_pool.start() + self.migrate() + self.thread_pool.join() + logging.info('Complete migrate [%s] object' % self.dtype) + + def do_work(self, task): + ioctx = self.dest_store.ceph_client.ioctx_pool.get_ioctx(task.repo_id) + try: + ioctx.stat(task.obj_id) + except rados.ObjectNotFound: + try: + data = self.orig_store.read_obj_raw(task.repo_id, task.repo_version, task.obj_id) + except Exception as e: + logging.warning('[%s] Failed to read object %s from repo %s: %s' % (self.dtype, task.obj_id, 
task.repo_id, e)) + raise + + try: + ioctx.write_full(task.obj_id, data) + except Exception as e: + logging.warning('[%s] Failed to write object %s of repo %s to Ceph: %s' % (self.dtype, task.obj_id, task.repo_id, e)) + raise + except Exception as e: + logging.warning('[%s] Failed to stat object %s of repo %s in Ceph: %s' % (self.dtype, task.obj_id, task.repo_id, e)) + raise + finally: + self.dest_store.ceph_client.ioctx_pool.return_ioctx(ioctx) + + def migrate(self): + top_path = self.orig_store.obj_dir + for repo_id in os.listdir(top_path): + repo_path = os.path.join(top_path, repo_id) + for spath in os.listdir(repo_path): + obj_path = os.path.join(repo_path, spath) + for lpath in os.listdir(obj_path): + obj_id = spath + lpath + task = Task(repo_id, 1, obj_id) + self.thread_pool.put_task(task) + +def main(): + try: + fs_obj_factory = SeafObjStoreFactory() + os.environ['SEAFILE_CENTRAL_CONF_DIR'] = os.environ['CEPH_SEAFILE_CENTRAL_CONF_DIR'] + except KeyError: + logging.warning('CEPH_SEAFILE_CENTRAL_CONF_DIR environment variable is not set.\n') + sys.exit() + + ceph_obj_factory = SeafObjStoreFactory() + + dtypes = ['commits', 'fs', 'blocks'] + for dtype in dtypes: + ObjMigrateWorker(fs_obj_factory, ceph_obj_factory, dtype).start() + +if __name__ == '__main__': + main() diff --git a/scripts/seahub.conf b/scripts/seahub.conf new file mode 100644 index 0000000000..2183e82619 --- /dev/null +++ b/scripts/seahub.conf @@ -0,0 +1,15 @@ +import os + +daemon = True +workers = 5 + +# Logging +runtime_dir = os.path.dirname(__file__) +pidfile = os.path.join(runtime_dir, 'seahub.pid') +errorlog = os.path.join(runtime_dir, 'error.log') + +# disable access log +#accesslog = os.path.join(runtime_dir, 'access.log') + +# for file upload, we need a longer timeout value (default is only 30s, too short) +timeout = 1200 diff --git a/scripts/seahub.sh b/scripts/seahub.sh new file mode 100755 index 0000000000..e4835bcfea --- /dev/null +++ b/scripts/seahub.sh @@ -0,0 +1,294 @@ +#!/bin/bash + +### BEGIN INIT INFO +# Provides: seahub +# Required-Start: $local_fs $remote_fs $network +# Required-Stop: $local_fs +# Default-Start: 1 2 3 4 5 +# Default-Stop: +# Short-Description: Starts Seahub +# Description: starts Seahub +### END INIT INFO + +echo "" + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +central_config_dir=${TOPDIR}/conf +seafile_rpc_pipe_path=${INSTALLPATH}/runtime + +manage_py=${INSTALLPATH}/seahub/manage.py +gunicorn_conf=${TOPDIR}/conf/gunicorn.conf.py +pidfile=${TOPDIR}/pids/seahub.pid +errorlog=${TOPDIR}/logs/gunicorn_error.log +accesslog=${TOPDIR}/logs/gunicorn_access.log +gunicorn_exe=${INSTALLPATH}/seahub/thirdpart/bin/gunicorn + +script_name=$0 +function usage () { + echo "Usage: " + echo + echo " $(basename ${script_name}) { start | stop | restart }" + echo + echo "To run seahub in fastcgi:" + echo + echo " $(basename ${script_name}) { start-fastcgi | stop | restart-fastcgi }" + echo + echo " is optional, and defaults to 8000" + echo "" +} + +# Check args +if [[ $1 != "start" && $1 != "stop" && $1 != "restart" \ + && $1 != "start-fastcgi" && $1 != "restart-fastcgi" && $1 != "clearsessions" && $1 != "python-env" ]]; then + usage; + exit 1; +fi + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep 
"3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function validate_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Error: there is no seafile server data directory." + echo "Have you run setup-seafile.sh before this?" + echo "" + exit 1; + fi +} + +function validate_seahub_running () { + if pgrep -f "${manage_py}" 2>/dev/null 1>&2; then + echo "Seahub is already running." + exit 1; + elif pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo "Seahub is already running." + exit 1; + fi +} + +function validate_port () { + if ! [[ ${port} =~ ^[1-9][0-9]{1,4}$ ]] ; then + printf "\033[033m${port}\033[m is not a valid port number\n\n" + usage; + exit 1 + fi +} + +if [[ ($1 == "start" || $1 == "restart" || $1 == "start-fastcgi" || $1 == "restart-fastcgi") \ + && ($# == 2 || $# == 1) ]]; then + if [[ $# == 2 ]]; then + port=$2 + validate_port + else + port=8000 + fi +elif [[ $1 == "stop" && $# == 1 ]]; then + dummy=dummy +elif [[ $1 == "clearsessions" && $# == 1 ]]; then + dummy=dummy +elif [[ $1 == "python-env" ]]; then + dummy=dummy +else + usage; + exit 1 +fi + +function warning_if_seafile_not_running () { + if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then + echo + echo "Warning: seafile-controller not running. Have you run \"./seafile.sh start\" ?" + echo + exit 1 + fi +} + +function prepare_seahub_log_dir() { + logdir=${TOPDIR}/logs + if ! [[ -d ${logsdir} ]]; then + if ! mkdir -p "${logdir}"; then + echo "ERROR: failed to create logs dir \"${logdir}\"" + exit 1 + fi + fi + export SEAHUB_LOG_DIR=${logdir} +} + +function before_start() { + prepare_env; + warning_if_seafile_not_running; + validate_seahub_running; + prepare_seahub_log_dir; +} + +function start_seahub () { + before_start; + echo "Starting seahub at port ${port} ..." + check_init_admin; + $PYTHON $gunicorn_exe seahub.wsgi:application -c "${gunicorn_conf}" --preload + + # Ensure seahub is started successfully + sleep 5 + if ! pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + printf "\033[33mError:Seahub failed to start.\033[m\n" + echo "Please try to run \"./seahub.sh start\" again" + exit 1; + fi + echo + echo "Seahub is started" + echo +} + +function start_seahub_fastcgi () { + before_start; + + # Returns 127.0.0.1 if SEAFILE_FASTCGI_HOST is unset or hasn't got any value, + # otherwise returns value of SEAFILE_FASTCGI_HOST environment variable + address=`(test -z "$SEAFILE_FASTCGI_HOST" && echo "127.0.0.1") || echo $SEAFILE_FASTCGI_HOST` + + echo "Starting seahub (fastcgi) at ${address}:${port} ..." + check_init_admin; + $PYTHON "${manage_py}" runfcgi maxchildren=8 host=$address port=$port pidfile=$pidfile \ + outlog=${accesslog} errlog=${errorlog} + + # Ensure seahub is started successfully + sleep 5 + if ! 
pgrep -f "${manage_py}" 1>/dev/null; then + printf "\033[33mError:Seahub failed to start.\033[m\n" + exit 1; + fi + echo + echo "Seahub is started" + echo +} + +function prepare_env() { + check_python_executable; + validate_seafile_data_dir; + + if [[ -z "$LANG" ]]; then + echo "LANG is not set in ENV, set to en_US.UTF-8" + export LANG='en_US.UTF-8' + fi + if [[ -z "$LC_ALL" ]]; then + echo "LC_ALL is not set in ENV, set to en_US.UTF-8" + export LC_ALL='en_US.UTF-8' + fi + + export CCNET_CONF_DIR=${default_ccnet_conf_dir} + export SEAFILE_CONF_DIR=${default_seafile_data_dir} + export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir} + export SEAFILE_RPC_PIPE_PATH=${seafile_rpc_pipe_path} + export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH + + +} + +function clear_sessions () { + prepare_env; + + echo "Start clear expired session records ..." + $PYTHON "${manage_py}" clearsessions + + echo + echo "Done" + echo +} + +function stop_seahub () { + if [[ -f ${pidfile} ]]; then + echo "Stopping seahub ..." + pkill -9 -f "thirdpart/bin/gunicorn" + sleep 1 + if pgrep -f "thirdpart/bin/gunicorn" 2>/dev/null 1>&2 ; then + echo 'Failed to stop seahub.' + exit 1 + fi + rm -f ${pidfile} + return 0 + else + echo "Seahub is not running" + fi +} + +function check_init_admin() { + check_init_admin_script=${INSTALLPATH}/check_init_admin.py + if ! $PYTHON $check_init_admin_script; then + exit 1 + fi +} + +function run_python_env() { + local pyexec + + prepare_env; + + if which ipython 2>/dev/null; then + pyexec=ipython + else + pyexec=$PYTHON + fi + + if [[ $# -eq 0 ]]; then + $pyexec "$@" + else + "$@" + fi +} + +case $1 in + "start" ) + start_seahub; + ;; + "start-fastcgi" ) + start_seahub_fastcgi; + ;; + "stop" ) + stop_seahub; + ;; + "restart" ) + stop_seahub + sleep 2 + start_seahub + ;; + "restart-fastcgi" ) + stop_seahub + sleep 2 + start_seahub_fastcgi + ;; + "python-env") + shift + run_python_env "$@" + ;; + "clearsessions" ) + clear_sessions + ;; +esac + +echo "Done." +echo "" diff --git a/scripts/server-release.md b/scripts/server-release.md new file mode 100644 index 0000000000..c02226a6a0 --- /dev/null +++ b/scripts/server-release.md @@ -0,0 +1,31 @@ +# Server Release Package + +1. Libsearpc + cd libsearpc; + CFLAGS="-O2" configure --prefix=$dest + make install +2. Ccnet + cd ccnet; + CFLAGS="-O2" ./configure --enable-server-pkg --prefix=$dest + make install +3. Seafile + cd seafile; + CFLAGS="-O2" configure --enable-server-pkg --prefix=$dest + make install +4. copy shared libraries + scripts/cp-shared-lib.py $dest/lib +5. strip libs/executables + python do-strip.py +6. Update seahub + cd seahub + git fetch origin + git checkout release + git rebase origin/master + +7. Pack + ./pack-server.sh 1.0.0 + +DONE! 
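+
+A rough end-to-end sketch of steps 1-5 above (a sketch only: $dest is the
+staging prefix, the libsearpc/ccnet/seafile source trees are assumed to sit
+side by side in the current directory, and the example prefix path is
+arbitrary):
+
+    dest=/opt/seafile-server-pkg
+    for proj in libsearpc ccnet seafile; do
+        extra=""
+        [ "$proj" != "libsearpc" ] && extra="--enable-server-pkg"
+        (cd "$proj" && CFLAGS="-O2" ./configure --prefix="$dest" $extra \
+            && make && make install) || exit 1
+    done
+    scripts/cp-shared-lib.py "$dest/lib"
+    python do-strip.py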
+ + + diff --git a/scripts/setup-seafile-mysql.py b/scripts/setup-seafile-mysql.py new file mode 100644 index 0000000000..a50a11ace1 --- /dev/null +++ b/scripts/setup-seafile-mysql.py @@ -0,0 +1,1565 @@ +#coding: UTF-8 + +'''This script would guide the seafile admin to setup seafile with MySQL''' +import argparse +import sys +import os +import time +import re +import shutil +import glob +import subprocess +import hashlib +import getpass +import uuid +import warnings +import socket +from configparser import ConfigParser + +import pymysql + +try: + import readline # pylint: disable=W0611 +except ImportError: + pass + + +SERVER_MANUAL_HTTP = 'https://download.seafile.com/published/seafile-manual/home.md' + +class Utils(object): + '''Groups all helper functions here''' + @staticmethod + def welcome(): + '''Show welcome message''' + welcome_msg = '''\ +----------------------------------------------------------------- +This script will guide you to setup your seafile server using MySQL. +Make sure you have read seafile server manual at + + %s + +Press ENTER to continue +-----------------------------------------------------------------''' % SERVER_MANUAL_HTTP + print(welcome_msg) + input() + + @staticmethod + def highlight(content): + '''Add ANSI color to content to get it highlighted on terminal''' + return '\x1b[33m%s\x1b[m' % content + + @staticmethod + def info(msg): + print(msg) + + @staticmethod + def error(msg): + '''Print error and exit''' + print() + print('Error: ' + msg) + sys.exit(1) + + @staticmethod + def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): + '''Run a program and wait it to finish, and return its exit code. The + standard output of this program is supressed. + + ''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(argv, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env) + return proc.wait() + + @staticmethod + def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): + '''Like run_argv but specify a command line string instead of argv''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(cmdline, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env, + shell=True) + return proc.wait() + + @staticmethod + def get_command_output(args, *a, **kw): + return subprocess.check_output(args, *a, **kw) + + @staticmethod + def prepend_env_value(name, value, env=None, seperator=':'): + '''prepend a new value to a list''' + if env is None: + env = os.environ + + try: + current_value = env[name] + except KeyError: + current_value = '' + + new_value = value + if current_value: + new_value += seperator + current_value + + env[name] = new_value + + @staticmethod + def must_mkdir(path): + '''Create a directory, exit on failure''' + if os.path.exists(path): + return + try: + os.makedirs(path) + except OSError as e: + Utils.error('failed to create directory %s:%s' % (path, e)) + + @staticmethod + def must_copy(src, dst): + '''Copy src to dst, exit on failure''' + try: + shutil.copy(src, dst) + except Exception as e: + Utils.error('failed to copy %s to %s: %s' % (src, dst, e)) + + @staticmethod + def find_in_path(prog): + if 'win32' in sys.platform: + sep = ';' + else: + sep = ':' + + dirs = 
os.environ['PATH'].split(sep) + for d in dirs: + d = d.strip() + if d == '': + continue + path = os.path.join(d, prog) + if os.path.exists(path): + return path + + return None + + @staticmethod + def get_python_executable(): + '''Return the python executable. This should be the PYTHON environment + variable which is set in setup-seafile-mysql.sh + + ''' + return os.environ['PYTHON'] + + @staticmethod + def read_config(fn): + '''Return a case sensitive ConfigParser by reading the file "fn"''' + cp = ConfigParser() + cp.optionxform = str + cp.read(fn) + + return cp + + @staticmethod + def write_config(cp, fn): + '''Return a case sensitive ConfigParser by reading the file "fn"''' + with open(fn, 'w') as fp: + cp.write(fp) + + @staticmethod + def ask_question(desc, + key=None, + note=None, + default=None, + validate=None, + yes_or_no=False, + password=False): + '''Ask a question, return the answer. + @desc description, e.g. "What is the port of ccnet?" + + @key a name to represent the target of the question, e.g. "port for + ccnet server" + + @note additional information for the question, e.g. "Must be a valid + port number" + + @default the default value of the question. If the default value is + not None, when the user enter nothing and press [ENTER], the default + value would be returned + + @validate a function that takes the user input as the only parameter + and validate it. It should return a validated value, or throws an + "InvalidAnswer" exception if the input is not valid. + + @yes_or_no If true, the user must answer "yes" or "no", and a boolean + value would be returned + + @password If true, the user input would not be echoed to the + console + + ''' + assert key or yes_or_no + # Format description + print() + if note: + desc += '\n' + note + + desc += '\n' + if yes_or_no: + desc += '[ yes or no ]' + else: + if default: + desc += '[ default "%s" ]' % default + else: + desc += '[ %s ]' % key + + desc += ' ' + while True: + # prompt for user input + if password: + answer = getpass.getpass(desc).strip() + else: + answer = input(desc).strip() + + # No user input: use default + if not answer: + if default: + answer = default + else: + continue + + # Have user input: validate answer + if yes_or_no: + if answer not in ['yes', 'no']: + print(Utils.highlight('\nPlease answer yes or no\n')) + continue + else: + return answer == 'yes' + else: + if validate: + try: + return validate(answer) + except InvalidAnswer as e: + print(Utils.highlight('\n%s\n' % e)) + continue + else: + return answer + + @staticmethod + def validate_port(port): + try: + port = int(port) + except ValueError: + raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port)) + + if port <= 0 or port > 65535: + raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port)) + + return port + + +class InvalidAnswer(Exception): + def __init__(self, msg): + Exception.__init__(self) + self.msg = msg + def __str__(self): + return self.msg + +class InvalidParams(Exception): + def __init__(self, msg): + Exception.__init__(self) + self.msg = msg + def __str__(self): + return self.msg + +### END of Utils +#################### + +class EnvManager(object): + '''System environment and directory layout''' + def __init__(self): + self.install_path = os.path.dirname(os.path.abspath(__file__)) + self.top_dir = os.path.dirname(self.install_path) + self.bin_dir = os.path.join(self.install_path, 'seafile', 'bin') + self.central_config_dir = os.path.join(self.top_dir, 'conf') + self.central_pids_dir = os.path.join(self.top_dir, 
'pids') + self.central_logs_dir = os.path.join(self.top_dir, 'logs') + Utils.must_mkdir(self.central_config_dir) + + def check_pre_condiction(self): + def error_if_not_exists(path): + if not os.path.exists(path): + Utils.error('"%s" not found' % path) + + paths = [ + os.path.join(self.install_path, 'seafile'), + os.path.join(self.install_path, 'seahub'), + os.path.join(self.install_path, 'runtime'), + ] + + for path in paths: + error_if_not_exists(path) + + if os.path.exists(ccnet_config.ccnet_dir): + Utils.error('Ccnet config dir \"%s\" already exists.' % ccnet_config.ccnet_dir) + + def get_seahub_env(self): + '''Prepare for seahub syncdb''' + env = dict(os.environ) + env['CCNET_CONF_DIR'] = ccnet_config.ccnet_dir + env['SEAFILE_CONF_DIR'] = seafile_config.seafile_dir + self.setup_python_path(env) + return env + + def setup_python_path(self, env): + '''And PYTHONPATH and CCNET_CONF_DIR/SEAFILE_CONF_DIR to env, which is + needed by seahub + + ''' + install_path = self.install_path + pro_pylibs_dir = os.path.join(install_path, 'pro', 'python') + extra_python_path = [ + pro_pylibs_dir, + + os.path.join(install_path, 'seahub', 'thirdpart'), + + os.path.join(install_path, 'seafile/lib/python3/site-packages'), + os.path.join(install_path, 'seafile/lib64/python3/site-packages'), + ] + + for path in extra_python_path: + Utils.prepend_env_value('PYTHONPATH', path, env=env) + + def get_binary_env(self): + '''Set LD_LIBRARY_PATH for seafile server executables''' + env = dict(os.environ) + lib_dir = os.path.join(self.install_path, 'seafile', 'lib') + lib64_dir = os.path.join(self.install_path, 'seafile', 'lib64') + Utils.prepend_env_value('LD_LIBRARY_PATH', lib_dir, env=env) + Utils.prepend_env_value('LD_LIBRARY_PATH', lib64_dir, env=env) + return env + +class AbstractConfigurator(object): + '''Abstract Base class for ccnet/seafile/seahub/db configurator''' + def __init__(self): + pass + + def ask_questions(self): + raise NotImplementedError + + def generate(self): + raise NotImplementedError + + +class AbstractDBConfigurator(AbstractConfigurator): + '''Abstract class for database related configuration''' + def __init__(self): + AbstractConfigurator.__init__(self) + self.mysql_host = 'localhost' + self.mysql_port = 3306 + self.unix_socket = "/var/run/mysqld/mysqld.sock" + + self.use_existing_db = False + + self.seafile_mysql_user = '' + self.seafile_mysql_password = '' + self.seafile_mysql_userhost = '127.0.0.1' + + self.root_password = '' + self.root_conn = '' + + self.ccnet_db_name = '' + self.seafile_db_name = '' + self.seahub_db_name = '' + + self.seahub_admin_email = '' + self.seahub_admin_password = '' + + @staticmethod + def ask_use_existing_db(): + def validate(choice): + if choice not in ['1', '2']: + raise InvalidAnswer('Please choose 1 or 2') + + return choice == '2' + + question = '''\ +------------------------------------------------------- +Please choose a way to initialize seafile databases: +------------------------------------------------------- +''' + + note = '''\ +[1] Create new ccnet/seafile/seahub databases +[2] Use existing ccnet/seafile/seahub databases +''' + return Utils.ask_question(question, + key='1 or 2', + note=note, + validate=validate) + + def validate_mysql_host(self, host): + if not re.match(r'^[a-zA-Z0-9_\-\.]+$', host): + raise InvalidAnswer('%s is not a valid host' % Utils.highlight(host)) + + if host == 'localhost': + host = '127.0.0.1' + return host + + def ask_mysql_host(self): + question = 'What is the host of mysql server?' 
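# Illustration only (not called by the installer): the PYTHONPATH handling
# used by EnvManager.setup_python_path() above. New entries are prepended so
# that the bundled third-party packages shadow any system-wide copies.
# 'install_path' is a placeholder for the extracted seafile-server directory.
import os

def build_seahub_env(install_path):
    env = dict(os.environ)
    extra = [
        os.path.join(install_path, 'seahub', 'thirdpart'),
        os.path.join(install_path, 'seafile', 'lib', 'python3', 'site-packages'),
    ]
    for path in extra:
        # same effect as Utils.prepend_env_value('PYTHONPATH', path, env=env)
        current = env.get('PYTHONPATH', '')
        env['PYTHONPATH'] = path + (':' + current if current else '')
    return env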
+ key = 'mysql server host' + default = 'localhost' + self.mysql_host = Utils.ask_question(question, + key=key, + default=default, + validate=self.validate_mysql_host) + + def validate_mysql_user_host(self, host): + MYSQL_HOST_RE = re.compile(r'^(%|[^.].+\..+[^.])$') + if not MYSQL_HOST_RE.match(host): + raise InvalidAnswer('invalid mysql user host: {}'.format(host)) + return host + + def ask_mysql_user_host(self): + self.seafile_mysql_userhost = Utils.ask_question( + 'From which hosts could the mysql account be used?', + key='mysql user host', + default='%', + validate=self.validate_mysql_user_host + ) + + def ask_mysql_port(self): + question = 'What is the port of mysql server?' + key = 'mysql server port' + default = '3306' + port = Utils.ask_question(question, + key=key, + default=default, + validate=Utils.validate_port) + + # self.check_mysql_server(host, port) + self.mysql_port = port + + def ask_mysql_host_port(self): + self.ask_mysql_host() + if self.mysql_host != '127.0.0.1': + self.ask_mysql_user_host() + self.ask_mysql_port() + + def check_mysql_server(self, host, port): + print('\nverifying mysql server running ... ', end=' ') + try: + dummy = pymysql.connect(host=host, port=port) + except Exception: + print() + raise InvalidAnswer('Failed to connect to mysql server at "%s:%s"' \ + % (host, port)) + + print('done') + + def check_mysql_user(self, user, password, host=None, unix_socket=None): + print('\nverifying password of user %s ... ' % user, end=' ') + kwargs = dict(port=self.mysql_port, + user=user, + passwd=password) + if unix_socket: + kwargs['unix_socket'] = unix_socket + else: + kwargs['host'] = host or self.mysql_host + + try: + conn = pymysql.connect(**kwargs) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + raise InvalidAnswer('Failed to connect to mysql server using user "%s" and password "***": %s' \ + % (user, e.args[1])) + else: + raise InvalidAnswer('Failed to connect to mysql server using user "%s" and password "***": %s' \ + % (user, e)) + + print('done') + return conn + + def create_seahub_admin(self): + try: + conn = pymysql.connect(host=self.mysql_host, + port=self.mysql_port, + user=self.seafile_mysql_user, + passwd=self.seafile_mysql_password, + db=self.ccnet_db_name) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to connect to mysql database %s: %s' % (self.ccnet_db_name, e.args[1])) + else: + Utils.error('Failed to connect to mysql database %s: %s' % (self.ccnet_db_name, e)) + + cursor = conn.cursor() + sql = '''\ +CREATE TABLE IF NOT EXISTS EmailUser (id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, email VARCHAR(255), passwd CHAR(64), is_staff BOOL NOT NULL, is_active BOOL NOT NULL, ctime BIGINT, UNIQUE INDEX (email)) ENGINE=INNODB''' + + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to create ccnet user table: %s' % e.args[1]) + else: + Utils.error('Failed to create ccnet user table: %s' % e) + + sql = '''REPLACE INTO EmailUser(email, passwd, is_staff, is_active, ctime) VALUES ('%s', '%s', 1, 1, 0)''' \ + % (seahub_config.admin_email, seahub_config.hashed_admin_password()) + + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to create admin user: %s' % e.args[1]) + else: + Utils.error('Failed to create admin user: %s' % e) + + conn.commit() + + def ask_questions(self): + '''Ask questions and do database operations''' + raise 
NotImplementedError + + +class NewDBConfigurator(AbstractDBConfigurator): + '''Handles the case of creating new mysql databases for ccnet/seafile/seahub''' + def __init__(self): + AbstractDBConfigurator.__init__(self) + + def ask_questions(self): + self.ask_mysql_host_port() + + self.ask_root_password() + self.ask_seafile_mysql_user_password() + + self.ask_db_names() + + def generate(self): + if not self.mysql_user_exists(self.seafile_mysql_user): + self.create_user() + self.create_databases() + + def validate_root_passwd(self, password): + try: + self.root_conn = self.check_mysql_user('root', password) + except InvalidAnswer: + # For MariaDB on Ubuntu 16.04, the msyql root user can only be + # accessed from localhost with unix socket. So we retry with + # localhost when failing with 127.0.0.1. + if self.mysql_host == '127.0.0.1': + self.root_conn = self.check_mysql_user('root', password, unix_socket=self.unix_socket) + else: + raise + return password + + def ask_root_password(self): + question = 'What is the password of the mysql root user?' + key = 'root password' + self.root_password = Utils.ask_question(question, + key=key, + validate=self.validate_root_passwd, + password=True) + + def mysql_user_exists(self, user): + cursor = self.root_conn.cursor() + + sql = '''SELECT EXISTS(SELECT 1 FROM mysql.user WHERE user = '%s' and host = '%s')''' % \ + (user, self.seafile_mysql_userhost) + + try: + cursor.execute(sql) + return cursor.fetchall()[0][0] + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to check mysql user %s@%s: %s' % \ + (user, self.seafile_mysql_userhost, e.args[1])) + else: + Utils.error('Failed to check mysql user %s@%s: %s' % \ + (user, self.seafile_mysql_userhost, e)) + finally: + cursor.close() + + + def ask_seafile_mysql_user_password(self): + def validate(user): + if user == 'root': + raise InvalidAnswer( + 'Using mysql "root" user is not allowed for security reasons. Please specify a different database user.' + ) + else: + question = 'Enter the password for mysql user "%s":' % Utils.highlight(user) + key = 'password for %s' % user + password = Utils.ask_question(question, key=key, password=True) + # If the user already exists, check the password here + if self.mysql_user_exists(user): + self.check_mysql_user(user, password) + self.seafile_mysql_password = password + + return user + + + question = 'Enter the name for mysql user of seafile. It would be created if not exists.' 
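# A hedged alternative sketch, not what the installer does: the same
# "does this mysql account exist?" lookup as mysql_user_exists() above, but
# with a parameterized query instead of interpolating user input into SQL.
# The connection values are placeholders.
import pymysql

def user_exists(user, host):
    conn = pymysql.connect(host='127.0.0.1', port=3306,
                           user='root', passwd='***')   # placeholders
    try:
        with conn.cursor() as cursor:
            cursor.execute(
                'SELECT EXISTS(SELECT 1 FROM mysql.user '
                'WHERE user = %s AND host = %s)',
                (user, host))
            return bool(cursor.fetchone()[0])
    finally:
        conn.close()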
+ key = 'mysql user for seafile' + default = 'seafile' + self.seafile_mysql_user = Utils.ask_question(question, + key=key, + default=default, + validate=validate) + + def ask_db_name(self, program, default): + question = 'Enter the database name for %s:' % program + key = '%s database' % program + return Utils.ask_question(question, + key=key, + default=default, + validate=self.validate_db_name) + + def ask_db_names(self): + self.ccnet_db_name = self.ask_db_name('ccnet-server', 'ccnet-db') + self.seafile_db_name = self.ask_db_name('seafile-server', 'seafile-db') + self.seahub_db_name = self.ask_db_name('seahub', 'seahub-db') + + def validate_db_name(self, db_name): + return db_name + + def create_user(self): + cursor = self.root_conn.cursor() + sql = '''CREATE USER '{}'@'{}' IDENTIFIED BY '{}' '''.format( + self.seafile_mysql_user, + self.seafile_mysql_userhost, + self.seafile_mysql_password + ) + + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to create mysql user {}@{}: {}'.format(self.seafile_mysql_user, self.seafile_mysql_userhost, e.args[1])) + else: + Utils.error('Failed to create mysql user {}@{}: {}'.format(self.seafile_mysql_user, self.seafile_mysql_userhost, e)) + finally: + cursor.close() + + + def create_db(self, db_name): + cursor = self.root_conn.cursor() + sql = '''CREATE DATABASE IF NOT EXISTS `%s` CHARACTER SET UTF8''' \ + % db_name + + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to create database %s: %s' % (db_name, e.args[1])) + else: + Utils.error('Failed to create database %s: %s' % (db_name, e)) + finally: + cursor.close() + + def grant_db_permission(self, db_name): + cursor = self.root_conn.cursor() + sql = '''GRANT ALL PRIVILEGES ON `{}`.* to `{}`@`{}` '''.format( + db_name, + self.seafile_mysql_user, + self.seafile_mysql_userhost + ) + + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to grant permission of database %s: %s' % (db_name, e.args[1])) + else: + Utils.error('Failed to grant permission of database %s: %s' % (db_name, e)) + finally: + cursor.close() + + def create_databases(self): + self.create_db(self.ccnet_db_name) + self.create_db(self.seafile_db_name) + self.create_db(self.seahub_db_name) + + if self.seafile_mysql_user != 'root': + self.grant_db_permission(self.ccnet_db_name) + self.grant_db_permission(self.seafile_db_name) + self.grant_db_permission(self.seahub_db_name) + + +class ExistingDBConfigurator(AbstractDBConfigurator): + '''Handles the case of use existing mysql databases for ccnet/seafile/seahub''' + def __init__(self): + AbstractDBConfigurator.__init__(self) + self.use_existing_db = True + + def ask_questions(self): + self.ask_mysql_host_port() + + self.ask_existing_mysql_user_password() + + self.ccnet_db_name = self.ask_db_name('ccnet') + self.seafile_db_name = self.ask_db_name('seafile') + self.seahub_db_name = self.ask_db_name('seahub') + + def generate(self): + pass + + def ask_existing_mysql_user_password(self): + def validate(user): + if user == 'root': + raise InvalidAnswer( + 'Using root is not allowed for security reasons. Please specify a different database user.' + ) + question = 'What is the password for mysql user "%s"?' 
% Utils.highlight(user) + key = 'password for %s' % user + password = Utils.ask_question(question, key=key, password=True) + self.check_mysql_user(user, password) + self.seafile_mysql_password = password + return user + + question = 'Which mysql user to use for seafile?' + key = 'mysql user for seafile' + self.seafile_mysql_user = Utils.ask_question(question, + key=key, + validate=validate) + + def validate_db_name(self, db_name): + self.check_user_db_access(db_name) + return db_name + + def ask_db_name(self, program): + question = 'Enter the existing database name for %s:' % program + key = '%s database' % program + return Utils.ask_question(question, + key=key, + validate=self.validate_db_name) + + def check_user_db_access(self, db_name): + user = self.seafile_mysql_user + password = self.seafile_mysql_password + + print('\nverifying user "%s" access to database %s ... ' % (user, db_name), end=' ') + try: + conn = pymysql.connect(host=self.mysql_host, + port=self.mysql_port, + user=user, + passwd=password, + db=db_name) + + cursor = conn.cursor() + cursor.execute('show tables') + cursor.close() + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + raise InvalidAnswer('Failed to access database %s using user "%s" and password "***": %s' \ + % (db_name, user, e.args[1])) + else: + raise InvalidAnswer('Failed to access database %s using user "%s" and password "***": %s' \ + % (db_name, user, e)) + + print('done') + + return conn + + +class CcnetConfigurator(AbstractConfigurator): + SERVER_NAME_REGEX = r'^[a-zA-Z0-9_\-]{3,15}$' + SERVER_IP_OR_DOMAIN_REGEX = r'^[^.].+\..+[^.]$' + + def __init__(self): + '''Initialize default values of ccnet configuration''' + AbstractConfigurator.__init__(self) + self.ccnet_dir = os.path.join(env_mgr.top_dir, 'ccnet') + self.port = 10001 + self.server_name = None + self.ip_or_domain = None + self.ccnet_conf = os.path.join(env_mgr.central_config_dir, 'ccnet.conf') + + def ask_questions(self): + if not self.server_name: + self.ask_server_name() + if not self.ip_or_domain: + self.ask_server_ip_or_domain() + # self.ask_port() + + def generate(self): + print('Generating ccnet configuration ...\n') + with open(self.ccnet_conf, 'w') as fp: + fp.write('[General]') + + self.generate_db_conf() + + Utils.must_mkdir(self.ccnet_dir) + + def generate_db_conf(self): + config = Utils.read_config(self.ccnet_conf) + # [Database] + # ENGINE= + # HOST= + # USER= + # PASSWD= + # DB= + db_section = 'Database' + if not config.has_section(db_section): + config.add_section(db_section) + config.set(db_section, 'ENGINE', 'mysql') + config.set(db_section, 'HOST', db_config.mysql_host) + config.set(db_section, 'PORT', str(db_config.mysql_port)) + config.set(db_section, 'USER', db_config.seafile_mysql_user) + config.set(db_section, 'PASSWD', db_config.seafile_mysql_password) + config.set(db_section, 'DB', db_config.ccnet_db_name) + config.set(db_section, 'CONNECTION_CHARSET', 'utf8') + + Utils.write_config(config, self.ccnet_conf) + + def validate_server_name(self, name): + if not re.match(self.SERVER_NAME_REGEX, name): + raise InvalidAnswer('%s is not a valid name' % Utils.highlight(name)) + return name + + def ask_server_name(self): + question = 'What is the name of the server? It will be displayed on the client.' 
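# Illustration (with assumed example values): the ccnet.conf that
# CcnetConfigurator.generate()/generate_db_conf() above end up writing,
# rebuilt with the same case-sensitive ConfigParser helpers. The output
# path is a placeholder, not the real conf/ccnet.conf.
from configparser import ConfigParser

cp = ConfigParser()
cp.optionxform = str            # keep option names like ENGINE/HOST uppercase
cp.add_section('General')
cp.add_section('Database')
for key, value in [('ENGINE', 'mysql'), ('HOST', '192.168.1.10'),
                   ('PORT', '3306'), ('USER', 'seafile'),
                   ('PASSWD', '***'), ('DB', 'ccnet-db'),
                   ('CONNECTION_CHARSET', 'utf8')]:
    cp.set('Database', key, value)
with open('ccnet.conf.example', 'w') as fp:
    cp.write(fp)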
+ key = 'server name' + note = '3 - 15 letters or digits' + self.server_name = Utils.ask_question(question, + key=key, + note=note, + validate=self.validate_server_name) + + def validate_server_ip(self, ip_or_domain): + if not re.match(self.SERVER_IP_OR_DOMAIN_REGEX, ip_or_domain): + raise InvalidAnswer('%s is not a valid ip or domain' % ip_or_domain) + return ip_or_domain + + def ask_server_ip_or_domain(self): + question = 'What is the ip or domain of the server?' + key = 'This server\'s ip or domain' + note = 'For example: www.mycompany.com, 192.168.1.101' + self.ip_or_domain = Utils.ask_question(question, + key=key, + note=note, + validate=self.validate_server_ip) + + def ask_port(self): + def validate(port): + return Utils.validate_port(port) + + question = 'Which port do you want to use for the ccnet server?' + key = 'ccnet server port' + default = 10001 + self.port = Utils.ask_question(question, + key=key, + default=default, + validate=validate) + + def do_syncdb(self): + print('----------------------------------------') + print('Now creating ccnet database tables ...\n') + print('----------------------------------------') + + try: + conn = pymysql.connect(host=db_config.mysql_host, + port=db_config.mysql_port, + user=db_config.seafile_mysql_user, + passwd=db_config.seafile_mysql_password, + db=db_config.ccnet_db_name) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to connect to mysql database %s: %s' % (db_config.ccnet_db_name, e.args[1])) + else: + Utils.error('Failed to connect to mysql database %s: %s' % (db_config.ccnet_db_name, e)) + + cursor = conn.cursor() + + sql_file = os.path.join(env_mgr.install_path, 'sql', 'mysql', 'ccnet.sql') + with open(sql_file, 'r') as fp: + content = fp.read() + + sqls = [line.strip() for line in content.split(';') if line.strip()] + for sql in sqls: + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to init ccnet database: %s' % e.args[1]) + else: + Utils.error('Failed to init ccnet database: %s' % e) + + conn.commit() + + +class SeafileConfigurator(AbstractConfigurator): + def __init__(self): + AbstractConfigurator.__init__(self) + self.seafile_dir = os.path.join(env_mgr.top_dir, 'seafile-data') + self.port = 12001 + self.fileserver_port = None + self.seafile_conf = os.path.join(env_mgr.central_config_dir, 'seafile.conf') + + def ask_questions(self): + # if not self.seafile_dir: + # self.ask_seafile_dir() + # self.ask_port() + if not self.fileserver_port: + self.ask_fileserver_port() + + def generate(self): + print('Generating seafile configuration ...\n') + with open(self.seafile_conf, 'w') as fp: + fp.write('[fileserver]\nport=%d\n' % self.fileserver_port) + + self.generate_db_conf() + + ## use default seafile-data path: seafile_data_dir=${TOPDIR}/seafile-data + + print('done') + + def generate_db_conf(self): + config = Utils.read_config(self.seafile_conf) + # [database] + # type= + # host= + # user= + # password= + # db_name= + # unix_socket= + db_section = 'database' + if not config.has_section(db_section): + config.add_section(db_section) + config.set(db_section, 'type', 'mysql') + config.set(db_section, 'host', db_config.mysql_host) + config.set(db_section, 'port', str(db_config.mysql_port)) + config.set(db_section, 'user', db_config.seafile_mysql_user) + config.set(db_section, 'password', db_config.seafile_mysql_password) + config.set(db_section, 'db_name', db_config.seafile_db_name) + config.set(db_section, 
'connection_charset', 'utf8') + + Utils.write_config(config, self.seafile_conf) + + def validate_seafile_dir(self, path): + if os.path.exists(path): + raise InvalidAnswer('%s already exists' % Utils.highlight(path)) + return path + + def ask_seafile_dir(self): + question = 'Where do you want to put your seafile data?' + key = 'seafile-data' + note = 'Please use a volume with enough free space' + default = os.path.join(env_mgr.top_dir, 'seafile-data') + self.seafile_dir = Utils.ask_question(question, + key=key, + note=note, + default=default, + validate=self.validate_seafile_dir) + + def ask_port(self): + def validate(port): + port = Utils.validate_port(port) + if port == ccnet_config.port: + raise InvalidAnswer('%s is used by ccnet server, choose another one' \ + % Utils.highlight(port)) + return port + + question = 'Which port do you want to use for the seafile server?' + key = 'seafile server port' + default = 12001 + self.port = Utils.ask_question(question, + key=key, + default=default, + validate=validate) + + def ask_fileserver_port(self): + question = 'Which port do you want to use for the seafile fileserver?' + key = 'seafile fileserver port' + default = 8082 + self.fileserver_port = Utils.ask_question(question, + key=key, + default=default, + validate=Utils.validate_port) + + def do_syncdb(self): + print('----------------------------------------') + print('Now creating seafile database tables ...\n') + print('----------------------------------------') + + try: + conn = pymysql.connect(host=db_config.mysql_host, + port=db_config.mysql_port, + user=db_config.seafile_mysql_user, + passwd=db_config.seafile_mysql_password, + db=db_config.seafile_db_name) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to connect to mysql database %s: %s' % (db_config.seafile_db_name, e.args[1])) + else: + Utils.error('Failed to connect to mysql database %s: %s' % (db_config.seafile_db_name, e)) + + cursor = conn.cursor() + + sql_file = os.path.join(env_mgr.install_path, 'sql', 'mysql', 'seafile.sql') + with open(sql_file, 'r') as fp: + content = fp.read() + + sqls = [line.strip() for line in content.split(';') if line.strip()] + for sql in sqls: + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to init seafile database: %s' % e.args[1]) + else: + Utils.error('Failed to init seafile database: %s' % e) + + conn.commit() + +class SeahubConfigurator(AbstractConfigurator): + def __init__(self): + AbstractConfigurator.__init__(self) + self.admin_email = '' + self.admin_password = '' + self.seahub_settings_py = os.path.join(env_mgr.central_config_dir, 'seahub_settings.py') + + def hashed_admin_password(self): + return hashlib.sha1(self.admin_password).hexdigest() # pylint: disable=E1101 + + def ask_questions(self): + pass + + def generate(self): + '''Generating seahub_settings.py''' + print('Generating seahub configuration ...\n') + with open(self.seahub_settings_py, 'w') as fp: + self.write_utf8_comment(fp) + fp.write('\n') + self.write_secret_key(fp) + fp.write('\n') + self.write_database_config(fp) + + def write_utf8_comment(self, fp): + fp.write('# -*- coding: utf-8 -*-') + + def write_secret_key(self, fp): + script = os.path.join(env_mgr.install_path, 'seahub/tools/secret_key_generator.py') + cmd = [ + Utils.get_python_executable(), + script, + ] + key = Utils.get_command_output(cmd).strip() + fp.write('SECRET_KEY = "%s"' % key) + + def write_database_config(self, fp): + template = 
'''\ +\nDATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.mysql', + 'NAME': '%(name)s', + 'USER': '%(username)s', + 'PASSWORD': '%(password)s', + 'HOST': '%(host)s', + 'PORT': '%(port)s', + 'OPTIONS': {'charset': 'utf8mb4'}, + } +} + +''' + text = template % dict(name=db_config.seahub_db_name, + username=db_config.seafile_mysql_user, + password=db_config.seafile_mysql_password, + host=db_config.mysql_host, + port=db_config.mysql_port) + + fp.write(text) + + def ask_admin_email(self): + print() + print('----------------------------------------') + print('Now let\'s create the admin account') + print('----------------------------------------') + def validate(email): + # whitespace is not allowed + if re.match(r'[\s]', email): + raise InvalidAnswer('%s is not a valid email address' % Utils.highlight(email)) + # must be a valid email address + if not re.match(r'^.+@.*\..+$', email): + raise InvalidAnswer('%s is not a valid email address' % Utils.highlight(email)) + + return email + + key = 'admin email' + question = 'What is the ' + Utils.highlight('email') + ' for the admin account?' + self.admin_email = Utils.ask_question(question, + key=key, + validate=validate) + + def ask_admin_password(self): + def validate(password): + key = 'admin password again' + question = 'Enter the ' + Utils.highlight('password again:') + password_again = Utils.ask_question(question, + key=key, + password=True) + + if password_again != password: + raise InvalidAnswer('password mismatch') + + return password + + key = 'admin password' + question = 'What is the ' + Utils.highlight('password') + ' for the admin account?' + self.admin_password = Utils.ask_question(question, + key=key, + password=True, + validate=validate) + + def do_syncdb(self): + print('----------------------------------------') + print('Now creating seahub database tables ...\n') + print('----------------------------------------') + + try: + conn = pymysql.connect(host=db_config.mysql_host, + port=db_config.mysql_port, + user=db_config.seafile_mysql_user, + passwd=db_config.seafile_mysql_password, + db=db_config.seahub_db_name) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to connect to mysql database %s: %s' % (db_config.seahub_db_name, e.args[1])) + else: + Utils.error('Failed to connect to mysql database %s: %s' % (db_config.seahub_db_name, e)) + + cursor = conn.cursor() + + sql_file = os.path.join(env_mgr.install_path, 'seahub', 'sql', 'mysql.sql') + with open(sql_file, 'r') as fp: + content = fp.read() + + sqls = [line.strip() for line in content.split(';') if line.strip()] + for sql in sqls: + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to init seahub database: %s' % e.args[1]) + else: + Utils.error('Failed to init seahub database: %s' % e) + + conn.commit() + + def prepare_avatar_dir(self): + # media_dir=${INSTALLPATH}/seahub/media + # orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + # dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # if [[ ! 
-d ${dest_avatar_dir} ]]; then + # mkdir -p "${TOPDIR}/seahub-data" + # mv "${orig_avatar_dir}" "${dest_avatar_dir}" + # ln -s ../../../seahub-data/avatars ${media_dir} + # fi + + try: + media_dir = os.path.join(env_mgr.install_path, 'seahub', 'media') + orig_avatar_dir = os.path.join(media_dir, 'avatars') + + seahub_data_dir = os.path.join(env_mgr.top_dir, 'seahub-data') + dest_avatar_dir = os.path.join(seahub_data_dir, 'avatars') + + if os.path.exists(dest_avatar_dir): + return + + if not os.path.exists(seahub_data_dir): + os.mkdir(seahub_data_dir) + + shutil.move(orig_avatar_dir, dest_avatar_dir) + os.symlink('../../../seahub-data/avatars', orig_avatar_dir) + except Exception as e: + Utils.error('Failed to prepare seahub avatars dir: %s' % e) + +class SeafDavConfigurator(AbstractConfigurator): + def __init__(self): + AbstractConfigurator.__init__(self) + self.seafdav_conf = None + + def ask_questions(self): + pass + + def generate(self): + self.seafdav_conf = os.path.join(env_mgr.central_config_dir, 'seafdav.conf') + text = ''' +[WEBDAV] +enabled = false +port = 8080 +share_name = / +''' + + with open(self.seafdav_conf, 'w') as fp: + fp.write(text) + +class GunicornConfigurator(AbstractConfigurator): + def __init__(self): + AbstractConfigurator.__init__(self) + self.gunicorn_conf = None + + def ask_questions(self): + pass + + def generate(self): + self.gunicorn_conf = os.path.join(env_mgr.central_config_dir, 'gunicorn.conf.py') + template = ''' +import os + +daemon = True +workers = 5 + +# default localhost:8000 +bind = "127.0.0.1:8000" + +# Pid +pids_dir = '%(pids_dir)s' +pidfile = os.path.join(pids_dir, 'seahub.pid') + +# for file upload, we need a longer timeout value (default is only 30s, too short) +timeout = 1200 + +limit_request_line = 8190 +''' + + text = template % dict(pids_dir=env_mgr.central_pids_dir, + logs_dir=env_mgr.central_logs_dir) + + with open(self.gunicorn_conf, 'w') as fp: + fp.write(text) + +class UserManualHandler(object): + def __init__(self): + self.src_docs_dir = os.path.join(env_mgr.install_path, 'seafile', 'docs') + self.library_template_dir = None + + def copy_user_manuals(self): + self.library_template_dir = os.path.join(seafile_config.seafile_dir, 'library-template') + Utils.must_mkdir(self.library_template_dir) + + pattern = os.path.join(self.src_docs_dir, '*.doc') + + for doc in glob.glob(pattern): + Utils.must_copy(doc, self.library_template_dir) + +def report_config(): + print() + print('---------------------------------') + print('This is your configuration') + print('---------------------------------') + print() + + template = '''\ + server name: %(server_name)s + server ip/domain: %(ip_or_domain)s + + seafile data dir: %(seafile_dir)s + fileserver port: %(fileserver_port)s + + database: %(use_existing_db)s + ccnet database: %(ccnet_db_name)s + seafile database: %(seafile_db_name)s + seahub database: %(seahub_db_name)s + database user: %(db_user)s + +''' + config = { + 'server_name' : ccnet_config.server_name, + 'ip_or_domain' : ccnet_config.ip_or_domain, + + 'seafile_dir' : seafile_config.seafile_dir, + 'fileserver_port' : seafile_config.fileserver_port, + + 'admin_email' : seahub_config.admin_email, + + + 'use_existing_db': 'use existing' if db_config.use_existing_db else 'create new', + 'ccnet_db_name': db_config.ccnet_db_name, + 'seafile_db_name': db_config.seafile_db_name, + 'seahub_db_name': db_config.seahub_db_name, + 'db_user': db_config.seafile_mysql_user + } + + print(template % config) + + if need_pause: + print() + 
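# Rough illustration of how the gunicorn.conf.py written by
# GunicornConfigurator above is consumed: seahub.sh starts gunicorn with
# "-c <conf> --preload" (see start_seahub in seahub.sh). Paths here are
# placeholders and assume the environment prepared by prepare_env().
import subprocess

subprocess.check_call([
    'python3', 'seahub/thirdpart/bin/gunicorn',   # placeholder gunicorn path
    'seahub.wsgi:application',
    '-c', 'conf/gunicorn.conf.py',                # file generated above
    '--preload',
])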
print('---------------------------------') + print('Press ENTER to continue, or Ctrl-C to abort') + print('---------------------------------') + + input() + + +def create_seafile_server_symlink(): + print('\ncreating seafile-server-latest symbolic link ... ', end=' ') + seafile_server_symlink = os.path.join(env_mgr.top_dir, 'seafile-server-latest') + try: + os.symlink(os.path.basename(env_mgr.install_path), seafile_server_symlink) + except Exception as e: + print('\n') + Utils.error('Failed to create symbolic link %s: %s' % (seafile_server_symlink, e)) + else: + print('done\n\n') + +def set_file_perm(): + filemode = 0o600 + dirmode = 0o700 + files = [ + seahub_config.seahub_settings_py, + ] + dirs = [ + env_mgr.central_config_dir, + ccnet_config.ccnet_dir, + seafile_config.seafile_dir, + seahub_config.seahub_settings_py, + ] + for fpath in files: + os.chmod(fpath, filemode) + for dpath in dirs: + os.chmod(dpath, dirmode) + +env_mgr = EnvManager() +ccnet_config = CcnetConfigurator() +seafile_config = SeafileConfigurator() +seafdav_config = SeafDavConfigurator() +gunicorn_config = GunicornConfigurator() +seahub_config = SeahubConfigurator() +user_manuals_handler = UserManualHandler() +# Would be created after AbstractDBConfigurator.ask_use_existing_db() +db_config = None +need_pause = True + +def get_param_val(arg, env, default=None): + return arg or os.environ.get(env, default) + +def check_params(args): + server_name = 'seafile' + ccnet_config.server_name = ccnet_config.validate_server_name(server_name) + + server_ip = get_param_val(args.server_ip, 'SERVER_IP', '127.0.0.1') + ccnet_config.ip_or_domain = ccnet_config.validate_server_ip(server_ip) + + fileserver_port = get_param_val(args.fileserver_port, 'FILESERVER_PORT', '8082') + seafile_config.fileserver_port = Utils.validate_port(fileserver_port) + + seafile_dir = get_param_val(args.seafile_dir, 'SEAFILE_DIR', + os.path.join(env_mgr.top_dir, 'seafile-data')) + seafile_config.seafile_dir = seafile_config.validate_seafile_dir(seafile_dir) + + global db_config + + use_existing_db = get_param_val(args.use_existing_db, 'USE_EXISTING_DB', '0') + # pylint: disable=redefined-variable-type + if use_existing_db == '0': + db_config = NewDBConfigurator() + elif use_existing_db == '1': + db_config = ExistingDBConfigurator() + else: + raise InvalidParams('Invalid use existing db parameter, the value can only be 0 or 1') + + mysql_host = get_param_val(args.mysql_host, 'MYSQL_HOST', '127.0.0.1') + if not mysql_host: + raise InvalidParams('Incomplete mysql configuration parameters, ' \ + 'missing mysql host parameter') + db_config.mysql_host = db_config.validate_mysql_host(mysql_host) + + mysql_port = get_param_val(args.mysql_port, 'MYSQL_PORT', '3306') + db_config.mysql_port = Utils.validate_port(mysql_port) + + mysql_user = get_param_val(args.mysql_user, 'MYSQL_USER') + if not mysql_user: + raise InvalidParams('Incomplete mysql configuration parameters, ' \ + 'missing mysql user name parameter') + + mysql_user_passwd = get_param_val(args.mysql_user_passwd, 'MYSQL_USER_PASSWD') + if not mysql_user_passwd: + raise InvalidParams('Incomplete mysql configuration parameters, ' \ + 'missing mysql user password parameter') + + ccnet_db = get_param_val(args.ccnet_db, 'CCNET_DB', 'ccnet_db') + if not ccnet_db: + raise InvalidParams('Incomplete mysql configuration parameters, ' \ + 'missing ccnet db name parameter') + + seafile_db = get_param_val(args.seafile_db, 'SEAFILE_DB', 'seafile_db') + if not seafile_db: + raise InvalidParams('Incomplete mysql 
configuration parameters, ' \ + 'missing seafile db name parameter') + + seahub_db = get_param_val(args.seahub_db, 'SEAHUB_DB', 'seahub_db') + if not seahub_db: + raise InvalidParams('Incomplete mysql configuration parameters, ' \ + 'missing seahub db name parameter') + + mysql_user_host = get_param_val(args.mysql_user_host, 'MYSQL_USER_HOST') + mysql_root_passwd = get_param_val(args.mysql_root_passwd, 'MYSQL_ROOT_PASSWD') + + if db_config.use_existing_db: + db_config.check_mysql_user(mysql_user, mysql_user_passwd) + db_config.seafile_mysql_user = mysql_user + db_config.seafile_mysql_password = mysql_user_passwd + db_config.ccnet_db_name = db_config.validate_db_name(ccnet_db) + db_config.seafile_db_name = db_config.validate_db_name(seafile_db) + db_config.seahub_db_name = db_config.validate_db_name(seahub_db) + else: + if db_config.mysql_host != '127.0.0.1' and not mysql_user_host: + raise InvalidParams('mysql user host parameter is missing in creating new db mode') + if not mysql_user_host: + db_config.seafile_mysql_userhost = '127.0.0.1' + else: + db_config.seafile_mysql_userhost = db_config.validate_mysql_user_host(mysql_user_host) + + if not mysql_root_passwd and "MYSQL_ROOT_PASSWD" not in os.environ: + raise InvalidParams('mysql root password parameter is missing in creating new db mode') + db_config.root_password = db_config.validate_root_passwd(mysql_root_passwd) + + if mysql_user == 'root': + db_config.seafile_mysql_user = 'root' + db_config.seafile_mysql_password = db_config.root_password + else: + if db_config.mysql_user_exists(mysql_user): + db_config.check_mysql_user(mysql_user, mysql_user_passwd) + db_config.seafile_mysql_user = mysql_user + db_config.seafile_mysql_password = mysql_user_passwd + db_config.ccnet_db_name = ccnet_db + db_config.seafile_db_name = seafile_db + db_config.seahub_db_name = seahub_db + + global need_pause + need_pause = False + + +def main(): + if len(sys.argv) > 2 and sys.argv[1] == 'auto': + sys.argv.remove('auto') + parser = argparse.ArgumentParser() + parser.add_argument('-n', '--server-name', help='server name') + parser.add_argument('-i', '--server-ip', help='server ip or domain') + parser.add_argument('-p', '--fileserver-port', help='fileserver port') + parser.add_argument('-d', '--seafile-dir', help='seafile dir to store seafile data') + parser.add_argument('-e', '--use-existing-db', + help='use mysql existing dbs or create new dbs, ' + '0: create new dbs 1: use existing dbs') + parser.add_argument('-o', '--mysql-host', help='mysql host') + parser.add_argument('-t', '--mysql-port', help='mysql port') + parser.add_argument('-u', '--mysql-user', help='mysql user name') + parser.add_argument('-w', '--mysql-user-passwd', help='mysql user password') + parser.add_argument('-q', '--mysql-user-host', help='mysql user host') + parser.add_argument('-r', '--mysql-root-passwd', help='mysql root password') + parser.add_argument('-c', '--ccnet-db', help='ccnet db name') + parser.add_argument('-s', '--seafile-db', help='seafile db name') + parser.add_argument('-b', '--seahub-db', help='seahub db name') + + args = parser.parse_args() + + try: + check_params(args) + except (InvalidAnswer, InvalidParams) as e: + print(Utils.highlight('\n%s\n' % e)) + sys.exit(-1) + + global db_config + + if need_pause: + Utils.welcome() + warnings.filterwarnings('ignore', category=pymysql.Warning) + + env_mgr.check_pre_condiction() + + # Part 1: collect configuration + ccnet_config.ask_questions() + seafile_config.ask_questions() + seahub_config.ask_questions() + + # pylint: 
disable=redefined-variable-type + if not db_config: + if AbstractDBConfigurator.ask_use_existing_db(): + db_config = ExistingDBConfigurator() + else: + db_config = NewDBConfigurator() + + db_config.ask_questions() + + report_config() + + # Part 2: generate configuration + db_config.generate() + ccnet_config.generate() + seafile_config.generate() + seafdav_config.generate() + gunicorn_config.generate() + seahub_config.generate() + + ccnet_config.do_syncdb() + seafile_config.do_syncdb() + seahub_config.do_syncdb() + seahub_config.prepare_avatar_dir() + # db_config.create_seahub_admin() + user_manuals_handler.copy_user_manuals() + create_seafile_server_symlink() + + set_file_perm() + + report_success() + +def report_success(): + message = '''\ + + +----------------------------------------------------------------- +Your seafile server configuration has been finished successfully. +----------------------------------------------------------------- + +run seafile server: ./seafile.sh { start | stop | restart } +run seahub server: ./seahub.sh { start | stop | restart } + +----------------------------------------------------------------- +If you are behind a firewall, remember to allow input/output of these tcp ports: +----------------------------------------------------------------- + +port of seafile fileserver: %(fileserver_port)s +port of seahub: 8000 + +When problems occur, Refer to + + %(server_manual_http)s + +for information. + +''' + + print(message % dict(fileserver_port=seafile_config.fileserver_port, + server_manual_http=SERVER_MANUAL_HTTP)) + + +if __name__ == '__main__': + try: + main() + except KeyboardInterrupt: + print() + print(Utils.highlight('The setup process is aborted')) + print() diff --git a/scripts/setup-seafile-mysql.sh b/scripts/setup-seafile-mysql.sh new file mode 100755 index 0000000000..50c40f6cd0 --- /dev/null +++ b/scripts/setup-seafile-mysql.sh @@ -0,0 +1,58 @@ +#!/bin/bash + +######## +### This script is a wrapper for setup-seafile-mysql.py +######## + +set -e + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") + +cd "$INSTALLPATH" + +python_script=setup-seafile-mysql.py + +function err_and_quit () { + printf "\n\n\033[33mError occured during setup. \nPlease fix possible problems and run the script again.\033[m\n\n" + exit 1; +} + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + err_and_quit + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + err_and_quit + fi + fi +} + +function check_python () { + echo "Checking python on this machine ..." 
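# A hedged usage sketch for the non-interactive "auto" mode that main() and
# check_params() above implement. All values below are placeholders; the
# flags mirror the argparse options defined in main(). Note that
# check_params() currently fixes the server name to "seafile".
import subprocess

subprocess.check_call([
    './setup-seafile-mysql.sh', 'auto',
    '-i', '192.168.1.10',             # server ip or domain
    '-p', '8082',                     # fileserver port
    '-e', '0',                        # 0: create new dbs, 1: use existing dbs
    '-o', '127.0.0.1', '-t', '3306',  # mysql host / port
    '-u', 'seafile', '-w', 'db-password',
    '-r', 'root-password',
])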
+ check_python_executable + echo +} + +check_python; + +export PYTHON=$PYTHON + +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH + +exec $PYTHON "$python_script" "$@" diff --git a/scripts/setup-seafile.sh b/scripts/setup-seafile.sh new file mode 100755 index 0000000000..f9c954e38e --- /dev/null +++ b/scripts/setup-seafile.sh @@ -0,0 +1,740 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") +INSTALLPATH=$(dirname "${SCRIPT}") +TOPDIR=$(dirname "${INSTALLPATH}") +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf +default_pids_dir=${TOPDIR}/pids +default_logs_dir=${TOPDIR}/logs + +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +server_manual_http='https://download.seafile.com/published/seafile-manual/home.md' + +function welcome () { + echo "-----------------------------------------------------------------" + echo "This script will guide you to config and setup your seafile server." + echo -e "\nMake sure you have read seafile server manual at \n\n\t${server_manual_http}\n" + echo -e "Note: This script will guide your to setup seafile server using sqlite3," + echo "which may have problems if your disk is on a NFS/CIFS/USB." + echo "In these cases, we suggest you setup seafile server using MySQL." + echo + echo "Press [ENTER] to continue" + echo "-----------------------------------------------------------------" + read dummy + echo +} + +function err_and_quit () { + printf "\n\n\033[33mError occured during setup. \nPlease fix possible issues and run the script again.\033[m\n\n" + exit 1; +} + +function on_ctrl_c_pressed () { + printf "\n\n\033[33mYou have pressed Ctrl-C. Setup is interrupted.\033[m\n\n" + exit 1; +} + +# clean newly created ccnet/seafile configs when exit on SIGINT +trap on_ctrl_c_pressed 2 + +function check_sanity () { + if ! [[ -d ${INSTALLPATH}/seahub && -d ${INSTALLPATH}/seafile \ + && -d ${INSTALLPATH}/runtime ]]; then + echo + echo "The seafile-server diretory doesn't contain all needed files." + echo "Please make sure you have extracted all files and folders from tarball." + err_and_quit; + fi +} + +function read_yes_no () { + printf "[yes|no] " + read yesno; + while [[ "${yesno}" != "yes" && "${yesno}" != "no" ]] + do + printf "please answer [yes|no] " + read yesno; + done + + if [[ "${yesno}" == "no" ]]; then + return 1; + else + return 0; + fi +} + +function check_existing_ccnet () { + if [[ -d ${default_ccnet_conf_dir} ]]; then + echo "\033[31m Error: \033[0m Ccnet config dir \"${default_ccnet_conf_dir}\" already exists." + echo + exit 1; + fi + echo +} + +function check_existing_seafile () { + if [[ -d ${default_seafile_data_dir} ]]; then + echo "\033[31m Error: \033[0m Seafile server data dir \"${default_seafile_data_dir}\" already exists." + echo + exit 1; + fi + echo +} + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." 
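# A small Python mirror of the shell checks in this script: the interpreter
# version test in check_python_executable() above and the sqlite3 module test
# in check_python_module() further below. Provided only as a sketch of what
# the shell code verifies.
import sys

def check_python():
    if sys.version_info.major != 3:
        raise SystemExit('The current version of python is not 3.x.x, '
                         'please use Python 3.x.x .')
    try:
        import sqlite3  # noqa: F401
    except ImportError:
        raise SystemExit('python-sqlite3 is not installed. Please install it first.')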
+ echo + err_and_quit + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + err_and_quit + fi + fi + + echo "Find python: $PYTHON" + echo +} + +function check_python_module () { + module=$1 + name=$2 + hint=$3 + printf " Checking python module: ${name} ... " + if ! $PYTHON -c "import ${module}" 2>/dev/null 1>&2; then + echo + printf "\033[33m ${name} \033[m is not installed, Please install it first.\n" + if [[ "${hint}" != "" ]]; then + printf "${hint}" + echo + fi + err_and_quit; + fi + echo -e "Done." +} + +function check_python () { + echo "Checking python on this machine ..." + check_python_executable + check_python_module sqlite3 python-sqlite3 + echo +} + +function check_sqlite3 () { + echo -n "Checking for sqlite3 ..." + if ! which sqlite3 2>/dev/null 1>&2; then + echo -e "\nSqlite3 is not found. install it first.\n" + echo "On Debian/Ubuntu: apt-get install sqlite3" + echo "On CentOS/RHEL: yum install sqlite" + err_and_quit; + fi + printf "Done.\n\n" +} + +function check_system_dependency () { + printf "Checking packages needed by seafile ...\n\n" + check_python; + check_sqlite3; + printf "Checking Done.\n\n" +} + +function ask_question () { + question=$1 + default=$2 + key=$3 + printf "${question}" + printf "\n" + if [[ "${default}" != "" && "${default}" != "nodefault" ]] ; then + printf "[default: ${default} ] " + elif [[ "${key}" != "" ]]; then + printf "[${key}]: " + fi +} + +function get_server_name () { + question="What would you like to use as the name of this seafile server?\nYour seafile users will be able to see the name in their seafile client." + hint="You can use a-z, A-Z, 0-9, _ and -, and the length should be 3 ~ 15" + ask_question "${question}\n${hint}" "nodefault" "server name" + read server_name + if [[ "${server_name}" == "" ]]; then + echo + echo "server name cannot be empty" + get_server_name + elif [[ ! ${server_name} =~ ^[a-zA-Z0-9_-]{3,14}$ ]]; then + printf "\n\033[33m${server_name}\033[m is not a valid name.\n" + get_server_name; + fi + echo +} + +function get_server_ip_or_domain () { + question="What is the ip or domain of this server?\nFor example, www.mycompany.com, or, 192.168.1.101" + ask_question "${question}\n" "nodefault" "This server's ip or domain" + read ip_or_domain + if [[ "${ip_or_domain}" == "" ]]; then + echo + echo "ip or domain cannot be empty" + get_server_ip_or_domain + fi + echo +} + +# function get_ccnet_server_port () { +# question="What tcp port do you want to use for ccnet server?" +# hint="10001 is the recommended port." +# default="10001" +# ask_question "${question}\n${hint}" "${default}" +# read server_port +# if [[ "${server_port}" == "" ]]; then +# server_port="${default}" +# fi +# if [[ ! ${server_port} =~ ^[0-9]+$ ]]; then +# echo "\"${server_port}\" is not a valid port number. " +# get_ccnet_server_port +# fi +# echo +# } + +# function get_seafile_server_port () { +# question="What tcp port would you like to use for seafile server?" +# hint="12001 is the recommended port." +# default="12001" +# ask_question "${question}\n${hint}" "${default}" +# read seafile_server_port +# if [[ "${seafile_server_port}" == "" ]]; then +# seafile_server_port="${default}" +# fi +# if [[ ! 
${seafile_server_port} =~ ^[0-9]+$ ]]; then +# echo "\"${seafile_server_port}\" is not a valid port number. " +# get_seafile_server_port +# fi +# echo +# } + +function get_fileserver_port () { + question="What tcp port do you want to use for seafile fileserver?" + hint="8082 is the recommended port." + default="8082" + ask_question "${question}\n${hint}" "${default}" + read fileserver_port + if [[ "${fileserver_port}" == "" ]]; then + fileserver_port="${default}" + fi + if [[ ! ${fileserver_port} =~ ^[0-9]+$ ]]; then + echo "\"${fileserver_port}\" is not a valid port number. " + get_fileserver_port + fi + echo +} + + +# function get_seafile_data_dir () { +# question="Where would you like to store your seafile data?" +# note="Please use a volume with enough free space." +# default=${default_seafile_data_dir} +# ask_question "${question} \n\033[33mNote: \033[m${note}" "${default}" +# read seafile_data_dir +# if [[ "${seafile_data_dir}" == "" ]]; then +# seafile_data_dir=${default} +# fi +# +# if [[ -d ${seafile_data_dir} && -f ${seafile_data_dir}/seafile.conf ]]; then +# echo +# echo "It seems that you have already existing seafile data in ${seafile_data_dir}." +# echo "Would you like to use the existing seafile data?" +# if ! read_yes_no; then +# echo "You have chosen not to use existing seafile data in ${seafile_data_dir}" +# echo "You need to specify a different seafile data directory or remove ${seafile_data_dir} before continuing." +# get_seafile_data_dir +# else +# use_existing_seafile="true" +# fi +# elif [[ -d ${seafile_data_dir} && $(ls -A ${seafile_data_dir}) != "" ]]; then +# echo +# echo "${seafile_data_dir} is an existing non-empty directory. Please specify a different directory" +# echo +# get_seafile_data_dir +# elif [[ ! ${seafile_data_dir} =~ ^/ ]]; then +# echo +# echo "\"${seafile_data_dir}\" is not an absolute path. Please specify an absolute path." +# echo +# get_seafile_data_dir +# elif [[ ! -d $(dirname ${seafile_data_dir}) ]]; then +# echo +# echo "The path $(dirname ${seafile_data_dir}) does not exist." +# echo +# get_seafile_data_dir +# fi +# echo +# } + +function gen_ccnet_conf () { + mkdir -p ${default_conf_dir} + ccnet_conf=${default_conf_dir}/ccnet.conf + if ! $(cat > ${ccnet_conf} < ${seafile_conf} < ${gunicorn_conf} < ${seafdav_conf} < "${default_ccnet_conf_dir}/seafile.ini" + +# ------------------------------------------- +# Generate gunicorn.conf.py +# ------------------------------------------- + +gen_gunicorn_conf; + +# ------------------------------------------- +# Generate seafevents.conf +# ------------------------------------------- + +gen_seafdav_conf; + +# ------------------------------------------- +# generate seahub/settings.py +# ------------------------------------------- +dest_settings_py=${TOPDIR}/conf/seahub_settings.py +seahub_secret_keygen=${INSTALLPATH}/seahub/tools/secret_key_generator.py + +if [[ ! -f ${dest_settings_py} ]]; then + key=$($PYTHON "${seahub_secret_keygen}") + cat > ${dest_settings_py} </dev/null 1>&2; then + echo "Failed to sync ccnet groupmgr database." + err_and_quit; +fi + +ccnet_config_db=${TOPDIR}/ccnet/misc/config.db +ccnet_config_sql=${INSTALLPATH}/sql/sqlite/config.sql +if ! sqlite3 ${ccnet_config_db} ".read ${ccnet_config_sql}" 2>/dev/null 1>&2; then + echo "Failed to sync ccnet config database." + err_and_quit; +fi + +ccnet_org_db=${TOPDIR}/ccnet/OrgMgr/orgmgr.db +ccnet_org_sql=${INSTALLPATH}/sql/sqlite/org.sql +if ! 
sqlite3 ${ccnet_org_db} ".read ${ccnet_org_sql}" 2>/dev/null 1>&2; then + echo "Failed to sync ccnet org database." + err_and_quit; +fi + +ccnet_user_db=${TOPDIR}/ccnet/PeerMgr/usermgr.db +ccnet_user_sql=${INSTALLPATH}/sql/sqlite/user.sql +if ! sqlite3 ${ccnet_user_db} ".read ${ccnet_user_sql}" 2>/dev/null 1>&2; then + echo "Failed to sync ccnet user database." + err_and_quit; +fi + +seafile_db=${TOPDIR}/seafile-data/seafile.db +seafile_sql=${INSTALLPATH}/sql/sqlite/seafile.sql +if ! sqlite3 ${seafile_db} ".read ${seafile_sql}" 2>/dev/null 1>&2; then + echo "Failed to sync seafile database." + err_and_quit; +fi + +seahub_db=${TOPDIR}/seahub.db +seahub_sqls=${INSTALLPATH}/seahub/sql/sqlite3.sql +if ! sqlite3 ${seahub_db} ".read ${seahub_sqls}" 2>/dev/null 1>&2; then + echo "Failed to sync seahub database." + err_and_quit; +fi +echo +echo "Done." + +# prepare avatar folder + +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars + +if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" + ln -s ../../../seahub-data/avatars ${media_dir} +fi + +# Make a seafile-server symlink, like this: +# /data/haiwen/ +# -- seafile-server-2.0.4 +# -- seafile-server-latest # symlink to 2.0.4 +seafile_server_symlink=${TOPDIR}/seafile-server-latest +echo +echo -n "creating seafile-server-latest symbolic link ... " +if ! ln -s $(basename ${INSTALLPATH}) ${seafile_server_symlink}; then + echo + echo + echo "Failed to create symbolic link ${seafile_server_symlink}" + err_and_quit; +fi +echo "done" +echo + +chmod 0600 "$dest_settings_py" +chmod 0700 "$default_ccnet_conf_dir" +chmod 0700 "$default_seafile_data_dir" +chmod 0700 "$default_conf_dir" + +# ------------------------------------------- +# copy user manuals to library template +# ------------------------------------------- +copy_user_manuals; + +# ------------------------------------------- +# final message +# ------------------------------------------- + +sleep 1 + +echo +echo "-----------------------------------------------------------------" +echo "Your seafile server configuration has been completed successfully." +echo "-----------------------------------------------------------------" +echo +echo "run seafile server: ./seafile.sh { start | stop | restart }" +echo "run seahub server: ./seahub.sh { start | stop | restart }" +echo +echo "-----------------------------------------------------------------" +echo "If the server is behind a firewall, remember to open these tcp ports:" +echo "-----------------------------------------------------------------" +echo +echo "port of seafile fileserver: ${fileserver_port}" +echo "port of seahub: 8000" +echo +echo -e "When problems occur, refer to\n" +echo -e " ${server_manual_http}\n" +echo "for more information." 
+echo diff --git a/scripts/sql/mysql/ccnet.sql b/scripts/sql/mysql/ccnet.sql new file mode 100644 index 0000000000..6cb34653de --- /dev/null +++ b/scripts/sql/mysql/ccnet.sql @@ -0,0 +1,104 @@ +CREATE TABLE IF NOT EXISTS Binding ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + email VARCHAR(255), + peer_id CHAR(41), + UNIQUE INDEX (peer_id), + INDEX (email(20)) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS EmailUser ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + email VARCHAR(255), + passwd VARCHAR(256), + is_staff BOOL NOT NULL, + is_active BOOL NOT NULL, + ctime BIGINT, + reference_id VARCHAR(255), + UNIQUE INDEX (email), + UNIQUE INDEX (reference_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS `Group` ( + `group_id` BIGINT PRIMARY KEY AUTO_INCREMENT, + `group_name` VARCHAR(255), + `creator_name` VARCHAR(255), + `timestamp` BIGINT, + `type` VARCHAR(32), + `parent_group_id` INTEGER +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS GroupDNPair ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + group_id INTEGER, + dn VARCHAR(255) +)ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS GroupStructure ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + group_id INTEGER, + path VARCHAR(1024), + UNIQUE INDEX(group_id) +)ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS `GroupUser` ( + `id` BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + `group_id` BIGINT, + `user_name` VARCHAR(255), + `is_staff` tinyint, + UNIQUE INDEX (`group_id`, `user_name`), + INDEX (`user_name`) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS LDAPConfig ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + cfg_group VARCHAR(255) NOT NULL, + cfg_key VARCHAR(255) NOT NULL, + value VARCHAR(255), + property INTEGER +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS LDAPUsers ( + id BIGINT PRIMARY KEY AUTO_INCREMENT, + email VARCHAR(255) NOT NULL, + password varchar(255) NOT NULL, + is_staff BOOL NOT NULL, + is_active BOOL NOT NULL, + extra_attrs TEXT, + reference_id VARCHAR(255), + UNIQUE INDEX(email), + UNIQUE INDEX (reference_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS OrgGroup ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + org_id INTEGER, + group_id INTEGER, + INDEX (group_id), + UNIQUE INDEX(org_id, group_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS OrgUser ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + org_id INTEGER, + email VARCHAR(255), + is_staff BOOL NOT NULL, + INDEX (email), + UNIQUE INDEX(org_id, email) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS Organization ( + org_id BIGINT PRIMARY KEY AUTO_INCREMENT, + org_name VARCHAR(255), + url_prefix VARCHAR(255), + creator VARCHAR(255), + ctime BIGINT, + UNIQUE INDEX (url_prefix) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS UserRole ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + email VARCHAR(255), + role VARCHAR(255), + is_manual_set INTEGER DEFAULT 0, + UNIQUE INDEX (email) +) ENGINE=INNODB; diff --git a/scripts/sql/mysql/seafile.sql b/scripts/sql/mysql/seafile.sql new file mode 100644 index 0000000000..0051884904 --- /dev/null +++ b/scripts/sql/mysql/seafile.sql @@ -0,0 +1,326 @@ +CREATE TABLE IF NOT EXISTS Branch ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + name VARCHAR(10), + repo_id CHAR(41), + commit_id CHAR(41), + UNIQUE INDEX(repo_id, name) +) ENGINE = INNODB; + +CREATE TABLE IF NOT EXISTS FileLockTimestamp ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(40), + update_time BIGINT NOT NULL, + UNIQUE INDEX(repo_id) +); + +CREATE TABLE IF NOT EXISTS FileLocks ( + id BIGINT NOT NULL PRIMARY KEY 
AUTO_INCREMENT, + repo_id CHAR(40) NOT NULL, + path TEXT NOT NULL, + user_name VARCHAR(255) NOT NULL, + lock_time BIGINT, + expire BIGINT, + KEY(repo_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS FolderGroupPerm ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(36) NOT NULL, + path TEXT NOT NULL, + permission CHAR(15), + group_id INTEGER NOT NULL, + INDEX(repo_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS FolderPermTimestamp ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(36), + timestamp BIGINT, + UNIQUE INDEX(repo_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS FolderUserPerm ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(36) NOT NULL, + path TEXT NOT NULL, + permission CHAR(15), + user VARCHAR(255) NOT NULL, + INDEX(repo_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS GCID ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(36), + gc_id CHAR(36), + UNIQUE INDEX(repo_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS GarbageRepos ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(36), + UNIQUE INDEX(repo_id) +); + +CREATE TABLE IF NOT EXISTS InnerPubRepo ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(37), + permission CHAR(15), + UNIQUE INDEX (repo_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS LastGCID ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(36), + client_id VARCHAR(128), + gc_id CHAR(36), + UNIQUE INDEX(repo_id, client_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS OrgGroupRepo ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + org_id INTEGER, + repo_id CHAR(37), + group_id INTEGER, + owner VARCHAR(255), + permission CHAR(15), + UNIQUE INDEX(org_id, group_id, repo_id), + INDEX (repo_id), INDEX (owner) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS OrgInnerPubRepo ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + org_id INTEGER, + repo_id CHAR(37), + UNIQUE INDEX(org_id, repo_id), + permission CHAR(15) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS OrgQuota ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + org_id INTEGER, + quota BIGINT, + UNIQUE INDEX(org_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS OrgRepo ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + org_id INTEGER, + repo_id CHAR(37), + user VARCHAR(255), + UNIQUE INDEX(org_id, repo_id), + UNIQUE INDEX (repo_id), + INDEX (org_id, user), + INDEX(user) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS OrgSharedRepo ( + id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, + org_id INT, + repo_id CHAR(37) , + from_email VARCHAR(255), + to_email VARCHAR(255), + permission CHAR(15), + INDEX(repo_id), + INDEX (org_id, repo_id), + INDEX(from_email), INDEX(to_email) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS OrgUserQuota ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + org_id INTEGER, + user VARCHAR(255), + quota BIGINT, + UNIQUE INDEX(org_id, user) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS Repo ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(37), + UNIQUE INDEX (repo_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS RepoFileCount ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(36), + file_count BIGINT UNSIGNED, + UNIQUE INDEX(repo_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS RepoGroup ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(37), + group_id INTEGER, + user_name VARCHAR(255), + permission CHAR(15), + UNIQUE INDEX(group_id, repo_id), + INDEX (repo_id), INDEX (user_name) +) ENGINE=INNODB; 
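-- Illustrative note, not part of the schema: RepoGroup above records group shares,
-- mapping a library (repo_id) to a group it is shared with and the permission
-- granted. A typical lookup, with 42 as a made-up group id, would be:
--
--   SELECT repo_id, permission FROM RepoGroup WHERE group_id = 42;
--
-- The UNIQUE INDEX on (group_id, repo_id) prevents duplicate shares of the same
-- library to the same group, while the plain indexes on repo_id and user_name
-- back the reverse lookups.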
+ +CREATE TABLE IF NOT EXISTS RepoHead ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(37), + branch_name VARCHAR(10), + UNIQUE INDEX(repo_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS RepoHistoryLimit ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(37), + days INTEGER, + UNIQUE INDEX(repo_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS RepoInfo (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(36), + name VARCHAR(255) NOT NULL, + update_time BIGINT, + version INTEGER, + is_encrypted INTEGER, + last_modifier VARCHAR(255), + status INTEGER DEFAULT 0, + UNIQUE INDEX(repo_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS RepoOwner ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(37), + owner_id VARCHAR(255), + UNIQUE INDEX (repo_id), + INDEX (owner_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS RepoSize ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(37), + size BIGINT UNSIGNED, + head_id CHAR(41), + UNIQUE INDEX (repo_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS RepoStorageId ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(40) NOT NULL, + storage_id VARCHAR(255) NOT NULL, + UNIQUE INDEX(repo_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS RepoSyncError ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + token CHAR(41), + error_time BIGINT UNSIGNED, + error_con VARCHAR(1024), + UNIQUE INDEX(token) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS RepoTokenPeerInfo ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + token CHAR(41), + peer_id CHAR(41), + peer_ip VARCHAR(41), + peer_name VARCHAR(255), + sync_time BIGINT, + client_ver VARCHAR(20), + UNIQUE INDEX(token), + INDEX(peer_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS RepoTrash ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(36), + repo_name VARCHAR(255), + head_id CHAR(40), + owner_id VARCHAR(255), + size BIGINT(20), + org_id INTEGER, + del_time BIGINT, + UNIQUE INDEX(repo_id), + INDEX(owner_id), + INDEX(org_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS RepoUserToken ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(37), + email VARCHAR(255), + token CHAR(41), + UNIQUE INDEX(repo_id, token), + INDEX(token), + INDEX (email) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS RepoValidSince ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(37), + timestamp BIGINT, + UNIQUE INDEX(repo_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS RoleQuota ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + role VARCHAR(255), + quota BIGINT, + UNIQUE INDEX(role) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS SeafileConf ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + cfg_group VARCHAR(255) NOT NULL, + cfg_key VARCHAR(255) NOT NULL, + value VARCHAR(255), + property INTEGER +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS SharedRepo ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(37) , + from_email VARCHAR(255), + to_email VARCHAR(255), + permission CHAR(15), + INDEX (repo_id), + INDEX(from_email), + INDEX(to_email) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS SystemInfo ( + id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, + info_key VARCHAR(256), + info_value VARCHAR(1024) +); + +CREATE TABLE IF NOT EXISTS UserQuota ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + user VARCHAR(255), + quota BIGINT, + UNIQUE INDEX(user) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS UserShareQuota ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + 
user VARCHAR(255), + quota BIGINT, + UNIQUE INDEX(user) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS VirtualRepo ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(36), + origin_repo CHAR(36), + path TEXT, + base_commit CHAR(40), + UNIQUE INDEX(repo_id), + INDEX(origin_repo) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS WebAP ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(37), + access_property CHAR(10), + UNIQUE INDEX(repo_id) +) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS WebUploadTempFiles ( + id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, + repo_id CHAR(40) NOT NULL, + file_path TEXT NOT NULL, + tmp_file_path TEXT NOT NULL +) ENGINE=INNODB; diff --git a/scripts/sql/sqlite/config.sql b/scripts/sql/sqlite/config.sql new file mode 100644 index 0000000000..4c7a75c618 --- /dev/null +++ b/scripts/sql/sqlite/config.sql @@ -0,0 +1 @@ +CREATE TABLE IF NOT EXISTS Config (key TEXT PRIMARY KEY, value TEXT); diff --git a/scripts/sql/sqlite/groupmgr.sql b/scripts/sql/sqlite/groupmgr.sql new file mode 100644 index 0000000000..f0b6bdd1f3 --- /dev/null +++ b/scripts/sql/sqlite/groupmgr.sql @@ -0,0 +1,7 @@ +CREATE TABLE IF NOT EXISTS `Group` (`group_id` INTEGER PRIMARY KEY AUTOINCREMENT, `group_name` VARCHAR(255), `creator_name` VARCHAR(255), `timestamp` BIGINT, `type` VARCHAR(32), `parent_group_id` INTEGER); +CREATE TABLE IF NOT EXISTS `GroupUser` (`group_id` INTEGER, `user_name` VARCHAR(255), `is_staff` tinyint); +CREATE UNIQUE INDEX IF NOT EXISTS groupid_username_indx on `GroupUser` (`group_id`, `user_name`); +CREATE INDEX IF NOT EXISTS username_indx on `GroupUser` (`user_name`); +CREATE TABLE IF NOT EXISTS GroupDNPair (group_id INTEGER, dn VARCHAR(255)); +CREATE TABLE IF NOT EXISTS GroupStructure (group_id INTEGER PRIMARY KEY, path VARCHAR(1024)); + diff --git a/scripts/sql/sqlite/org.sql b/scripts/sql/sqlite/org.sql new file mode 100644 index 0000000000..1fc1c804b9 --- /dev/null +++ b/scripts/sql/sqlite/org.sql @@ -0,0 +1,10 @@ +CREATE TABLE IF NOT EXISTS OrgGroup (org_id INTEGER, group_id INTEGER); +CREATE INDEX IF NOT EXISTS groupid_indx on OrgGroup (group_id); + + +CREATE TABLE IF NOT EXISTS Organization (org_id INTEGER PRIMARY KEY AUTOINCREMENT, org_name VARCHAR(255), url_prefix VARCHAR(255), creator VARCHAR(255), ctime BIGINT); +CREATE UNIQUE INDEX IF NOT EXISTS url_prefix_indx on Organization (url_prefix); + +CREATE TABLE IF NOT EXISTS OrgUser (org_id INTEGER, email TEXT, is_staff bool NOT NULL); +CREATE INDEX IF NOT EXISTS email_indx on OrgUser (email); +CREATE UNIQUE INDEX IF NOT EXISTS orgid_email_indx on OrgUser (org_id, email); diff --git a/scripts/sql/sqlite/seafile.sql b/scripts/sql/sqlite/seafile.sql new file mode 100644 index 0000000000..de9e40d673 --- /dev/null +++ b/scripts/sql/sqlite/seafile.sql @@ -0,0 +1,65 @@ +CREATE TABLE IF NOT EXISTS Branch (name VARCHAR(10), repo_id CHAR(40), commit_id CHAR(40), PRIMARY KEY (repo_id, name)); +CREATE TABLE IF NOT EXISTS Repo (repo_id CHAR(37) PRIMARY KEY); +CREATE TABLE IF NOT EXISTS RepoOwner (repo_id CHAR(37) PRIMARY KEY, owner_id TEXT); +CREATE INDEX IF NOT EXISTS OwnerIndex ON RepoOwner (owner_id); + +CREATE TABLE IF NOT EXISTS RepoGroup (repo_id CHAR(37), group_id INTEGER, user_name TEXT, permission CHAR(15)); +CREATE UNIQUE INDEX IF NOT EXISTS groupid_repoid_indx on RepoGroup (group_id, repo_id); +CREATE INDEX IF NOT EXISTS repogroup_repoid_index on RepoGroup (repo_id); +CREATE INDEX IF NOT EXISTS repogroup_username_indx on RepoGroup (user_name); +CREATE TABLE IF NOT EXISTS InnerPubRepo (repo_id 
CHAR(37) PRIMARY KEY, permission CHAR(15)); + +CREATE TABLE IF NOT EXISTS OrgRepo (org_id INTEGER, repo_id CHAR(37), user VARCHAR(255)); +CREATE UNIQUE INDEX IF NOT EXISTS repoid_indx on OrgRepo (repo_id); +CREATE INDEX IF NOT EXISTS orgid_repoid_indx on OrgRepo (org_id, repo_id); +CREATE INDEX IF NOT EXISTS orgrepo_orgid_user_indx on OrgRepo (org_id, user); +CREATE INDEX IF NOT EXISTS orgrepo_user_indx on OrgRepo (user); +CREATE TABLE IF NOT EXISTS OrgGroupRepo (org_id INTEGER, repo_id CHAR(37), group_id INTEGER, owner VARCHAR(255), permission CHAR(15)); +CREATE UNIQUE INDEX IF NOT EXISTS orgid_groupid_repoid_indx on OrgGroupRepo (org_id, group_id, repo_id); +CREATE INDEX IF NOT EXISTS org_repoid_index on OrgGroupRepo (repo_id); +CREATE INDEX IF NOT EXISTS org_owner_indx on OrgGroupRepo (owner); +CREATE TABLE IF NOT EXISTS OrgInnerPubRepo (org_id INTEGER, repo_id CHAR(37), permission CHAR(15), PRIMARY KEY (org_id, repo_id)); +CREATE TABLE IF NOT EXISTS RepoUserToken (repo_id CHAR(37), email VARCHAR(255), token CHAR(41)); +CREATE UNIQUE INDEX IF NOT EXISTS repo_token_indx on RepoUserToken (repo_id, token); +CREATE INDEX IF NOT EXISTS repo_token_email_indx on RepoUserToken (email); +CREATE TABLE IF NOT EXISTS RepoTokenPeerInfo (token CHAR(41) PRIMARY KEY, peer_id CHAR(41), peer_ip VARCHAR(41), peer_name VARCHAR(255), sync_time BIGINT, client_ver VARCHAR(20)); +CREATE TABLE IF NOT EXISTS RepoSyncError (token CHAR(41) PRIMARY KEY, error_time BIGINT, error_con VARCHAR(1024)); +CREATE TABLE IF NOT EXISTS RepoHead (repo_id CHAR(37) PRIMARY KEY, branch_name VARCHAR(10)); +CREATE TABLE IF NOT EXISTS RepoSize (repo_id CHAR(37) PRIMARY KEY, size BIGINT UNSIGNED, head_id CHAR(41)); +CREATE TABLE IF NOT EXISTS RepoHistoryLimit (repo_id CHAR(37) PRIMARY KEY, days INTEGER); +CREATE TABLE IF NOT EXISTS RepoValidSince (repo_id CHAR(37) PRIMARY KEY, timestamp BIGINT); +CREATE TABLE IF NOT EXISTS WebAP (repo_id CHAR(37) PRIMARY KEY, access_property CHAR(10)); +CREATE TABLE IF NOT EXISTS VirtualRepo (repo_id CHAR(36) PRIMARY KEY, origin_repo CHAR(36), path TEXT, base_commit CHAR(40)); +CREATE INDEX IF NOT EXISTS virtualrepo_origin_repo_idx ON VirtualRepo (origin_repo); +CREATE TABLE IF NOT EXISTS GarbageRepos (repo_id CHAR(36) PRIMARY KEY); +CREATE TABLE IF NOT EXISTS RepoTrash (repo_id CHAR(36) PRIMARY KEY, repo_name VARCHAR(255), head_id CHAR(40), owner_id VARCHAR(255), size BIGINT UNSIGNED, org_id INTEGER, del_time BIGINT); +CREATE INDEX IF NOT EXISTS repotrash_owner_id_idx ON RepoTrash(owner_id); +CREATE INDEX IF NOT EXISTS repotrash_org_id_idx ON RepoTrash(org_id); +CREATE TABLE IF NOT EXISTS RepoFileCount (repo_id CHAR(36) PRIMARY KEY, file_count BIGINT UNSIGNED); +CREATE TABLE IF NOT EXISTS FolderUserPerm (repo_id CHAR(36) NOT NULL, path TEXT NOT NULL, permission CHAR(15), user VARCHAR(255) NOT NULL); +CREATE INDEX IF NOT EXISTS folder_user_perm_idx ON FolderUserPerm(repo_id); +CREATE TABLE IF NOT EXISTS FolderGroupPerm (repo_id CHAR(36) NOT NULL, path TEXT NOT NULL, permission CHAR(15), group_id INTEGER NOT NULL); +CREATE INDEX IF NOT EXISTS folder_group_perm_idx ON FolderGroupPerm(repo_id); +CREATE TABLE IF NOT EXISTS FolderPermTimestamp (repo_id CHAR(36) PRIMARY KEY, timestamp INTEGER); +CREATE TABLE IF NOT EXISTS WebUploadTempFiles (repo_id CHAR(40) NOT NULL, file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL); +CREATE TABLE IF NOT EXISTS RepoInfo (repo_id CHAR(36) PRIMARY KEY, name VARCHAR(255) NOT NULL, update_time INTEGER, version INTEGER, is_encrypted INTEGER, last_modifier VARCHAR(255), 
status INTEGER DEFAULT 0); +CREATE TABLE IF NOT EXISTS RepoStorageId (repo_id CHAR(40) NOT NULL, storage_id VARCHAR(255) NOT NULL); +CREATE TABLE IF NOT EXISTS UserQuota (user VARCHAR(255) PRIMARY KEY, quota BIGINT); +CREATE TABLE IF NOT EXISTS UserShareQuota (user VARCHAR(255) PRIMARY KEY, quota BIGINT); +CREATE TABLE IF NOT EXISTS OrgQuota (org_id INTEGER PRIMARY KEY, quota BIGINT); +CREATE TABLE IF NOT EXISTS OrgUserQuota (org_id INTEGER, user VARCHAR(255), quota BIGINT, PRIMARY KEY (org_id, user)); +CREATE TABLE IF NOT EXISTS RoleQuota (role VARCHAR(255) PRIMARY KEY, quota BIGINT); +CREATE TABLE IF NOT EXISTS SeafileConf (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER); +CREATE TABLE IF NOT EXISTS FileLocks (repo_id CHAR(40) NOT NULL, path TEXT NOT NULL, user_name VARCHAR(255) NOT NULL, lock_time BIGINT, expire BIGINT); +CREATE INDEX IF NOT EXISTS FileLocksIndex ON FileLocks (repo_id); +CREATE TABLE IF NOT EXISTS FileLockTimestamp (repo_id CHAR(40) PRIMARY KEY, update_time BIGINT NOT NULL); +CREATE TABLE IF NOT EXISTS SharedRepo (repo_id CHAR(37) , from_email VARCHAR(255), to_email VARCHAR(255), permission CHAR(15)); +CREATE INDEX IF NOT EXISTS RepoIdIndex on SharedRepo (repo_id); +CREATE INDEX IF NOT EXISTS FromEmailIndex on SharedRepo (from_email); +CREATE INDEX IF NOT EXISTS ToEmailIndex on SharedRepo (to_email); +CREATE TABLE IF NOT EXISTS OrgSharedRepo (org_id INTEGER, repo_id CHAR(37) , from_email VARCHAR(255), to_email VARCHAR(255), permission CHAR(15)); +CREATE INDEX IF NOT EXISTS OrgRepoIdIndex on OrgSharedRepo (org_id, repo_id); +CREATE INDEX IF NOT EXISTS OrgFromEmailIndex on OrgSharedRepo (from_email); +CREATE INDEX IF NOT EXISTS OrgToEmailIndex on OrgSharedRepo (to_email); +CREATE INDEX IF NOT EXISTS OrgLibIdIndex on OrgSharedRepo (repo_id); +CREATE TABLE IF NOT EXISTS SystemInfo (info_key VARCHAR(256), info_value VARCHAR(1024)); diff --git a/scripts/sql/sqlite/user.sql b/scripts/sql/sqlite/user.sql new file mode 100644 index 0000000000..af86cf76a9 --- /dev/null +++ b/scripts/sql/sqlite/user.sql @@ -0,0 +1,16 @@ +CREATE TABLE IF NOT EXISTS Binding (email TEXT, peer_id TEXT); +CREATE UNIQUE INDEX IF NOT EXISTS peer_index on Binding (peer_id); + +CREATE TABLE IF NOT EXISTS EmailUser (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, email TEXT, passwd TEXT, is_staff bool NOT NULL, is_active bool NOT NULL, ctime INTEGER, reference_id TEXT); +CREATE UNIQUE INDEX IF NOT EXISTS email_index on EmailUser (email); +CREATE UNIQUE INDEX IF NOT EXISTS reference_id_index on EmailUser (reference_id); + +CREATE TABLE IF NOT EXISTS LDAPConfig (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER); + +CREATE TABLE IF NOT EXISTS LDAPUsers (id INTEGER PRIMARY KEY AUTOINCREMENT, email TEXT NOT NULL, password TEXT NOT NULL, is_staff BOOL NOT NULL, is_active BOOL NOT NULL, extra_attrs TEXT, reference_id TEXT); +CREATE UNIQUE INDEX IF NOT EXISTS ldapusers_email_index on LDAPUsers(email); +CREATE UNIQUE INDEX IF NOT EXISTS ldapusers_reference_id_index on LDAPUsers(reference_id); + +CREATE TABLE IF NOT EXISTS UserRole (email TEXT, role TEXT, is_manual_set INTEGER DEFAULT 0); +CREATE INDEX IF NOT EXISTS userrole_email_index on UserRole (email); +CREATE UNIQUE INDEX IF NOT EXISTS userrole_userrole_index on UserRole (email, role); diff --git a/scripts/sqlite2mysql.py b/scripts/sqlite2mysql.py new file mode 100644 index 0000000000..8b30985241 --- /dev/null +++ b/scripts/sqlite2mysql.py @@ -0,0 +1,82 @@ 
+#!/usr/bin/env python + +"""Lifted from: +http://stackoverflow.com/questions/18671/quick-easy-way-to-migrate-sqlite3-to-mysql + +Run like so: + +sqlite3 .db .dump | python sqlite2mysql.py > .sql + +Then you can import the .sql file into MySql + +Note - you need to add foreign key constrains manually since sqlite doesn't actually support them +""" +import re +import fileinput + +def this_line_is_useless(line): + useless_es = [ + 'BEGIN TRANSACTION', + 'COMMIT', + 'sqlite_sequence', + 'CREATE UNIQUE INDEX', + 'PRAGMA', + ] + for useless in useless_es: + if re.search(useless, line): + return True + +def has_primary_key(line): + return bool(re.search(r'PRIMARY KEY', line)) + +for line in fileinput.input(): + searching_for_end = False + if this_line_is_useless(line): continue + + # this line was necessary because ''); was getting + # converted (inappropriately) to \'); + if re.match(r".*, ''\);", line): + line = re.sub(r"''\);", r'``);', line) + + if re.match(r'^CREATE TABLE.*', line): + searching_for_end = True + + m = re.search('CREATE TABLE [`"]?(\w*)[`"]?(.*)', line) + if m: + name, sub = m.groups() + sub = sub.replace('"','`') + line = "DROP TABLE IF EXISTS `%(name)s`;\nCREATE TABLE IF NOT EXISTS `%(name)s`%(sub)s\n" + line = line % dict(name=name, sub=sub) + else: + m = re.search('INSERT INTO "(\w*)"(.*)', line) + if m: + name, sub = m.groups() + line = 'INSERT INTO `%s`%s\n' % m.groups() + line = line.replace('"', r'\"') + line = line.replace('"', "'") + # line = re.sub(r"([^'])'t'(.)", r"\1THIS_IS_TRUE\2", line) + # line = line.replace('THIS_IS_TRUE', '1') + # line = re.sub(r"([^'])'f'(.)", r"\1THIS_IS_FALSE\2", line) + # line = line.replace('THIS_IS_FALSE', '0') + + # Add auto_increment if it's not there since sqlite auto_increments ALL + # primary keys + if searching_for_end: + if re.search(r"integer(?:\s+\w+)*\s*PRIMARY KEY(?:\s+\w+)*\s*,", line, re.I): + line = line.replace("PRIMARY KEY", "PRIMARY KEY AUTO_INCREMENT") + # replace " and ' with ` because mysql doesn't like quotes in CREATE commands + line = line.replace('"', '`').replace("'", '`') + + # And now we convert it back (see above) + if re.match(r".*, ``\);", line): + line = re.sub(r'``\);', r"'');", line) + + if searching_for_end and re.match(r'.*\);', line): + searching_for_end = False + + if re.match(r"CREATE INDEX", line): + line = re.sub('"', '`', line) + + line = line.replace('"', '`') + line = line.replace('AUTOINCREMENT', 'AUTO_INCREMENT') + print(line) diff --git a/scripts/sqlite2mysql.sh b/scripts/sqlite2mysql.sh new file mode 100755 index 0000000000..9c4fccf6d7 --- /dev/null +++ b/scripts/sqlite2mysql.sh @@ -0,0 +1,118 @@ +#!/bin/sh +# +# This shell script and corresponding sqlite2mysql.py are used to +# migrate Seafile data from SQLite to MySQL. +# +# Setup: +# +# 1. Move this file and sqlite2mysql.py to the top directory of your Seafile +# installation path (e.g. /data/haiwen). +# 2. Run: ./sqlite2mysql.sh +# 3. Three files(ccnet-db.sql, seafile-db.sql, seahub-db.sql) are created. +# 4. Loads these files to MySQL +# (mysql> source ccnet-db.sql) +# + +CCNET_DB='ccnet-db.sql' +SEAFILE_DB='seafile-db.sql' +SEAHUB_DB='seahub-db.sql' + +########## ccnet +seafile_path=$(pwd) +if [ -f "${seafile_path}/conf/ccnet.conf" ]; then + USER_MGR_DB=${seafile_path}/ccnet/PeerMgr/usermgr.db + GRP_MGR_DB=${seafile_path}/ccnet/GroupMgr/groupmgr.db +else + echo "${seafile_path}/conf/ccnet.conf does not exists." + read -p "Please provide your ccnet.conf path(e.g. 
/data/haiwen/conf/ccnet.conf): " ccnet_conf_path + if [ -f ${ccnet_conf_path} ]; then + USER_MGR_DB=$(dirname $(dirname "${ccnet_conf_path}"))/ccnet/PeerMgr/usermgr.db + GRP_MGR_DB=$(dirname $(dirname "${ccnet_conf_path}"))/ccnet/GroupMgr/groupmgr.db + else + echo "${ccnet_conf_path} does not exists, quit." + exit 1 + fi +fi + +rm -rf ${CCNET_DB} + +echo "sqlite3 ${USER_MGR_DB} .dump | python sqlite2mysql.py > ${CCNET_DB}" +sqlite3 ${USER_MGR_DB} .dump | python sqlite2mysql.py > ${CCNET_DB} +echo "sqlite3 ${GRP_MGR_DB} .dump | python sqlite2mysql.py >> ${CCNET_DB}" +sqlite3 ${GRP_MGR_DB} .dump | python sqlite2mysql.py >> ${CCNET_DB} + +# change ctime from INTEGER to BIGINT in EmailUser table +sed 's/ctime INTEGER/ctime BIGINT/g' ${CCNET_DB} > ${CCNET_DB}.tmp && mv ${CCNET_DB}.tmp ${CCNET_DB} + +# change email in UserRole from TEXT to VARCHAR(255) +sed 's/email TEXT, role TEXT/email VARCHAR(255), role TEXT/g' ${CCNET_DB} > ${CCNET_DB}.tmp && mv ${CCNET_DB}.tmp ${CCNET_DB} + +########## seafile +rm -rf ${SEAFILE_DB} + +if [ -f "${seafile_path}/seafile-data/seafile.db" ]; then + echo "sqlite3 ${seafile_path}/seafile-data/seafile.db .dump | python sqlite2mysql.py > ${SEAFILE_DB}" + sqlite3 ${seafile_path}/seafile-data/seafile.db .dump | python sqlite2mysql.py > ${SEAFILE_DB} +else + echo "${seafile_path}/seafile-data/seafile.db does not exists." + read -p "Please provide your seafile.db path(e.g. /data/haiwen/seafile-data/seafile.db): " seafile_db_path + if [ -f ${seafile_db_path} ];then + echo "sqlite3 ${seafile_db_path} .dump | python sqlite2mysql.py > ${SEAFILE_DB}" + sqlite3 ${seafile_db_path} .dump | python sqlite2mysql.py > ${SEAFILE_DB} + else + echo "${seafile_db_path} does not exists, quit." + exit 1 + fi +fi + +# change owner_id in RepoOwner from TEXT to VARCHAR(255) +sed 's/owner_id TEXT/owner_id VARCHAR(255)/g' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB} + +# change user_name in RepoGroup from TEXT to VARCHAR(255) +sed 's/user_name TEXT/user_name VARCHAR(255)/g' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB} + +########## seahub +rm -rf ${SEAHUB_DB} + +if [ -f "${seafile_path}/seahub.db" ]; then + echo "sqlite3 ${seafile_path}/seahub.db .dump | tr -d '\n' | sed 's/;/;\n/g' | python sqlite2mysql.py > ${SEAHUB_DB}" + sqlite3 ${seafile_path}/seahub.db .dump | tr -d '\n' | sed 's/;/;\n/g' | python sqlite2mysql.py > ${SEAHUB_DB} +else + echo "${seafile_path}/seahub.db does not exists." + read -p "Please prove your seahub.db path(e.g. /data/haiwen/seahub.db): " seahub_db_path + if [ -f ${seahub_db_path} ]; then + echo "sqlite3 ${seahub_db_path} .dump | tr -d '\n' | sed 's/;/;\n/g' | python sqlite2mysql.py > ${SEAHUB_DB}" + sqlite3 ${seahub_db_path} .dump | tr -d '\n' | sed 's/;/;\n/g' | python sqlite2mysql.py > ${SEAHUB_DB} + else + echo "${seahub_db_path} does not exists, quit." 
+ exit 1 + fi +fi + +# change username from VARCHAR(256) to VARCHAR(255) in wiki_personalwiki +sed 's/varchar(256) NOT NULL UNIQUE/varchar(255) NOT NULL UNIQUE/g' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} + +# remove unique from contacts_contact +sed 's/, UNIQUE (`user_email`, `contact_email`)//g' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} + +# remove base_dirfileslastmodifiedinfo records to avoid json string parsing issue between sqlite and mysql +sed '/INSERT INTO `base_dirfileslastmodifiedinfo`/d' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} + +# remove notifications_usernotification records to avoid json string parsing issue between sqlite and mysql +sed '/INSERT INTO `notifications_usernotification`/d' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} + + +########## common logic + +# add ENGIN=INNODB to create table statment +for sql_file in $CCNET_DB $SEAFILE_DB $SEAHUB_DB +do + sed -r 's/(CREATE TABLE.*);/\1 ENGINE=INNODB;/g' $sql_file > $sql_file.tmp && mv $sql_file.tmp $sql_file +done + +# remove COLLATE NOCASE if possible +for sql_file in $CCNET_DB $SEAFILE_DB $SEAHUB_DB +do + sed 's/COLLATE NOCASE//g' $sql_file > $sql_file.tmp && mv $sql_file.tmp $sql_file +done + diff --git a/scripts/upgrade/add_collate.sh b/scripts/upgrade/add_collate.sh new file mode 100755 index 0000000000..e85a74d73e --- /dev/null +++ b/scripts/upgrade/add_collate.sh @@ -0,0 +1,75 @@ +#!/bin/sh +# +# This shell script is used to add COLLATE NOCASE to email field to avoid case +# issue in sqlite. +# +# 1. ./add-collate.sh +# + +USER_DB='/tmp/user-db.sql' +GROUP_DB='/tmp/group-db.sql' +SEAFILE_DB='/tmp/seafile-db.sql' +SEAHUB_DB='/tmp/seahub-db.sql' + +ccnet_dir=$1 + +########## ccnet +USER_MGR_DB=${ccnet_dir}/PeerMgr/usermgr.db +GRP_MGR_DB=${ccnet_dir}/GroupMgr/groupmgr.db + +rm -rf ${USER_DB} +rm -rf ${GROUP_DB} + +echo "sqlite3 ${USER_MGR_DB} .dump > ${USER_DB}" +sqlite3 ${USER_MGR_DB} .dump > ${USER_DB} +echo "sqlite3 ${GRP_MGR_DB} .dump > ${GROUP_DB}" +sqlite3 ${GRP_MGR_DB} .dump > ${GROUP_DB} + +sed -r 's/(CREATE TABLE EmailUser.*)email TEXT,(.*)/\1email TEXT COLLATE NOCASE,\2/I' ${USER_DB} > ${USER_DB}.tmp && mv ${USER_DB}.tmp ${USER_DB} +sed -r 's/(CREATE TABLE Binding.*)email TEXT,(.*)/\1email TEXT COLLATE NOCASE,\2/I' ${USER_DB} > ${USER_DB}.tmp && mv ${USER_DB}.tmp ${USER_DB} +sed -r 's/(CREATE TABLE `Group`.*)`creator_name` VARCHAR\(255\),(.*)/\1`creator_name` VARCHAR\(255\) COLLATE NOCASE,\2/I' ${GROUP_DB} > ${GROUP_DB}.tmp && mv ${GROUP_DB}.tmp ${GROUP_DB} +sed -r 's/(CREATE TABLE `GroupUser`.*)`user_name` VARCHAR\(255\),(.*)/\1`user_name` VARCHAR\(255\) COLLATE NOCASE,\2/I' ${GROUP_DB} > ${GROUP_DB}.tmp && mv ${GROUP_DB}.tmp ${GROUP_DB} + +# backup & restore +mv ${USER_MGR_DB} ${USER_MGR_DB}.`date +"%Y%m%d%H%M%S"` +mv ${GRP_MGR_DB} ${GRP_MGR_DB}.`date +"%Y%m%d%H%M%S"` +sqlite3 ${USER_MGR_DB} < ${USER_DB} +sqlite3 ${GRP_MGR_DB} < ${GROUP_DB} + +########## seafile +rm -rf ${SEAFILE_DB} + +SEAFILE_DB_FILE=$2/seafile.db +echo "sqlite3 ${SEAFILE_DB_FILE} .dump > ${SEAFILE_DB}" +sqlite3 ${SEAFILE_DB_FILE} .dump > ${SEAFILE_DB} + +sed -r 's/(CREATE TABLE RepoOwner.*)owner_id TEXT(.*)/\1owner_id TEXT COLLATE NOCASE\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB} +sed -r 's/(CREATE TABLE RepoGroup.*)user_name TEXT,(.*)/\1user_name TEXT COLLATE NOCASE,\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB} +sed -r 's/(CREATE TABLE RepoUserToken.*)email 
VARCHAR\(255\),(.*)/\1email VARCHAR\(255\) COLLATE NOCASE,\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB} +sed -r 's/(CREATE TABLE UserQuota.*)user VARCHAR\(255\),(.*)/\1user VARCHAR\(255\) COLLATE NOCASE,\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB} +sed -r 's/(CREATE TABLE SharedRepo.*)from_email VARCHAR\(512\), to_email VARCHAR\(512\),(.*)/\1from_email VARCHAR\(512\), to_email VARCHAR\(512\) COLLATE NOCASE,\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB} + +# backup & restore +mv ${SEAFILE_DB_FILE} ${SEAFILE_DB_FILE}.`date +"%Y%m%d%H%M%S"` +sqlite3 ${SEAFILE_DB_FILE} < ${SEAFILE_DB} + +########## seahub +rm -rf ${SEAHUB_DB} + +SEAHUB_DB_FILE=$3 +echo "sqlite3 ${SEAHUB_DB_FILE} .Dump | tr -d '\n' | sed 's/;/;\n/g' > ${SEAHUB_DB}" +sqlite3 ${SEAHUB_DB_FILE} .dump | tr -d '\n' | sed 's/;/;\n/g' > ${SEAHUB_DB} + +sed -r 's/(CREATE TABLE "notifications_usernotification".*)"to_user" varchar\(255\) NOT NULL,(.*)/\1"to_user" varchar\(255\) NOT NULL COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} +sed -r 's/(CREATE TABLE "profile_profile".*)"user" varchar\(75\) NOT NULL UNIQUE,(.*)/\1"user" varchar\(75\) NOT NULL UNIQUE COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} +sed -r 's/(CREATE TABLE "share_fileshare".*)"username" varchar\(255\) NOT NULL,(.*)/\1"username" varchar\(255\) NOT NULL COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} +sed -r 's/(CREATE TABLE "api2_token".*)"user" varchar\(255\) NOT NULL UNIQUE,(.*)/\1"user" varchar\(255\) NOT NULL UNIQUE COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} +sed -r 's/(CREATE TABLE "wiki_personalwiki".*)"username" varchar\(256\) NOT NULL UNIQUE,(.*)/\1"username" varchar\(256\) NOT NULL UNIQUE COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} +sed -r 's/(CREATE TABLE "message_usermessage".*)"from_email" varchar\(75\) NOT NULL,\s*"to_email" varchar\(75\) NOT NULL,(.*)/\1"from_email" varchar\(75\) NOT NULL COLLATE NOCASE, "to_email" varchar\(75\) NOT NULL COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} +sed -r 's/(CREATE TABLE "avatar_avatar".*)"emailuser" varchar\(255\) NOT NULL,(.*)/\1"emailuser" varchar\(255\) NOT NULL COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB} + +# backup & restore +mv ${SEAHUB_DB_FILE} ${SEAHUB_DB_FILE}.`date +"%Y%m%d%H%M%S"` +sqlite3 ${SEAHUB_DB_FILE} < ${SEAHUB_DB} + +rm -rf ${USER_DB} ${GROUP_DB} ${SEAFILE_DB} ${SEAHUB_DB} diff --git a/scripts/upgrade/db_update_1.3_1.4.py b/scripts/upgrade/db_update_1.3_1.4.py new file mode 100644 index 0000000000..d0224aa707 --- /dev/null +++ b/scripts/upgrade/db_update_1.3_1.4.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python + +import sqlite3 +import os +import sys + +def usage(): + msg = 'usage: %s ' % os.path.basename(sys.argv[0]) + print(msg) + +def main(): + seahub_db = sys.argv[1] + + conn = sqlite3.connect(seahub_db) + c = conn.cursor() + + try: + c.execute('SELECT s_type from share_fileshare') + except sqlite3.OperationalError: + # only add this column if not exist yet, so this script is idempotent + c.execute('ALTER table share_fileshare add column "s_type" varchar(2) NOT NULL DEFAULT "f"') + + c.execute('CREATE INDEX IF NOT EXISTS "share_fileshare_f775835c" ON "share_fileshare" ("s_type")') + + sql = '''CREATE TABLE 
IF NOT EXISTS "base_dirfileslastmodifiedinfo" ( + "id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, + "repo_id" varchar(36) NOT NULL, + "parent_dir" text NOT NULL, + "parent_dir_hash" varchar(12) NOT NULL, + "dir_id" varchar(40) NOT NULL, + "last_modified_info" text NOT NULL, + UNIQUE ("repo_id", "parent_dir_hash"))''' + + c.execute(sql) + + sql = '''CREATE TABLE IF NOT EXISTS "api2_token" ( + "key" varchar(40) NOT NULL PRIMARY KEY, + "user" varchar(255) NOT NULL UNIQUE, + "created" datetime NOT NULL)''' + + c.execute(sql) + + conn.commit() + +if __name__ == '__main__': + if len(sys.argv) != 2: + usage() + sys.exit(1) + + main() + + diff --git a/scripts/upgrade/db_update_helper.py b/scripts/upgrade/db_update_helper.py new file mode 100644 index 0000000000..9eab54cc08 --- /dev/null +++ b/scripts/upgrade/db_update_helper.py @@ -0,0 +1,384 @@ +# coding: UTF-8 + +import sys +import os +import configparser +import glob + +HAS_PYMYSQL = True +try: + import pymysql +except ImportError: + HAS_PYMYSQL = False + +HAS_SQLITE3 = True +try: + import sqlite3 +except ImportError: + HAS_SQLITE3 = False + +class EnvManager(object): + def __init__(self): + self.upgrade_dir = os.path.dirname(__file__) + self.install_path = os.path.dirname(self.upgrade_dir) + self.top_dir = os.path.dirname(self.install_path) + self.ccnet_dir = os.environ['CCNET_CONF_DIR'] + self.seafile_dir = os.environ['SEAFILE_CONF_DIR'] + self.central_config_dir = os.environ.get('SEAFILE_CENTRAL_CONF_DIR') + + +env_mgr = EnvManager() + + +class Utils(object): + @staticmethod + def highlight(content, is_error=False): + '''Add ANSI color to content to get it highlighted on terminal''' + if is_error: + return '\x1b[1;31m%s\x1b[m' % content + else: + return '\x1b[1;32m%s\x1b[m' % content + + @staticmethod + def info(msg): + print(Utils.highlight('[INFO] ') + msg) + + @staticmethod + def warning(msg): + print(Utils.highlight('[WARNING] ') + msg) + + @staticmethod + def error(msg): + print(Utils.highlight('[ERROR] ') + msg) + sys.exit(1) + + @staticmethod + def read_config(config_path, defaults): + if not os.path.exists(config_path): + Utils.error('Config path %s doesn\'t exist, stop db upgrade' % + config_path) + cp = configparser.ConfigParser(defaults) + cp.read(config_path) + return cp + + +class MySQLDBInfo(object): + def __init__(self, host, port, username, password, db, unix_socket=None): + self.host = host + self.port = port + self.username = username + self.password = password + self.db = db + self.unix_socket = unix_socket + + +class DBUpdater(object): + def __init__(self, version, name): + self.sql_dir = os.path.join(env_mgr.upgrade_dir, 'sql', version, name) + pro_path = os.path.join(env_mgr.install_path, 'pro') + self.is_pro = os.path.exists(pro_path) + + @staticmethod + def get_instance(version): + '''Detect whether we are using mysql or sqlite3''' + ccnet_db_info = DBUpdater.get_ccnet_mysql_info(version) + seafile_db_info = DBUpdater.get_seafile_mysql_info(version) + seahub_db_info = DBUpdater.get_seahub_mysql_info() + + if ccnet_db_info and seafile_db_info and seahub_db_info: + Utils.info('You are using MySQL') + if not HAS_PYMYSQL: + Utils.error('Python pymysql module is not found') + updater = MySQLDBUpdater(version, ccnet_db_info, seafile_db_info, seahub_db_info) + + elif (ccnet_db_info is None) and (seafile_db_info is None) and (seahub_db_info is None): + Utils.info('You are using SQLite3') + if not HAS_SQLITE3: + Utils.error('Python sqlite3 module is not found') + updater = SQLiteDBUpdater(version) + + else: + def 
to_db_string(info): + if info is None: + return 'SQLite3' + else: + return 'MySQL' + Utils.error('Error:\n ccnet is using %s\n seafile is using %s\n seahub is using %s\n' + % (to_db_string(ccnet_db_info), + to_db_string(seafile_db_info), + to_db_string(seahub_db_info))) + + return updater + + def update_db(self): + ccnet_sql = os.path.join(self.sql_dir, 'ccnet.sql') + seafile_sql = os.path.join(self.sql_dir, 'seafile.sql') + seahub_sql = os.path.join(self.sql_dir, 'seahub.sql') + seafevents_sql = os.path.join(self.sql_dir, 'seafevents.sql') + + if os.path.exists(ccnet_sql): + Utils.info('updating ccnet database...') + self.update_ccnet_sql(ccnet_sql) + + if os.path.exists(seafile_sql): + Utils.info('updating seafile database...') + self.update_seafile_sql(seafile_sql) + + if os.path.exists(seahub_sql): + Utils.info('updating seahub database...') + self.update_seahub_sql(seahub_sql) + + if os.path.exists(seafevents_sql): + self.update_seafevents_sql(seafevents_sql) + + @staticmethod + def get_ccnet_mysql_info(version): + if version > '5.0.0': + config_path = env_mgr.central_config_dir + else: + config_path = env_mgr.ccnet_dir + + ccnet_conf = os.path.join(config_path, 'ccnet.conf') + defaults = { + 'HOST': '127.0.0.1', + 'PORT': '3306', + 'UNIX_SOCKET': '', + } + + config = Utils.read_config(ccnet_conf, defaults) + db_section = 'Database' + + if not config.has_section(db_section): + return None + + type = config.get(db_section, 'ENGINE') + if type != 'mysql': + return None + + try: + host = config.get(db_section, 'HOST') + port = config.getint(db_section, 'PORT') + username = config.get(db_section, 'USER') + password = config.get(db_section, 'PASSWD') + db = config.get(db_section, 'DB') + unix_socket = config.get(db_section, 'UNIX_SOCKET') + except configparser.NoOptionError as e: + Utils.error('Database config in ccnet.conf is invalid: %s' % e) + + info = MySQLDBInfo(host, port, username, password, db, unix_socket) + return info + + @staticmethod + def get_seafile_mysql_info(version): + if version > '5.0.0': + config_path = env_mgr.central_config_dir + else: + config_path = env_mgr.seafile_dir + + seafile_conf = os.path.join(config_path, 'seafile.conf') + defaults = { + 'HOST': '127.0.0.1', + 'PORT': '3306', + 'UNIX_SOCKET': '', + } + config = Utils.read_config(seafile_conf, defaults) + db_section = 'database' + + if not config.has_section(db_section): + return None + + type = config.get(db_section, 'type') + if type != 'mysql': + return None + + try: + host = config.get(db_section, 'host') + port = config.getint(db_section, 'port') + username = config.get(db_section, 'user') + password = config.get(db_section, 'password') + db = config.get(db_section, 'db_name') + unix_socket = config.get(db_section, 'unix_socket') + except configparser.NoOptionError as e: + Utils.error('Database config in seafile.conf is invalid: %s' % e) + + info = MySQLDBInfo(host, port, username, password, db, unix_socket) + return info + + @staticmethod + def get_seahub_mysql_info(): + sys.path.insert(0, env_mgr.top_dir) + if env_mgr.central_config_dir: + sys.path.insert(0, env_mgr.central_config_dir) + try: + import seahub_settings # pylint: disable=F0401 + except ImportError as e: + Utils.error('Failed to import seahub_settings.py: %s' % e) + + if not hasattr(seahub_settings, 'DATABASES'): + return None + + try: + d = seahub_settings.DATABASES['default'] + if d['ENGINE'] != 'django.db.backends.mysql': + return None + + host = d.get('HOST', '127.0.0.1') + port = int(d.get('PORT', 3306)) + username = d['USER'] + 
password = d['PASSWORD'] + db = d['NAME'] + unix_socket = host if host.startswith('/') else None + except KeyError: + Utils.error('Database config in seahub_settings.py is invalid: %s' % e) + + info = MySQLDBInfo(host, port, username, password, db, unix_socket) + return info + + def update_ccnet_sql(self, ccnet_sql): + raise NotImplementedError + + def update_seafile_sql(self, seafile_sql): + raise NotImplementedError + + def update_seahub_sql(self, seahub_sql): + raise NotImplementedError + + def update_seafevents_sql(self, seafevents_sql): + raise NotImplementedError + +class CcnetSQLiteDB(object): + def __init__(self, ccnet_dir): + self.ccnet_dir = ccnet_dir + + def get_db(self, dbname): + dbs = ( + 'ccnet.db', + 'GroupMgr/groupmgr.db', + 'misc/config.db', + 'OrgMgr/orgmgr.db', + 'PeerMgr/usermgr.db', + ) + for db in dbs: + if os.path.splitext(os.path.basename(db))[0] == dbname: + return os.path.join(self.ccnet_dir, db) + +class SQLiteDBUpdater(DBUpdater): + def __init__(self, version): + DBUpdater.__init__(self, version, 'sqlite3') + + self.ccnet_db = CcnetSQLiteDB(env_mgr.ccnet_dir) + self.seafile_db = os.path.join(env_mgr.seafile_dir, 'seafile.db') + self.seahub_db = os.path.join(env_mgr.top_dir, 'seahub.db') + self.seafevents_db = os.path.join(env_mgr.top_dir, 'seafevents.db') + + def update_db(self): + super(SQLiteDBUpdater, self).update_db() + for sql_path in glob.glob(os.path.join(self.sql_dir, 'ccnet', '*.sql')): + self.update_ccnet_sql(sql_path) + + def apply_sqls(self, db_path, sql_path): + with open(sql_path, 'r') as fp: + lines = fp.read().split(';') + + with sqlite3.connect(db_path) as conn: + for line in lines: + line = line.strip() + if not line: + continue + else: + conn.execute(line) + + def update_ccnet_sql(self, sql_path): + dbname = os.path.splitext(os.path.basename(sql_path))[0] + self.apply_sqls(self.ccnet_db.get_db(dbname), sql_path) + + def update_seafile_sql(self, sql_path): + self.apply_sqls(self.seafile_db, sql_path) + + def update_seahub_sql(self, sql_path): + self.apply_sqls(self.seahub_db, sql_path) + + def update_seafevents_sql(self, sql_path): + if self.is_pro: + Utils.info('seafevents do not support sqlite3 database') + + +class MySQLDBUpdater(DBUpdater): + def __init__(self, version, ccnet_db_info, seafile_db_info, seahub_db_info): + DBUpdater.__init__(self, version, 'mysql') + self.ccnet_db_info = ccnet_db_info + self.seafile_db_info = seafile_db_info + self.seahub_db_info = seahub_db_info + + def update_ccnet_sql(self, ccnet_sql): + self.apply_sqls(self.ccnet_db_info, ccnet_sql) + + def update_seafile_sql(self, seafile_sql): + self.apply_sqls(self.seafile_db_info, seafile_sql) + + def update_seahub_sql(self, seahub_sql): + self.apply_sqls(self.seahub_db_info, seahub_sql) + + def update_seafevents_sql(self, seafevents_sql): + if self.is_pro: + Utils.info('updating seafevents database...') + self.apply_sqls(self.seahub_db_info, seafevents_sql) + + def get_conn(self, info): + kw = dict( + user=info.username, + passwd=info.password, + db=info.db, + ) + if info.unix_socket: + kw['unix_socket'] = info.unix_socket + else: + kw['host'] = info.host + kw['port'] = info.port + try: + conn = pymysql.connect(**kw) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + msg = str(e.args[1]) + else: + msg = str(e) + Utils.error('Failed to connect to mysql database %s: %s' % (info.db, msg)) + + return conn + + def execute_sql(self, conn, sql): + cursor = conn.cursor() + try: + cursor.execute(sql) + conn.commit() + except Exception as e: + msg = 
str(e) + Utils.warning('Failed to execute sql: %s' % msg) + + def apply_sqls(self, info, sql_path): + with open(sql_path, 'r') as fp: + lines = fp.read().split(';') + + conn = self.get_conn(info) + + for line in lines: + line = line.strip() + if not line: + continue + else: + self.execute_sql(conn, line) + + +def main(): + skipdb = os.environ.get('SEAFILE_SKIP_DB_UPGRADE', '').lower() + if skipdb in ('1', 'true', 'on'): + print('Database upgrade skipped because SEAFILE_SKIP_DB_UPGRADE=%s' % skipdb) + sys.exit() + version = sys.argv[1] + db_updater = DBUpdater.get_instance(version) + db_updater.update_db() + + return 0 + +if __name__ == '__main__': + main() diff --git a/scripts/upgrade/fix_mysql_user.py b/scripts/upgrade/fix_mysql_user.py new file mode 100644 index 0000000000..4e5d9ec4cc --- /dev/null +++ b/scripts/upgrade/fix_mysql_user.py @@ -0,0 +1,234 @@ +#!/usr/bin/env python + +import os +import sys +import re +import configparser +import getpass +from collections import namedtuple + +try: + import pymysql + HAS_PYMYSQL = True +except ImportError: + HAS_PYMYSQL = False + +MySQLDBInfo = namedtuple('MySQLDBInfo', 'host port username password db') + +class EnvManager(object): + def __init__(self): + self.upgrade_dir = os.path.abspath(os.path.dirname(__file__)) + self.install_path = os.path.dirname(self.upgrade_dir) + self.top_dir = os.path.dirname(self.install_path) + self.ccnet_dir = os.environ['CCNET_CONF_DIR'] + self.seafile_dir = os.environ['SEAFILE_CONF_DIR'] + +env_mgr = EnvManager() + +class Utils(object): + @staticmethod + def highlight(content, is_error=False): + '''Add ANSI color to content to get it highlighted on terminal''' + if is_error: + return '\x1b[1;31m%s\x1b[m' % content + else: + return '\x1b[1;32m%s\x1b[m' % content + + @staticmethod + def info(msg): + print(Utils.highlight('[INFO] ') + msg) + + @staticmethod + def error(msg): + print(Utils.highlight('[ERROR] ') + msg) + sys.exit(1) + + @staticmethod + def read_config(config_path, defaults): + cp = configparser.ConfigParser(defaults) + cp.read(config_path) + return cp + +def get_ccnet_mysql_info(): + ccnet_conf = os.path.join(env_mgr.ccnet_dir, 'ccnet.conf') + defaults = { + 'HOST': '127.0.0.1', + 'PORT': '3306', + } + + config = Utils.read_config(ccnet_conf, defaults) + db_section = 'Database' + + if not config.has_section(db_section): + return None + + type = config.get(db_section, 'ENGINE') + if type != 'mysql': + return None + + try: + host = config.get(db_section, 'HOST') + port = config.getint(db_section, 'PORT') + username = config.get(db_section, 'USER') + password = config.get(db_section, 'PASSWD') + db = config.get(db_section, 'DB') + except configparser.NoOptionError as e: + Utils.error('Database config in ccnet.conf is invalid: %s' % e) + + info = MySQLDBInfo(host, port, username, password, db) + return info + +def get_seafile_mysql_info(): + seafile_conf = os.path.join(env_mgr.seafile_dir, 'seafile.conf') + defaults = { + 'HOST': '127.0.0.1', + 'PORT': '3306', + } + config = Utils.read_config(seafile_conf, defaults) + db_section = 'database' + + if not config.has_section(db_section): + return None + + type = config.get(db_section, 'type') + if type != 'mysql': + return None + + try: + host = config.get(db_section, 'host') + port = config.getint(db_section, 'port') + username = config.get(db_section, 'user') + password = config.get(db_section, 'password') + db = config.get(db_section, 'db_name') + except configparser.NoOptionError as e: + Utils.error('Database config in seafile.conf is invalid: %s' % e) 
+ + info = MySQLDBInfo(host, port, username, password, db) + return info + +def get_seahub_mysql_info(): + sys.path.insert(0, env_mgr.top_dir) + try: + import seahub_settings# pylint: disable=F0401 + except ImportError as e: + Utils.error('Failed to import seahub_settings.py: %s' % e) + + if not hasattr(seahub_settings, 'DATABASES'): + return None + + try: + d = seahub_settings.DATABASES['default'] + if d['ENGINE'] != 'django.db.backends.mysql': + return None + + host = d.get('HOST', '127.0.0.1') + port = int(d.get('PORT', 3306)) + username = d['USER'] + password = d['PASSWORD'] + db = d['NAME'] + except KeyError: + Utils.error('Database config in seahub_settings.py is invalid: %s' % e) + + info = MySQLDBInfo(host, port, username, password, db) + return info + +def get_seafile_db_infos(): + ccnet_db_info = get_ccnet_mysql_info() + seafile_db_info = get_seafile_mysql_info() + seahub_db_info = get_seahub_mysql_info() + + infos = [ccnet_db_info, seafile_db_info, seahub_db_info] + + for info in infos: + if info is None: + return None + if info.host not in ('localhost', '127.0.0.1'): + return None + return infos + +def ask_root_password(port): + while True: + desc = 'What is the root password for mysql? ' + password = getpass.getpass(desc).strip() + if password: + try: + return check_mysql_user('root', password, port) + except InvalidAnswer as e: + print('\n%s\n' % e) + continue + +class InvalidAnswer(Exception): + def __init__(self, msg): + Exception.__init__(self) + self.msg = msg + + def __str__(self): + return self.msg + +def check_mysql_user(user, password, port): + print('\nverifying password of root user %s ... ' % user, end=' ') + kwargs = dict(host='localhost', + port=port, + user=user, + passwd=password) + + try: + conn = pymysql.connect(**kwargs) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + raise InvalidAnswer('Failed to connect to mysql server using user "%s" and password "***": %s' + % (user, e.args[1])) + else: + raise InvalidAnswer('Failed to connect to mysql server using user "%s" and password "***": %s' + % (user, e)) + + print('done') + return conn + +def apply_fix(root_conn, user, dbs): + for db in dbs: + grant_db_permission(root_conn, user, db) + + cursor = root_conn.cursor() + sql = """ + SELECT * + FROM mysql.user + WHERE Host = '%%' + AND password = '' + AND User = '%s' + """ % user + cursor.execute(sql) + if cursor.rowcount > 0: + sql = 'DROP USER `%s`@`%%`' % user + cursor.execute(sql) + +def grant_db_permission(conn, user, db): + cursor = conn.cursor() + sql = '''GRANT ALL PRIVILEGES ON `%s`.* to `%s`@localhost ''' \ + % (db, user) + + try: + cursor.execute(sql) + except Exception as e: + if isinstance(e, pymysql.err.OperationalError): + Utils.error('Failed to grant permission of database %s: %s' % (db, e.args[1])) + else: + Utils.error('Failed to grant permission of database %s: %s' % (db, e)) + + finally: + cursor.close() + +def main(): + dbinfos = get_seafile_db_infos() + if not dbinfos: + return + if dbinfos[0].username == 'root': + return + + if not HAS_PYMYSQL: + Utils.error('Python pymysql module is not found') + root_conn = ask_root_password(dbinfos[0].port) + apply_fix(root_conn, dbinfos[0].username, [info.db for info in dbinfos]) + +if __name__ == '__main__': + main() diff --git a/scripts/upgrade/minor-upgrade.sh b/scripts/upgrade/minor-upgrade.sh new file mode 100755 index 0000000000..62bc371f14 --- /dev/null +++ b/scripts/upgrade/minor-upgrade.sh @@ -0,0 +1,168 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # 
haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ + +echo +echo "-------------------------------------------------------------" +echo "This script would do the minor upgrade for you." +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars +seafile_server_symlink=${TOPDIR}/seafile-server-latest +default_conf_dir=${TOPDIR}/conf +default_ccnet_conf_dir=${TOPDIR}/ccnet +seahub_data_dir=${TOPDIR}/seahub-data +elasticsearch_config_file=${seafile_server_symlink}/pro/elasticsearch/config/jvm.options + +function migrate_avatars() { + echo + echo "------------------------------" + echo "migrating avatars ..." + echo + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + echo + echo "Error: avatars directory \"${dest_avatar_dir}\" does not exist" 2>&1 + echo + exit 1 + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo + echo "DONE" + echo "------------------------------" + echo +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +function update_latest_symlink() { + # update the symlink seafile-server to the new server version + echo + echo "updating seafile-server-latest symbolic link to ${INSTALLPATH} ..." + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi +} + +function move_old_elasticsearch_config_to_latest() { + # Move the elasticsearch's configuration file from the old version to the new version + echo + echo "Moving the elasticsearch's configuration file ..." + echo + if [[ -f ${elasticsearch_config_file} ]]; then + /bin/cp -avf ${elasticsearch_config_file} ${INSTALLPATH}/pro/elasticsearch/config/jvm.options + fi +} + +function read_seafile_data_dir() { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ -f ${seafile_ini} ]]; then + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! 
-d ${seafile_data_dir} ]]; then
+            echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exist."
+            echo "Please check it first, or create this directory yourself."
+            echo ""
+            exit 1;
+        else
+            if [[ ${seafile_data_dir} != ${TOPDIR}/seafile-data ]]; then
+                if [[ ! -L ${TOPDIR}/seafile-data ]]; then
+                    ln -s ${seafile_data_dir} ${TOPDIR}/seafile-data
+                    echo "Created the symlink ${TOPDIR}/seafile-data for ${seafile_data_dir}."
+                fi
+            fi
+        fi
+    fi
+}
+
+function rename_gunicorn_config() {
+    echo
+    echo "renaming gunicorn.conf to gunicorn.conf.py ..."
+    echo
+    if [[ -f "${default_conf_dir}/gunicorn.conf" ]]; then
+        mv "${default_conf_dir}/gunicorn.conf" "${default_conf_dir}/gunicorn.conf.py" 1>/dev/null
+    fi
+
+    if [[ -f "${default_conf_dir}/gunicorn.conf.py" ]]; then
+        echo 'Done'
+    else
+        echo "Failed to rename gunicorn.conf to gunicorn.conf.py."
+        exit 1
+    fi
+}
+
+read_seafile_data_dir;
+rename_gunicorn_config;
+migrate_avatars;
+
+move_old_customdir_outside;
+make_media_custom_symlink;
+
+move_old_elasticsearch_config_to_latest;
+
+update_latest_symlink;
+
+
+echo "DONE"
+echo "------------------------------"
+echo
diff --git a/scripts/upgrade/regenerate_secret_key.sh b/scripts/upgrade/regenerate_secret_key.sh
new file mode 100755
index 0000000000..b59f44d182
--- /dev/null
+++ b/scripts/upgrade/regenerate_secret_key.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+SCRIPT=$(readlink -f "$0")
+UPGRADEDIR=$(dirname "${SCRIPT}")
+INSTALLPATH=$(dirname "${UPGRADEDIR}")
+TOPDIR=$(dirname "${INSTALLPATH}")
+
+seahub_secret_keygen=${INSTALLPATH}/seahub/tools/secret_key_generator.py
+seahub_settings_py=${TOPDIR}/seahub_settings.py
+
+line="SECRET_KEY = \"$(python $seahub_secret_keygen)\""
+
+sed -i -e "/SECRET_KEY/c\\$line" $seahub_settings_py
diff --git a/scripts/upgrade/sql/1.6.0/mysql/seahub.sql b/scripts/upgrade/sql/1.6.0/mysql/seahub.sql
new file mode 100644
index 0000000000..c870654949
--- /dev/null
+++ b/scripts/upgrade/sql/1.6.0/mysql/seahub.sql
@@ -0,0 +1,47 @@
+CREATE TABLE IF NOT EXISTS `wiki_groupwiki` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `group_id` int(11) NOT NULL,
+  `repo_id` varchar(36) NOT NULL,
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `group_id` (`group_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `wiki_personalwiki` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `username` varchar(255) NOT NULL,
+  `repo_id` varchar(36) NOT NULL,
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `username` (`username`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `group_publicgroup` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `group_id` int(11) NOT NULL,
+  PRIMARY KEY (`id`),
+  KEY `group_publicgroup_425ae3c4` (`group_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `base_filediscuss` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `group_message_id` int(11) NOT NULL,
+  `repo_id` varchar(36) NOT NULL,
+  `path` longtext NOT NULL,
+  `path_hash` varchar(12) NOT NULL,
+  PRIMARY KEY (`id`),
+  KEY `base_filediscuss_3c1a2584` (`group_message_id`),
+  KEY `base_filediscuss_6844bd5a` (`path_hash`),
+  CONSTRAINT `group_message_id_refs_id_2ade200f` FOREIGN KEY (`group_message_id`) REFERENCES `group_groupmessage` (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `base_filelastmodifiedinfo` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `repo_id` varchar(36) NOT NULL,
+  `file_id` varchar(40) NOT NULL,
+  `file_path` longtext NOT NULL,
+  `file_path_hash` varchar(12) NOT NULL,
+  `last_modified` bigint(20) NOT 
NULL, + `email` varchar(75) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `repo_id` (`repo_id`,`file_path_hash`), + KEY `base_filelastmodifiedinfo_359081cc` (`repo_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 ; \ No newline at end of file diff --git a/scripts/upgrade/sql/1.6.0/sqlite3/seahub.sql b/scripts/upgrade/sql/1.6.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..b1a974401a --- /dev/null +++ b/scripts/upgrade/sql/1.6.0/sqlite3/seahub.sql @@ -0,0 +1,39 @@ +CREATE TABLE IF NOT EXISTS "wiki_groupwiki" ( + "id" integer NOT NULL PRIMARY KEY, + "group_id" integer NOT NULL UNIQUE, + "repo_id" varchar(36) NOT NULL +); + +CREATE TABLE IF NOT EXISTS "wiki_personalwiki" ( + "id" integer NOT NULL PRIMARY KEY, + "username" varchar(256) NOT NULL UNIQUE, + "repo_id" varchar(36) NOT NULL +); + +CREATE TABLE IF NOT EXISTS "group_publicgroup" ( + "id" integer NOT NULL PRIMARY KEY, + "group_id" integer NOT NULL +); +CREATE INDEX IF NOT EXISTS "group_publicgroup_bda51c3c" ON "group_publicgroup" ("group_id"); + +CREATE TABLE IF NOT EXISTS "base_filediscuss" ( + "id" integer NOT NULL PRIMARY KEY, + "group_message_id" integer NOT NULL REFERENCES "group_groupmessage" ("id"), + "repo_id" varchar(40) NOT NULL, + "path" text NOT NULL, + "path_hash" varchar(12) NOT NULL +); +CREATE INDEX IF NOT EXISTS "base_filediscuss_6844bd5a" ON "base_filediscuss" ("path_hash"); +CREATE INDEX IF NOT EXISTS "base_filediscuss_c3e5da7c" ON "base_filediscuss" ("group_message_id"); + +CREATE TABLE IF NOT EXISTS "base_filelastmodifiedinfo" ( + "id" integer NOT NULL PRIMARY KEY, + "repo_id" varchar(36) NOT NULL, + "file_id" varchar(40) NOT NULL, + "file_path" text NOT NULL, + "file_path_hash" varchar(12) NOT NULL, + "last_modified" bigint NOT NULL, + "email" varchar(75) NOT NULL, + UNIQUE ("repo_id", "file_path_hash") +); +CREATE INDEX IF NOT EXISTS "base_filelastmodifiedinfo_ca6f7e34" ON "base_filelastmodifiedinfo" ("repo_id"); \ No newline at end of file diff --git a/scripts/upgrade/sql/1.7.0/mysql/seafile.sql b/scripts/upgrade/sql/1.7.0/mysql/seafile.sql new file mode 100644 index 0000000000..5dfc278c04 --- /dev/null +++ b/scripts/upgrade/sql/1.7.0/mysql/seafile.sql @@ -0,0 +1 @@ +CREATE INDEX repousertoken_email on RepoUserToken(email); diff --git a/scripts/upgrade/sql/1.7.0/mysql/seahub.sql b/scripts/upgrade/sql/1.7.0/mysql/seahub.sql new file mode 100644 index 0000000000..6ad01f3366 --- /dev/null +++ b/scripts/upgrade/sql/1.7.0/mysql/seahub.sql @@ -0,0 +1,17 @@ +CREATE TABLE `message_usermessage` ( + `message_id` int(11) NOT NULL AUTO_INCREMENT, + `message` varchar(512) NOT NULL, + `from_email` varchar(75) NOT NULL, + `to_email` varchar(75) NOT NULL, + `timestamp` datetime NOT NULL, + `ifread` tinyint(1) NOT NULL, + PRIMARY KEY (`message_id`), + KEY `message_usermessage_8b1dd4eb` (`from_email`), + KEY `message_usermessage_590d1560` (`to_email`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `message_usermsglastcheck` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `check_time` datetime NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; \ No newline at end of file diff --git a/scripts/upgrade/sql/1.7.0/sqlite3/seafile.sql b/scripts/upgrade/sql/1.7.0/sqlite3/seafile.sql new file mode 100644 index 0000000000..9b5a1c32ee --- /dev/null +++ b/scripts/upgrade/sql/1.7.0/sqlite3/seafile.sql @@ -0,0 +1 @@ +CREATE INDEX IF NOT EXISTS repousertoken_email on RepoUserToken(email); \ No newline at end of file diff --git a/scripts/upgrade/sql/1.7.0/sqlite3/seahub.sql 
b/scripts/upgrade/sql/1.7.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..c21efddca5 --- /dev/null +++ b/scripts/upgrade/sql/1.7.0/sqlite3/seahub.sql @@ -0,0 +1,16 @@ +CREATE TABLE IF NOT EXISTS "message_usermessage" ( + "message_id" integer NOT NULL PRIMARY KEY, + "message" varchar(512) NOT NULL, + "from_email" varchar(75) NOT NULL, + "to_email" varchar(75) NOT NULL, + "timestamp" datetime NOT NULL, + "ifread" bool NOT NULL +) +; +CREATE TABLE IF NOT EXISTS "message_usermsglastcheck" ( + "id" integer NOT NULL PRIMARY KEY, + "check_time" datetime NOT NULL +) +; +CREATE INDEX IF NOT EXISTS "message_usermessage_8b1dd4eb" ON "message_usermessage" ("from_email"); +CREATE INDEX IF NOT EXISTS "message_usermessage_590d1560" ON "message_usermessage" ("to_email"); diff --git a/scripts/upgrade/sql/1.8.0/mysql/ccnet.sql b/scripts/upgrade/sql/1.8.0/mysql/ccnet.sql new file mode 100644 index 0000000000..5ee7e0cb05 --- /dev/null +++ b/scripts/upgrade/sql/1.8.0/mysql/ccnet.sql @@ -0,0 +1,2 @@ +-- ccnet +ALTER TABLE EmailUser MODIFY passwd varchar(64); diff --git a/scripts/upgrade/sql/1.8.0/mysql/seahub.sql b/scripts/upgrade/sql/1.8.0/mysql/seahub.sql new file mode 100644 index 0000000000..f1c79e791d --- /dev/null +++ b/scripts/upgrade/sql/1.8.0/mysql/seahub.sql @@ -0,0 +1,30 @@ +-- seahub +ALTER TABLE group_groupmessage MODIFY message varchar(2048); +ALTER TABLE group_messagereply MODIFY message varchar(2048); + +CREATE TABLE IF NOT EXISTS `share_privatefiledirshare` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `from_user` varchar(255) NOT NULL, + `to_user` varchar(255) NOT NULL, + `repo_id` varchar(36) NOT NULL, + `path` longtext NOT NULL, + `token` varchar(10) NOT NULL, + `permission` varchar(5) NOT NULL, + `s_type` varchar(5) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `token` (`token`), + KEY `share_privatefiledirshare_0e7efed3` (`from_user`), + KEY `share_privatefiledirshare_bc172800` (`to_user`), + KEY `share_privatefiledirshare_2059abe4` (`repo_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `message_usermsgattachment` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `user_msg_id` int(11) NOT NULL, + `priv_file_dir_share_id` int(11) DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `message_usermsgattachment_72f290f5` (`user_msg_id`), + KEY `message_usermsgattachment_cee41a9a` (`priv_file_dir_share_id`), + CONSTRAINT `priv_file_dir_share_id_refs_id_163f8f83` FOREIGN KEY (`priv_file_dir_share_id`) REFERENCES `share_privatefiledirshare` (`id`), + CONSTRAINT `user_msg_id_refs_message_id_debb82ad` FOREIGN KEY (`user_msg_id`) REFERENCES `message_usermessage` (`message_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; \ No newline at end of file diff --git a/scripts/upgrade/sql/1.8.0/sqlite3/seahub.sql b/scripts/upgrade/sql/1.8.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..52658083c1 --- /dev/null +++ b/scripts/upgrade/sql/1.8.0/sqlite3/seahub.sql @@ -0,0 +1,20 @@ +CREATE TABLE IF NOT EXISTS "share_privatefiledirshare" ( + "id" integer NOT NULL PRIMARY KEY, + "from_user" varchar(255) NOT NULL, + "to_user" varchar(255) NOT NULL, + "repo_id" varchar(36) NOT NULL, + "path" text NOT NULL, + "token" varchar(10) NOT NULL UNIQUE, + "permission" varchar(5) NOT NULL, + "s_type" varchar(5) NOT NULL +); + +CREATE TABLE IF NOT EXISTS "message_usermsgattachment" ( + "id" integer NOT NULL PRIMARY KEY, + "user_msg_id" integer NOT NULL REFERENCES "message_usermessage" ("message_id"), + "priv_file_dir_share_id" integer REFERENCES "share_privatefiledirshare" ("id") +); + +CREATE INDEX IF NOT EXISTS 
"share_privatefiledirshare_0e7efed3" ON "share_privatefiledirshare" ("from_user"); +CREATE INDEX IF NOT EXISTS "share_privatefiledirshare_2059abe4" ON "share_privatefiledirshare" ("repo_id"); +CREATE INDEX IF NOT EXISTS "share_privatefiledirshare_bc172800" ON "share_privatefiledirshare" ("to_user"); \ No newline at end of file diff --git a/scripts/upgrade/sql/2.0.0/mysql/seahub.sql b/scripts/upgrade/sql/2.0.0/mysql/seahub.sql new file mode 100644 index 0000000000..f7cb4ab722 --- /dev/null +++ b/scripts/upgrade/sql/2.0.0/mysql/seahub.sql @@ -0,0 +1,24 @@ +-- seahub +CREATE TABLE IF NOT EXISTS `base_groupenabledmodule` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `group_id` varchar(10) NOT NULL, + `module_name` varchar(20) NOT NULL, + PRIMARY KEY (`id`), + KEY `base_groupenabledmodule_dc00373b` (`group_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `base_userenabledmodule` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `username` varchar(255) NOT NULL, + `module_name` varchar(20) NOT NULL, + PRIMARY KEY (`id`), + KEY `base_userenabledmodule_ee0cafa2` (`username`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `base_userlastlogin` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `username` varchar(255) NOT NULL, + `last_login` datetime NOT NULL, + PRIMARY KEY (`id`), + KEY `base_userlastlogin_ee0cafa2` (`username`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/scripts/upgrade/sql/2.0.0/sqlite3/seahub.sql b/scripts/upgrade/sql/2.0.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..9d0dae3e45 --- /dev/null +++ b/scripts/upgrade/sql/2.0.0/sqlite3/seahub.sql @@ -0,0 +1,20 @@ +CREATE TABLE IF NOT EXISTS "base_groupenabledmodule" ( + "id" integer NOT NULL PRIMARY KEY, + "group_id" varchar(10) NOT NULL, + "module_name" varchar(20) NOT NULL +); + +CREATE TABLE IF NOT EXISTS "base_userenabledmodule" ( + "id" integer NOT NULL PRIMARY KEY, + "username" varchar(255) NOT NULL, + "module_name" varchar(20) NOT NULL +); + +CREATE TABLE IF NOT EXISTS "base_userlastlogin" ( + "id" integer NOT NULL PRIMARY KEY, + "username" varchar(255) NOT NULL, + "last_login" datetime NOT NULL +); + +CREATE INDEX IF NOT EXISTS "base_groupenabledmodule_dc00373b" ON "base_groupenabledmodule" ("group_id"); +CREATE INDEX IF NOT EXISTS "base_userenabledmodule_ee0cafa2" ON "base_userenabledmodule" ("username"); diff --git a/scripts/upgrade/sql/2.1.0/mysql/seahub.sql b/scripts/upgrade/sql/2.1.0/mysql/seahub.sql new file mode 100644 index 0000000000..391b688beb --- /dev/null +++ b/scripts/upgrade/sql/2.1.0/mysql/seahub.sql @@ -0,0 +1,53 @@ +CREATE TABLE IF NOT EXISTS `captcha_captchastore` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `challenge` varchar(32) NOT NULL, + `response` varchar(32) NOT NULL, + `hashkey` varchar(40) NOT NULL, + `expiration` datetime NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `hashkey` (`hashkey`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +DROP TABLE IF EXISTS `notifications_usernotification`; +CREATE TABLE IF NOT EXISTS `notifications_usernotification` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `to_user` varchar(255) NOT NULL, + `msg_type` varchar(30) NOT NULL, + `detail` longtext NOT NULL, + `timestamp` datetime NOT NULL, + `seen` tinyint(1) NOT NULL, + PRIMARY KEY (`id`), + KEY `notifications_usernotification_bc172800` (`to_user`), + KEY `notifications_usernotification_265e5521` (`msg_type`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `options_useroptions` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `email` varchar(255) NOT 
NULL, + `option_key` varchar(50) NOT NULL, + `option_val` varchar(50) NOT NULL, + PRIMARY KEY (`id`), + KEY `options_useroptions_830a6ccb` (`email`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `profile_detailedprofile` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `user` varchar(255) NOT NULL, + `department` varchar(512) NOT NULL, + `telephone` varchar(100) NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `share_uploadlinkshare` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `username` varchar(255) NOT NULL, + `repo_id` varchar(36) NOT NULL, + `path` longtext NOT NULL, + `token` varchar(10) NOT NULL, + `ctime` datetime NOT NULL, + `view_cnt` int(11) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `token` (`token`), + KEY `share_uploadlinkshare_ee0cafa2` (`username`), + KEY `share_uploadlinkshare_2059abe4` (`repo_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/scripts/upgrade/sql/2.1.0/sqlite3/seahub.sql b/scripts/upgrade/sql/2.1.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..f6b0c9c115 --- /dev/null +++ b/scripts/upgrade/sql/2.1.0/sqlite3/seahub.sql @@ -0,0 +1,48 @@ +CREATE TABLE IF NOT EXISTS "captcha_captchastore" ( + "id" integer NOT NULL PRIMARY KEY, + "challenge" varchar(32) NOT NULL, + "response" varchar(32) NOT NULL, + "hashkey" varchar(40) NOT NULL UNIQUE, + "expiration" datetime NOT NULL +); + +DROP TABLE IF EXISTS "notifications_usernotification"; +CREATE TABLE IF NOT EXISTS "notifications_usernotification" ( + "id" integer NOT NULL PRIMARY KEY, + "to_user" varchar(255) NOT NULL, + "msg_type" varchar(30) NOT NULL, + "detail" text NOT NULL, + "timestamp" datetime NOT NULL, + "seen" bool NOT NULL +); + +CREATE INDEX IF NOT EXISTS "notifications_usernotification_265e5521" ON "notifications_usernotification" ("msg_type"); +CREATE INDEX IF NOT EXISTS "notifications_usernotification_bc172800" ON "notifications_usernotification" ("to_user"); + +CREATE TABLE IF NOT EXISTS "options_useroptions" ( + "id" integer NOT NULL PRIMARY KEY, + "email" varchar(255) NOT NULL, + "option_key" varchar(50) NOT NULL, + "option_val" varchar(50) NOT NULL +); +CREATE INDEX IF NOT EXISTS "options_useroptions_830a6ccb" ON "options_useroptions" ("email"); + +CREATE TABLE IF NOT EXISTS "profile_detailedprofile" ( + "id" integer NOT NULL PRIMARY KEY, + "user" varchar(255) NOT NULL, + "department" varchar(512) NOT NULL, + "telephone" varchar(100) NOT NULL +); +CREATE INDEX IF NOT EXISTS "profile_detailedprofile_6340c63c" ON "profile_detailedprofile" ("user"); + +CREATE TABLE IF NOT EXISTS "share_uploadlinkshare" ( + "id" integer NOT NULL PRIMARY KEY, + "username" varchar(255) NOT NULL, + "repo_id" varchar(36) NOT NULL, + "path" text NOT NULL, + "token" varchar(10) NOT NULL UNIQUE, + "ctime" datetime NOT NULL, + "view_cnt" integer NOT NULL +); +CREATE INDEX IF NOT EXISTS "share_uploadlinkshare_2059abe4" ON "share_uploadlinkshare" ("repo_id"); +CREATE INDEX IF NOT EXISTS "share_uploadlinkshare_ee0cafa2" ON "share_uploadlinkshare" ("username"); diff --git a/scripts/upgrade/sql/2.2.0/mysql/ccnet.sql b/scripts/upgrade/sql/2.2.0/mysql/ccnet.sql new file mode 100644 index 0000000000..88385eedd6 --- /dev/null +++ b/scripts/upgrade/sql/2.2.0/mysql/ccnet.sql @@ -0,0 +1,2 @@ +ALTER TABLE EmailUser MODIFY passwd varchar(256); + diff --git a/scripts/upgrade/sql/3.0.0/mysql/seahub.sql b/scripts/upgrade/sql/3.0.0/mysql/seahub.sql new file mode 100644 index 0000000000..7e656ba0e4 --- /dev/null +++ 
b/scripts/upgrade/sql/3.0.0/mysql/seahub.sql @@ -0,0 +1,13 @@ +CREATE TABLE IF NOT EXISTS `api2_tokenv2` ( + `key` varchar(40) NOT NULL, + `user` varchar(255) NOT NULL, + `platform` varchar(32) NOT NULL, + `device_id` varchar(40) NOT NULL, + `device_name` varchar(40) NOT NULL, + `platform_version` varchar(16) NOT NULL, + `client_version` varchar(16) NOT NULL, + `last_accessed` datetime NOT NULL, + `last_login_ip` char(39) DEFAULT NULL, + PRIMARY KEY (`key`), + UNIQUE KEY `user` (`user`,`platform`,`device_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/scripts/upgrade/sql/3.0.0/sqlite3/seahub.sql b/scripts/upgrade/sql/3.0.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..c05453ac37 --- /dev/null +++ b/scripts/upgrade/sql/3.0.0/sqlite3/seahub.sql @@ -0,0 +1,12 @@ +CREATE TABLE IF NOT EXISTS "api2_tokenv2" ( + "key" varchar(40) NOT NULL PRIMARY KEY, + "user" varchar(255) NOT NULL, + "platform" varchar(32) NOT NULL, + "device_id" varchar(40) NOT NULL, + "device_name" varchar(40) NOT NULL, + "platform_version" varchar(16) NOT NULL, + "client_version" varchar(16) NOT NULL, + "last_accessed" datetime NOT NULL, + "last_login_ip" char(39), + UNIQUE ("user", "platform", "device_id") +); diff --git a/scripts/upgrade/sql/3.1.0/mysql/seahub.sql b/scripts/upgrade/sql/3.1.0/mysql/seahub.sql new file mode 100644 index 0000000000..ad139cd160 --- /dev/null +++ b/scripts/upgrade/sql/3.1.0/mysql/seahub.sql @@ -0,0 +1,20 @@ +alter table message_usermessage add column sender_deleted_at datetime DEFAULT NULL; +alter table message_usermessage add column recipient_deleted_at datetime DEFAULT NULL; + +alter table share_fileshare add column password varchar(128); +alter table share_fileshare add column expire_date datetime; +alter table share_uploadlinkshare add column password varchar(128); +alter table share_uploadlinkshare add column expire_date datetime; +alter table profile_profile add column lang_code varchar(50) DEFAULT NULL; + +CREATE TABLE IF NOT EXISTS `share_orgfileshare` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `org_id` int(11) NOT NULL, + `file_share_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `file_share_id` (`file_share_id`), + KEY `share_orgfileshare_944dadb6` (`org_id`), + CONSTRAINT `file_share_id_refs_id_bd2fd9f8` FOREIGN KEY (`file_share_id`) REFERENCES `share_fileshare` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `base_userstarredfiles` ADD INDEX `base_userstarredfiles_email` (email); diff --git a/scripts/upgrade/sql/3.1.0/sqlite3/seahub.sql b/scripts/upgrade/sql/3.1.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..42846156a4 --- /dev/null +++ b/scripts/upgrade/sql/3.1.0/sqlite3/seahub.sql @@ -0,0 +1,16 @@ +alter table "message_usermessage" add column "sender_deleted_at" datetime; +alter table "message_usermessage" add column "recipient_deleted_at" datetime; +alter table "share_fileshare" add column "password" varchar(128); +alter table "share_fileshare" add column "expire_date" datetime; +alter table "share_uploadlinkshare" add column "password" varchar(128); +alter table "share_uploadlinkshare" add column "expire_date" datetime; +alter table "profile_profile" add column "lang_code" varchar(50); + +CREATE TABLE IF NOT EXISTS "share_orgfileshare" ( + "id" integer NOT NULL PRIMARY KEY, + "org_id" integer NOT NULL, + "file_share_id" integer NOT NULL UNIQUE REFERENCES "share_fileshare" ("id") +); +CREATE INDEX IF NOT EXISTS "share_orgfileshare_944dadb6" ON "share_orgfileshare" ("org_id"); + +CREATE INDEX IF NOT EXISTS 
"base_userstarredfiles_email" on "base_userstarredfiles" ("email"); diff --git a/scripts/upgrade/sql/4.1.0/mysql/ccnet.sql b/scripts/upgrade/sql/4.1.0/mysql/ccnet.sql new file mode 100644 index 0000000000..42e78881ea --- /dev/null +++ b/scripts/upgrade/sql/4.1.0/mysql/ccnet.sql @@ -0,0 +1 @@ +ALTER TABLE `Group` ADD type VARCHAR(32); diff --git a/scripts/upgrade/sql/4.1.0/mysql/seafile.sql b/scripts/upgrade/sql/4.1.0/mysql/seafile.sql new file mode 100644 index 0000000000..f82e4b75a4 --- /dev/null +++ b/scripts/upgrade/sql/4.1.0/mysql/seafile.sql @@ -0,0 +1,30 @@ +ALTER TABLE SharedRepo MODIFY from_email VARCHAR(255); +ALTER TABLE SharedRepo MODIFY to_email VARCHAR(255); +ALTER TABLE SharedRepo ADD INDEX (from_email); +ALTER TABLE SharedRepo ADD INDEX (to_email); + +CREATE TABLE IF NOT EXISTS OrgSharedRepo ( + id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, + org_id INT, + repo_id CHAR(37) , + from_email VARCHAR(255), + to_email VARCHAR(255), + permission CHAR(15), + INDEX (org_id, repo_id), + INDEX(from_email), + INDEX(to_email) +) ENGINE=INNODB; + +ALTER TABLE OrgSharedRepo MODIFY from_email VARCHAR(255); +ALTER TABLE OrgSharedRepo MODIFY to_email VARCHAR(255); + +CREATE TABLE IF NOT EXISTS RepoTrash ( + repo_id CHAR(36) PRIMARY KEY, + repo_name VARCHAR(255), + head_id CHAR(40), + owner_id VARCHAR(255), + size BIGINT(20), + org_id INTEGER, + INDEX(owner_id), + INDEX(org_id) +) ENGINE=INNODB; diff --git a/scripts/upgrade/sql/4.1.0/sqlite3/ccnet/groupmgr.sql b/scripts/upgrade/sql/4.1.0/sqlite3/ccnet/groupmgr.sql new file mode 100644 index 0000000000..42e78881ea --- /dev/null +++ b/scripts/upgrade/sql/4.1.0/sqlite3/ccnet/groupmgr.sql @@ -0,0 +1 @@ +ALTER TABLE `Group` ADD type VARCHAR(32); diff --git a/scripts/upgrade/sql/4.1.0/sqlite3/seafile.sql b/scripts/upgrade/sql/4.1.0/sqlite3/seafile.sql new file mode 100644 index 0000000000..71147f2416 --- /dev/null +++ b/scripts/upgrade/sql/4.1.0/sqlite3/seafile.sql @@ -0,0 +1,14 @@ +CREATE INDEX IF NOT EXISTS FromEmailIndex on SharedRepo (from_email); +CREATE INDEX IF NOT EXISTS ToEmailIndex on SharedRepo (to_email); + +CREATE TABLE IF NOT EXISTS RepoTrash ( + repo_id CHAR(36) PRIMARY KEY, + repo_name VARCHAR(255), + head_id CHAR(40), + owner_id VARCHAR(255), + size BIGINT UNSIGNED, + org_id INTEGER +); + +CREATE INDEX IF NOT EXISTS repotrash_owner_id_idx ON RepoTrash(owner_id); +CREATE INDEX IF NOT EXISTS repotrash_org_id_idx ON RepoTrash(org_id); diff --git a/scripts/upgrade/sql/4.2.0/mysql/seafile.sql b/scripts/upgrade/sql/4.2.0/mysql/seafile.sql new file mode 100644 index 0000000000..45a06dd949 --- /dev/null +++ b/scripts/upgrade/sql/4.2.0/mysql/seafile.sql @@ -0,0 +1 @@ +alter table RepoTrash add del_time BIGINT; diff --git a/scripts/upgrade/sql/4.2.0/mysql/seahub.sql b/scripts/upgrade/sql/4.2.0/mysql/seahub.sql new file mode 100644 index 0000000000..b62681844d --- /dev/null +++ b/scripts/upgrade/sql/4.2.0/mysql/seahub.sql @@ -0,0 +1,18 @@ +CREATE TABLE IF NOT EXISTS `base_clientlogintoken` ( + `token` varchar(32) NOT NULL, + `username` varchar(255) NOT NULL, + `timestamp` datetime NOT NULL, + PRIMARY KEY (`token`), + KEY `base_clientlogintoken_ee0cafa2` (`username`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `organizations_orgmemberquota` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `org_id` int(11) NOT NULL, + `quota` int(11) NOT NULL, + PRIMARY KEY (`id`), + KEY `organizations_orgmemberquota_944dadb6` (`org_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +REPLACE INTO django_content_type VALUES(44,'client login 
token','base','clientlogintoken'); +REPLACE INTO django_content_type VALUES(45,'org member quota','organizations','orgmemberquota'); diff --git a/scripts/upgrade/sql/4.2.0/sqlite3/seafile.sql b/scripts/upgrade/sql/4.2.0/sqlite3/seafile.sql new file mode 100644 index 0000000000..45a06dd949 --- /dev/null +++ b/scripts/upgrade/sql/4.2.0/sqlite3/seafile.sql @@ -0,0 +1 @@ +alter table RepoTrash add del_time BIGINT; diff --git a/scripts/upgrade/sql/4.2.0/sqlite3/seahub.sql b/scripts/upgrade/sql/4.2.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..6bd3b520aa --- /dev/null +++ b/scripts/upgrade/sql/4.2.0/sqlite3/seahub.sql @@ -0,0 +1,18 @@ +CREATE TABLE IF NOT EXISTS "base_clientlogintoken" ( + "token" varchar(32) NOT NULL PRIMARY KEY, + "username" varchar(255) NOT NULL, + "timestamp" datetime NOT NULL +); + +CREATE INDEX IF NOT EXISTS "base_clientlogintoken_ee0cafa2" ON "base_clientlogintoken" ("username"); + +CREATE TABLE IF NOT EXISTS "organizations_orgmemberquota" ( + "id" integer NOT NULL PRIMARY KEY, + "org_id" integer NOT NULL, + "quota" integer NOT NULL +); + +CREATE INDEX IF NOT EXISTS "organizations_orgmemberquota_944dadb6" ON "organizations_orgmemberquota" ("org_id"); + +REPLACE INTO "django_content_type" VALUES(44,'client login token','base','clientlogintoken'); +REPLACE INTO "django_content_type" VALUES(45,'org member quota','organizations','orgmemberquota'); diff --git a/scripts/upgrade/sql/4.3.0/mysql/.gitkeep b/scripts/upgrade/sql/4.3.0/mysql/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/upgrade/sql/4.3.0/sqlite3/.gitkeep b/scripts/upgrade/sql/4.3.0/sqlite3/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/upgrade/sql/4.4.0/mysql/.gitkeep b/scripts/upgrade/sql/4.4.0/mysql/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/upgrade/sql/4.4.0/sqlite3/.gitkeep b/scripts/upgrade/sql/4.4.0/sqlite3/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/upgrade/sql/5.0.0/mysql/seahub.sql b/scripts/upgrade/sql/5.0.0/mysql/seahub.sql new file mode 100644 index 0000000000..c51a14d698 --- /dev/null +++ b/scripts/upgrade/sql/5.0.0/mysql/seahub.sql @@ -0,0 +1,17 @@ +CREATE TABLE IF NOT EXISTS `constance_config` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `key` varchar(255) NOT NULL, + `value` longtext NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `key` (`key`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `profile_profile` ADD `login_id` varchar(225) DEFAULT NULL; +ALTER TABLE `profile_profile` ADD `contact_email` varchar(225) DEFAULT NULL; +ALTER TABLE `profile_profile` ADD `institution` varchar(225) DEFAULT NULL; + +ALTER TABLE `profile_profile` ADD UNIQUE INDEX (`login_id`); +ALTER TABLE `profile_profile` ADD INDEX (`contact_email`); +ALTER TABLE `profile_profile` ADD INDEX (`institution`); + + diff --git a/scripts/upgrade/sql/5.0.0/sqlite3/seahub.sql b/scripts/upgrade/sql/5.0.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..a6714968da --- /dev/null +++ b/scripts/upgrade/sql/5.0.0/sqlite3/seahub.sql @@ -0,0 +1,13 @@ +CREATE TABLE IF NOT EXISTS "constance_config" ( + "id" integer NOT NULL PRIMARY KEY, + "key" varchar(255) NOT NULL UNIQUE, + "value" text NOT NULL +); + +ALTER TABLE "profile_profile" ADD COLUMN "login_id" varchar(225); +ALTER TABLE "profile_profile" ADD COLUMN "contact_email" varchar(225); +ALTER TABLE "profile_profile" ADD COLUMN "institution" varchar(225); + +CREATE UNIQUE INDEX "profile_profile_1b43c217" ON "profile_profile" 
("login_id"); +CREATE INDEX "profile_profile_3b46cb17" ON "profile_profile" ("contact_email"); +CREATE INDEX "profile_profile_71bbc151" ON "profile_profile" ("institution"); diff --git a/scripts/upgrade/sql/5.1.0/mysql/seafile.sql b/scripts/upgrade/sql/5.1.0/mysql/seafile.sql new file mode 100644 index 0000000000..2742df03eb --- /dev/null +++ b/scripts/upgrade/sql/5.1.0/mysql/seafile.sql @@ -0,0 +1 @@ +alter table RepoTokenPeerInfo add client_ver varchar(20); \ No newline at end of file diff --git a/scripts/upgrade/sql/5.1.0/mysql/seahub.sql b/scripts/upgrade/sql/5.1.0/mysql/seahub.sql new file mode 100644 index 0000000000..056fd8a3d0 --- /dev/null +++ b/scripts/upgrade/sql/5.1.0/mysql/seahub.sql @@ -0,0 +1,124 @@ +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; + + +CREATE TABLE IF NOT EXISTS `post_office_attachment` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `file` varchar(100) NOT NULL, + `name` varchar(255) NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `post_office_attachment_emails` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `attachment_id` int(11) NOT NULL, + `email_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `attachment_id` (`attachment_id`,`email_id`), + KEY `post_office_attachment_emails_4be595e7` (`attachment_id`), + KEY `post_office_attachment_emails_830a6ccb` (`email_id`), + CONSTRAINT `attachment_id_refs_id_2d59d8fc` FOREIGN KEY (`attachment_id`) REFERENCES `post_office_attachment` (`id`), + CONSTRAINT `email_id_refs_id_061d81d8` FOREIGN KEY (`email_id`) REFERENCES `post_office_email` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `post_office_emailtemplate` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `name` varchar(255) NOT NULL, + `description` longtext NOT NULL, + `created` datetime NOT NULL, + `last_updated` datetime NOT NULL, + `subject` varchar(255) NOT NULL, + `content` longtext NOT NULL, + `html_content` longtext NOT NULL, + `language` varchar(12) NOT NULL, + `default_template_id` int(11) DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `language` (`language`,`default_template_id`), + KEY `post_office_emailtemplate_84c7951d` (`default_template_id`), + CONSTRAINT `default_template_id_refs_id_a2bc649e` FOREIGN KEY (`default_template_id`) REFERENCES `post_office_emailtemplate` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `post_office_email` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `from_email` varchar(254) NOT NULL, + `to` longtext NOT NULL, + `cc` longtext NOT NULL, + `bcc` longtext NOT NULL, + `subject` varchar(255) NOT NULL, + `message` longtext NOT NULL, + `html_message` longtext NOT NULL, + `status` smallint(5) unsigned DEFAULT NULL, + `priority` smallint(5) unsigned DEFAULT NULL, + `created` datetime NOT NULL, + `last_updated` datetime NOT NULL, + `scheduled_time` datetime DEFAULT NULL, + `headers` longtext, + `template_id` int(11) DEFAULT NULL, + `context` longtext, + `backend_alias` 
varchar(64) NOT NULL, + PRIMARY KEY (`id`), + KEY `post_office_email_48fb58bb` (`status`), + KEY `post_office_email_63b5ea41` (`created`), + KEY `post_office_email_470d4868` (`last_updated`), + KEY `post_office_email_c83ff05e` (`scheduled_time`), + KEY `post_office_email_43d23afc` (`template_id`), + CONSTRAINT `template_id_refs_id_a5d97662` FOREIGN KEY (`template_id`) REFERENCES `post_office_emailtemplate` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +CREATE TABLE IF NOT EXISTS `post_office_log` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `email_id` int(11) NOT NULL, + `date` datetime NOT NULL, + `status` smallint(5) unsigned NOT NULL, + `exception_type` varchar(255) NOT NULL, + `message` longtext NOT NULL, + PRIMARY KEY (`id`), + KEY `post_office_log_830a6ccb` (`email_id`), + CONSTRAINT `email_id_refs_id_3d87f587` FOREIGN KEY (`email_id`) REFERENCES `post_office_email` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `institutions_institution` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `name` varchar(200) NOT NULL, + `create_time` datetime NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `institutions_institutionadmin` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `user` varchar(254) NOT NULL, + `institution_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + KEY `i_institution_id_5f792d6fe9a87ac9_fk_institutions_institution_id` (`institution_id`), + CONSTRAINT `i_institution_id_5f792d6fe9a87ac9_fk_institutions_institution_id` FOREIGN KEY (`institution_id`) REFERENCES `institutions_institution` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `sysadmin_extra_userloginlog` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `username` varchar(255) NOT NULL, + `login_date` datetime NOT NULL, + `login_ip` varchar(128) NOT NULL, + PRIMARY KEY (`id`), + KEY `sysadmin_extra_userloginlog_14c4b06b` (`username`), + KEY `sysadmin_extra_userloginlog_28ed1ef0` (`login_date`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +ALTER TABLE `sysadmin_extra_userloginlog` MODIFY `login_ip` VARCHAR(128); + +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; diff --git a/scripts/upgrade/sql/5.1.0/sqlite3/seafile.sql b/scripts/upgrade/sql/5.1.0/sqlite3/seafile.sql new file mode 100644 index 0000000000..2742df03eb --- /dev/null +++ b/scripts/upgrade/sql/5.1.0/sqlite3/seafile.sql @@ -0,0 +1 @@ +alter table RepoTokenPeerInfo add client_ver varchar(20); \ No newline at end of file diff --git a/scripts/upgrade/sql/5.1.0/sqlite3/seahub.sql b/scripts/upgrade/sql/5.1.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..6e68aa0609 --- /dev/null +++ b/scripts/upgrade/sql/5.1.0/sqlite3/seahub.sql @@ -0,0 +1,72 @@ +CREATE TABLE IF NOT EXISTS "post_office_attachment" ( + "id" integer NOT NULL PRIMARY KEY, + "file" varchar(100) NOT NULL, + "name" varchar(255) NOT NULL +); +CREATE TABLE IF NOT EXISTS "post_office_attachment_emails" ( + "id" integer NOT NULL PRIMARY KEY, + "attachment_id" integer NOT NULL, + "email_id" integer NOT NULL REFERENCES "post_office_email" ("id"), + UNIQUE ("attachment_id", "email_id") +); +CREATE TABLE IF NOT EXISTS 
"post_office_email" ( + "id" integer NOT NULL PRIMARY KEY, + "from_email" varchar(254) NOT NULL, + "to" text NOT NULL, + "cc" text NOT NULL, + "bcc" text NOT NULL, + "subject" varchar(255) NOT NULL, + "message" text NOT NULL, + "html_message" text NOT NULL, + "status" smallint unsigned, + "priority" smallint unsigned, + "created" datetime NOT NULL, + "last_updated" datetime NOT NULL, + "scheduled_time" datetime, + "headers" text, + "template_id" integer, + "context" text, + "backend_alias" varchar(64) NOT NULL +); +CREATE TABLE IF NOT EXISTS "post_office_emailtemplate" ( + "id" integer NOT NULL PRIMARY KEY, + "name" varchar(255) NOT NULL, + "description" text NOT NULL, + "created" datetime NOT NULL, + "last_updated" datetime NOT NULL, + "subject" varchar(255) NOT NULL, + "content" text NOT NULL, + "html_content" text NOT NULL, + "language" varchar(12) NOT NULL, + "default_template_id" integer, + UNIQUE ("language", "default_template_id") +); +CREATE TABLE IF NOT EXISTS "post_office_log" ( + "id" integer NOT NULL PRIMARY KEY, + "email_id" integer NOT NULL REFERENCES "post_office_email" ("id"), + "date" datetime NOT NULL, + "status" smallint unsigned NOT NULL, + "exception_type" varchar(255) NOT NULL, + "message" text NOT NULL +); +CREATE TABLE IF NOT EXISTS "institutions_institution" ( + "id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, + "name" varchar(200) NOT NULL, + "create_time" datetime NOT NULL +); +CREATE TABLE IF NOT EXISTS "institutions_institutionadmin" ( + "id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, + "user" varchar(254) NOT NULL, + "institution_id" integer NOT NULL REFERENCES "institutions_institution" ("id") +); + +CREATE INDEX IF NOT EXISTS "post_office_attachment_emails_4be595e7" ON "post_office_attachment_emails" ("attachment_id"); +CREATE INDEX IF NOT EXISTS "post_office_attachment_emails_830a6ccb" ON "post_office_attachment_emails" ("email_id"); +CREATE INDEX IF NOT EXISTS "post_office_email_43d23afc" ON "post_office_email" ("template_id"); +CREATE INDEX IF NOT EXISTS "post_office_email_470d4868" ON "post_office_email" ("last_updated"); +CREATE INDEX IF NOT EXISTS "post_office_email_48fb58bb" ON "post_office_email" ("status"); +CREATE INDEX IF NOT EXISTS "post_office_email_63b5ea41" ON "post_office_email" ("created"); +CREATE INDEX IF NOT EXISTS "post_office_email_c83ff05e" ON "post_office_email" ("scheduled_time"); +CREATE INDEX IF NOT EXISTS "post_office_emailtemplate_84c7951d" ON "post_office_emailtemplate" ("default_template_id"); +CREATE INDEX IF NOT EXISTS "post_office_log_830a6ccb" ON "post_office_log" ("email_id"); +CREATE INDEX "institutions_institutionadmin_a964baeb" ON "institutions_institutionadmin" ("institution_id"); diff --git a/scripts/upgrade/sql/6.0.0/mysql/seahub.sql b/scripts/upgrade/sql/6.0.0/mysql/seahub.sql new file mode 100644 index 0000000000..fe9516a1c6 --- /dev/null +++ b/scripts/upgrade/sql/6.0.0/mysql/seahub.sql @@ -0,0 +1,104 @@ +ALTER TABLE api2_tokenv2 ADD COLUMN wiped_at DATETIME DEFAULT NULL; +ALTER TABLE api2_tokenv2 ADD COLUMN created_at DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00"; + +CREATE TABLE IF NOT EXISTS `base_filecomment` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `parent_path` longtext NOT NULL, + `repo_id_parent_path_md5` varchar(100) NOT NULL, + `item_name` longtext NOT NULL, + `author` varchar(255) NOT NULL, + `comment` longtext NOT NULL, + `created_at` datetime NOT NULL, + `updated_at` datetime NOT NULL, + PRIMARY KEY (`id`), + KEY `base_filecomment_9a8c79bf` (`repo_id`), + KEY 
`base_filecomment_c5bf47d4` (`repo_id_parent_path_md5`), + KEY `base_filecomment_02bd92fa` (`author`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `termsandconditions_termsandconditions` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `slug` varchar(50) NOT NULL, + `name` longtext NOT NULL, + `version_number` decimal(6,2) NOT NULL, + `text` longtext, + `info` longtext, + `date_active` datetime DEFAULT NULL, + `date_created` datetime NOT NULL, + PRIMARY KEY (`id`), + KEY `termsandconditions_termsandconditions_2dbcba41` (`slug`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `termsandconditions_usertermsandconditions` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `username` varchar(255) NOT NULL, + `ip_address` char(39) DEFAULT NULL, + `date_accepted` datetime NOT NULL, + `terms_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `termsandconditions_usertermsandcon_username_f4ab54cafa29322_uniq` (`username`,`terms_id`), + KEY `e4da106203f3f13ff96409b55de6f515` (`terms_id`), + CONSTRAINT `e4da106203f3f13ff96409b55de6f515` FOREIGN KEY (`terms_id`) REFERENCES `termsandconditions_termsandconditions` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `two_factor_totpdevice` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `user` varchar(255) NOT NULL, + `name` varchar(64) NOT NULL, + `confirmed` tinyint(1) NOT NULL, + `key` varchar(80) NOT NULL, + `step` smallint(5) unsigned NOT NULL, + `t0` bigint(20) NOT NULL, + `digits` smallint(5) unsigned NOT NULL, + `tolerance` smallint(5) unsigned NOT NULL, + `drift` smallint(6) NOT NULL, + `last_t` bigint(20) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `user` (`user`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `two_factor_phonedevice` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `user` varchar(255) NOT NULL, + `name` varchar(64) NOT NULL, + `confirmed` tinyint(1) NOT NULL, + `number` varchar(40) NOT NULL, + `key` varchar(40) NOT NULL, + `method` varchar(4) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `user` (`user`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `two_factor_staticdevice` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `user` varchar(255) NOT NULL, + `name` varchar(64) NOT NULL, + `confirmed` tinyint(1) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `user` (`user`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `two_factor_statictoken` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `token` varchar(16) NOT NULL, + `device_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + KEY `two_fac_device_id_55a7b345293a7c6c_fk_two_factor_staticdevice_id` (`device_id`), + KEY `two_factor_statictoken_94a08da1` (`token`), + CONSTRAINT `two_fac_device_id_55a7b345293a7c6c_fk_two_factor_staticdevice_id` FOREIGN KEY (`device_id`) REFERENCES `two_factor_staticdevice` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `invitations_invitation` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `token` varchar(40) NOT NULL, + `inviter` varchar(255) NOT NULL, + `accepter` varchar(255) NOT NULL, + `invite_time` datetime NOT NULL, + `accept_time` datetime DEFAULT NULL, + `invite_type` varchar(20) NOT NULL, + `expire_time` datetime NOT NULL, + PRIMARY KEY (`id`), + KEY `invitations_invitation_d5dd16f8` (`inviter`), + KEY `invitations_invitation_token_1961fbb98c05e5fd_uniq` (`token`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/scripts/upgrade/sql/6.0.0/sqlite3/seahub.sql b/scripts/upgrade/sql/6.0.0/sqlite3/seahub.sql new 
file mode 100644 index 0000000000..46bb396d22 --- /dev/null +++ b/scripts/upgrade/sql/6.0.0/sqlite3/seahub.sql @@ -0,0 +1,24 @@ +CREATE TABLE IF NOT EXISTS "base_filecomment" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "parent_path" text NOT NULL, "repo_id_parent_path_md5" varchar(100) NOT NULL, "item_name" text NOT NULL, "author" varchar(255) NOT NULL, "comment" text NOT NULL, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL); +CREATE INDEX IF NOT EXISTS "base_filecomment_02bd92fa" ON "base_filecomment" ("author"); +CREATE INDEX IF NOT EXISTS "base_filecomment_9a8c79bf" ON "base_filecomment" ("repo_id"); +CREATE INDEX IF NOT EXISTS "base_filecomment_c5bf47d4" ON "base_filecomment" ("repo_id_parent_path_md5"); + +CREATE TABLE IF NOT EXISTS "termsandconditions_termsandconditions" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "slug" varchar(50) NOT NULL, "name" text NOT NULL, "version_number" decimal NOT NULL, "text" text NULL, "info" text NULL, "date_active" datetime NULL, "date_created" datetime NOT NULL); +CREATE INDEX IF NOT EXISTS "termsandconditions_termsandconditions_2dbcba41" ON "termsandconditions_termsandconditions" ("slug"); + +CREATE TABLE IF NOT EXISTS "termsandconditions_usertermsandconditions" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL, "ip_address" char(39) NULL, "date_accepted" datetime NOT NULL, "terms_id" integer NOT NULL REFERENCES "termsandconditions_termsandconditions" ("id"), UNIQUE ("username", "terms_id")); +CREATE INDEX IF NOT EXISTS "termsandconditions_usertermsandconditions_2ab34720" ON "termsandconditions_usertermsandconditions" ("terms_id"); + +CREATE TABLE IF NOT EXISTS "two_factor_phonedevice" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user" varchar(255) NOT NULL UNIQUE, "name" varchar(64) NOT NULL, "confirmed" bool NOT NULL, "number" varchar(40) NOT NULL, "key" varchar(40) NOT NULL, "method" varchar(4) NOT NULL); +CREATE TABLE IF NOT EXISTS "two_factor_staticdevice" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user" varchar(255) NOT NULL UNIQUE, "name" varchar(64) NOT NULL, "confirmed" bool NOT NULL); +CREATE TABLE IF NOT EXISTS "two_factor_statictoken" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "token" varchar(16) NOT NULL, "device_id" integer NOT NULL REFERENCES "two_factor_staticdevice" ("id")); +CREATE TABLE IF NOT EXISTS "two_factor_totpdevice" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user" varchar(255) NOT NULL UNIQUE, "name" varchar(64) NOT NULL, "confirmed" bool NOT NULL, "key" varchar(80) NOT NULL, "step" smallint unsigned NOT NULL, "t0" bigint NOT NULL, "digits" smallint unsigned NOT NULL, "tolerance" smallint unsigned NOT NULL, "drift" smallint NOT NULL, "last_t" bigint NOT NULL); +CREATE INDEX IF NOT EXISTS "two_factor_statictoken_94a08da1" ON "two_factor_statictoken" ("token"); +CREATE INDEX IF NOT EXISTS "two_factor_statictoken_9379346c" ON "two_factor_statictoken" ("device_id"); + +CREATE TABLE IF NOT EXISTS "invitations_invitation" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "token" varchar(40) NOT NULL, "inviter" varchar(255) NOT NULL, "accepter" varchar(255) NOT NULL, "invite_time" datetime NOT NULL, "accept_time" datetime NULL, "invite_type" varchar(20) NOT NULL, "expire_time" datetime NOT NULL); +CREATE INDEX IF NOT EXISTS "invitations_invitation_94a08da1" ON "invitations_invitation" ("token"); +CREATE INDEX IF NOT EXISTS "invitations_invitation_d5dd16f8" ON "invitations_invitation" ("inviter"); + +ALTER 
TABLE api2_tokenv2 ADD COLUMN wiped_at datetime DEFAULT NULL; +ALTER TABLE api2_tokenv2 ADD COLUMN created_at datetime NOT NULL DEFAULT '1970-01-01 00:00:00'; diff --git a/scripts/upgrade/sql/6.1.0/mysql/seahub.sql b/scripts/upgrade/sql/6.1.0/mysql/seahub.sql new file mode 100644 index 0000000000..4ffd944b7a --- /dev/null +++ b/scripts/upgrade/sql/6.1.0/mysql/seahub.sql @@ -0,0 +1,23 @@ +ALTER TABLE `share_fileshare` MODIFY token varchar(100); +ALTER TABLE `share_fileshare` ADD COLUMN `permission` varchar(50) NOT NULL DEFAULT 'view_download'; +ALTER TABLE `share_uploadlinkshare` MODIFY token varchar(100); + +CREATE TABLE IF NOT EXISTS `institutions_institutionquota` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `quota` bigint(20) NOT NULL, + `institution_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + KEY `i_institution_id_2ca7c89373390e2c_fk_institutions_institution_id` (`institution_id`), + CONSTRAINT `i_institution_id_2ca7c89373390e2c_fk_institutions_institution_id` FOREIGN KEY (`institution_id`) REFERENCES `institutions_institution` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `admin_log_adminlog` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `email` varchar(254) NOT NULL, + `operation` varchar(255) NOT NULL, + `detail` longtext NOT NULL, + `datetime` datetime NOT NULL, + PRIMARY KEY (`id`), + KEY `admin_log_adminlog_0c83f57c` (`email`), + KEY `admin_log_adminlog_f7235a61` (`operation`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; \ No newline at end of file diff --git a/scripts/upgrade/sql/6.1.0/sqlite3/seahub.sql b/scripts/upgrade/sql/6.1.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..bca643581c --- /dev/null +++ b/scripts/upgrade/sql/6.1.0/sqlite3/seahub.sql @@ -0,0 +1,9 @@ +alter table share_fileshare add column permission varchar(50) not null default 'view_download'; + +CREATE TABLE "admin_log_adminlog" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "email" varchar(254) NOT NULL, "operation" varchar(255) NOT NULL, "detail" text NOT NULL, "datetime" datetime NOT NULL); + +CREATE TABLE "institutions_institutionquota" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "quota" bigint NOT NULL, "institution_id" integer NOT NULL REFERENCES "institutions_institution" ("id")); + +CREATE INDEX "admin_log_adminlog_0c83f57c" ON "admin_log_adminlog" ("email"); +CREATE INDEX "admin_log_adminlog_f7235a61" ON "admin_log_adminlog" ("operation"); +CREATE INDEX "institutions_institutionquota_a964baeb" ON "institutions_institutionquota" ("institution_id"); \ No newline at end of file diff --git a/scripts/upgrade/sql/6.2.0/mysql/ccnet.sql b/scripts/upgrade/sql/6.2.0/mysql/ccnet.sql new file mode 100644 index 0000000000..ac5d5a2127 --- /dev/null +++ b/scripts/upgrade/sql/6.2.0/mysql/ccnet.sql @@ -0,0 +1,4 @@ +alter table LDAPUsers add column reference_id VARCHAR(255); +alter table EmailUser add column reference_id VARCHAR(255); +ALTER TABLE `LDAPUsers` ADD UNIQUE (`reference_id`); +ALTER TABLE `EmailUser` ADD UNIQUE (`reference_id`); \ No newline at end of file diff --git a/scripts/upgrade/sql/6.2.0/mysql/seahub.sql b/scripts/upgrade/sql/6.2.0/mysql/seahub.sql new file mode 100644 index 0000000000..81817f8a40 --- /dev/null +++ b/scripts/upgrade/sql/6.2.0/mysql/seahub.sql @@ -0,0 +1,84 @@ +CREATE TABLE IF NOT EXISTS `revision_tag_tags` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `name` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `name` (`name`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `revision_tag_revisiontags` ( + `id` 
int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `path` longtext NOT NULL, + `revision_id` varchar(255) NOT NULL, + `tag_id` int(11) NOT NULL, + `username` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + KEY `revision_tag_rev_tag_id_37c2d76166c50597_fk_revision_tag_tags_id` (`tag_id`), + KEY `revision_tag_revisiontags_9a8c79bf` (`repo_id`), + KEY `revision_tag_revisiontags_5de09a8d` (`revision_id`), + KEY `revision_tag_revisiontags_14c4b06b` (`username`), + CONSTRAINT `revision_tag_rev_tag_id_37c2d76166c50597_fk_revision_tag_tags_id` FOREIGN KEY (`tag_id`) REFERENCES `revision_tag_tags` (`id`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `share_extrasharepermission` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `share_to` varchar(255) NOT NULL, + `permission` varchar(30) NOT NULL, + PRIMARY KEY (`id`), + KEY `share_extrasharepermission_9a8c79bf` (`repo_id`), + KEY `share_extrasharepermission_e4fb1dad` (`share_to`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `share_extragroupssharepermission` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `group_id` int(11) NOT NULL, + `permission` varchar(30) NOT NULL, + PRIMARY KEY (`id`), + KEY `share_extragroupssharepermission_9a8c79bf` (`repo_id`), + KEY `share_extragroupssharepermission_0e939a4f` (`group_id`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `tags_fileuuidmap` ( + `uuid` char(32) NOT NULL, + `repo_id` varchar(36) NOT NULL, + `repo_id_parent_path_md5` varchar(100) NOT NULL, + `parent_path` longtext NOT NULL, + `filename` varchar(1024) NOT NULL, + `is_dir` tinyint(1) NOT NULL, + PRIMARY KEY (`uuid`), + KEY `tags_fileuuidmap_9a8c79bf` (`repo_id`), + KEY `tags_fileuuidmap_c5bf47d4` (`repo_id_parent_path_md5`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `tags_tags` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `name` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `name` (`name`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `tags_filetag` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `uuid_id` char(32) NOT NULL, + `tag_id` int(11) NOT NULL, + `username` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + KEY `tags_filetag_uuid_id_5e2dc8ebbab85301_fk_tags_fileuuidmap_uuid` (`uuid_id`), + KEY `tags_filetag_tag_id_39c4746ee9d70b71_fk_tags_tags_id` (`tag_id`), + CONSTRAINT `tags_filetag_tag_id_39c4746ee9d70b71_fk_tags_tags_id` FOREIGN KEY (`tag_id`) REFERENCES `tags_tags` (`id`), + CONSTRAINT `tags_filetag_uuid_id_5e2dc8ebbab85301_fk_tags_fileuuidmap_uuid` FOREIGN KEY (`uuid_id`) REFERENCES `tags_fileuuidmap` (`uuid`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `role_permissions_adminrole` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `email` varchar(254) NOT NULL, + `role` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `email` (`email`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `sysadmin_extra_userloginlog` ADD COLUMN `login_success` tinyint(1) NOT NULL default 1; +ALTER TABLE `profile_profile` ADD COLUMN `list_in_address_book` tinyint(1) NOT NULL default 0; +ALTER TABLE `profile_profile` ADD INDEX `profile_profile_3d5d3631` (`list_in_address_book`); \ No newline at end of file diff --git a/scripts/upgrade/sql/6.2.0/sqlite3/ccnet/usermgr.sql b/scripts/upgrade/sql/6.2.0/sqlite3/ccnet/usermgr.sql new file mode 100644 index 0000000000..e548e09d69 --- /dev/null +++ 
b/scripts/upgrade/sql/6.2.0/sqlite3/ccnet/usermgr.sql @@ -0,0 +1,4 @@ +alter table LDAPUsers add column reference_id VARCHAR(255); +alter table EmailUser add column reference_id VARCHAR(255); +CREATE UNIQUE INDEX IF NOT EXISTS reference_id_index on EmailUser (reference_id); +CREATE UNIQUE INDEX IF NOT EXISTS ldapusers_reference_id_index on LDAPUsers(reference_id); \ No newline at end of file diff --git a/scripts/upgrade/sql/6.2.0/sqlite3/seahub.sql b/scripts/upgrade/sql/6.2.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..8d60dbccc9 --- /dev/null +++ b/scripts/upgrade/sql/6.2.0/sqlite3/seahub.sql @@ -0,0 +1,24 @@ +alter table sysadmin_extra_userloginlog add column login_success bool not null default 1; +alter table profile_profile add column list_in_address_book bool not null default 0; + +CREATE TABLE IF NOT EXISTS "share_extragroupssharepermission" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "group_id" integer NOT NULL, "permission" varchar(30) NOT NULL); +CREATE TABLE IF NOT EXISTS "share_extrasharepermission" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "share_to" varchar(255) NOT NULL, "permission" varchar(30) NOT NULL); +CREATE TABLE IF NOT EXISTS "tags_fileuuidmap" ("uuid" char(32) NOT NULL PRIMARY KEY, "repo_id" varchar(36) NOT NULL, "repo_id_parent_path_md5" varchar(100) NOT NULL, "parent_path" text NOT NULL, "filename" varchar(1024) NOT NULL, "is_dir" bool NOT NULL); +CREATE TABLE IF NOT EXISTS "tags_tags" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "name" varchar(255) NOT NULL UNIQUE); +CREATE TABLE IF NOT EXISTS "tags_filetag" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "uuid_id" char(32) NOT NULL REFERENCES "tags_fileuuidmap" ("uuid"), "tag_id" integer NOT NULL REFERENCES "tags_tags" ("id"), "username" varchar(255) NOT NULL); +CREATE TABLE IF NOT EXISTS "revision_tag_tags" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "name" varchar(255) NOT NULL UNIQUE); +CREATE TABLE IF NOT EXISTS "revision_tag_revisiontags" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "path" text NOT NULL, "revision_id" varchar(255) NOT NULL, "tag_id" integer NOT NULL REFERENCES "revision_tag_tags" ("id"), "username" varchar(255) NOT NULL); +CREATE TABLE IF NOT EXISTS "role_permissions_adminrole" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "email" varchar(254) NOT NULL UNIQUE, "role" varchar(255) NOT NULL); +CREATE INDEX IF NOT EXISTS "share_extragroupssharepermission_9a8c79bf" ON "share_extragroupssharepermission" ("repo_id"); +CREATE INDEX IF NOT EXISTS "share_extragroupssharepermission_0e939a4f" ON "share_extragroupssharepermission" ("group_id"); +CREATE INDEX IF NOT EXISTS "share_extrasharepermission_9a8c79bf" ON "share_extrasharepermission" ("repo_id"); +CREATE INDEX IF NOT EXISTS "share_extrasharepermission_e4fb1dad" ON "share_extrasharepermission" ("share_to"); +CREATE INDEX IF NOT EXISTS "tags_fileuuidmap_9a8c79bf" ON "tags_fileuuidmap" ("repo_id"); +CREATE INDEX IF NOT EXISTS "tags_fileuuidmap_c5bf47d4" ON "tags_fileuuidmap" ("repo_id_parent_path_md5"); +CREATE INDEX IF NOT EXISTS "tags_filetag_10634818" ON "tags_filetag" ("uuid_id"); +CREATE INDEX IF NOT EXISTS "tags_filetag_76f094bc" ON "tags_filetag" ("tag_id"); +CREATE INDEX IF NOT EXISTS "revision_tag_revisiontags_9a8c79bf" ON "revision_tag_revisiontags" ("repo_id"); +CREATE INDEX IF NOT EXISTS "revision_tag_revisiontags_5de09a8d" ON "revision_tag_revisiontags" ("revision_id"); +CREATE INDEX IF NOT 
EXISTS "revision_tag_revisiontags_76f094bc" ON "revision_tag_revisiontags" ("tag_id"); +CREATE INDEX IF NOT EXISTS "revision_tag_revisiontags_14c4b06b" ON "revision_tag_revisiontags" ("username"); +CREATE INDEX IF NOT EXISTS "profile_profile_3d5d3631" ON "profile_profile" ("list_in_address_book"); \ No newline at end of file diff --git a/scripts/upgrade/sql/6.3.0/mysql/ccnet.sql b/scripts/upgrade/sql/6.3.0/mysql/ccnet.sql new file mode 100644 index 0000000000..58f187a587 --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/mysql/ccnet.sql @@ -0,0 +1,24 @@ +CREATE TABLE IF NOT EXISTS LDAPConfig (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS GroupStructure (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, group_id INTEGER, path VARCHAR(1024), UNIQUE INDEX(group_id))ENGINE=INNODB; + +alter table `Group` add column parent_group_id INTEGER default 0; -- Replace `Group` if you configured table `Group` to another name. + +ALTER TABLE Binding ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; + +ALTER TABLE LDAPConfig ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; + +ALTER TABLE OrgUser DROP primary key; +ALTER TABLE OrgUser ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE OrgUser ADD UNIQUE (org_id, email); + +ALTER TABLE OrgGroup DROP primary key; +ALTER TABLE OrgGroup ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE OrgGroup ADD UNIQUE (org_id, group_id); + +ALTER TABLE GroupUser DROP primary key; +ALTER TABLE GroupUser ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE GroupUser ADD UNIQUE (group_id, user_name); + +ALTER TABLE GroupDNPair ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; + diff --git a/scripts/upgrade/sql/6.3.0/mysql/seafevents.sql b/scripts/upgrade/sql/6.3.0/mysql/seafevents.sql new file mode 100644 index 0000000000..b41ed2c844 --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/mysql/seafevents.sql @@ -0,0 +1,4 @@ +ALTER TABLE Event ADD INDEX `ix_event_timestamp` (`timestamp`); +ALTER TABLE FileAudit ADD INDEX `ix_FileAudit_timestamp` (`timestamp`); +ALTER TABLE FileUpdate ADD INDEX `ix_FileUpdate_timestamp` (`timestamp`); +ALTER TABLE UserTrafficStat ADD INDEX `ix_UserTrafficStat_month` (`month`); diff --git a/scripts/upgrade/sql/6.3.0/mysql/seafile.sql b/scripts/upgrade/sql/6.3.0/mysql/seafile.sql new file mode 100644 index 0000000000..7096b6689d --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/mysql/seafile.sql @@ -0,0 +1,87 @@ +CREATE TABLE IF NOT EXISTS SeafileConf (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER) ENGINE=INNODB; + +CREATE TABLE IF NOT EXISTS RepoInfo (repo_id CHAR(36) PRIMARY KEY, name VARCHAR(255) NOT NULL, update_time BIGINT, version INTEGER, is_encrypted INTEGER, last_modifier VARCHAR(255)) ENGINE=INNODB; + +ALTER TABLE Repo DROP primary key; +ALTER TABLE Repo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE Repo ADD UNIQUE (repo_id); + +ALTER TABLE RepoOwner DROP primary key; +ALTER TABLE RepoOwner ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoOwner ADD UNIQUE (repo_id); + +ALTER TABLE RepoGroup ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; + +ALTER TABLE InnerPubRepo DROP primary key; +ALTER TABLE InnerPubRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE InnerPubRepo ADD UNIQUE (repo_id); + +ALTER TABLE RepoUserToken ADD id BIGINT NOT NULL 
AUTO_INCREMENT PRIMARY KEY FIRST; + +ALTER TABLE RepoTokenPeerInfo DROP primary key; +ALTER TABLE RepoTokenPeerInfo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoTokenPeerInfo ADD UNIQUE (token); + +ALTER TABLE RepoHead DROP primary key; +ALTER TABLE RepoHead ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoHead ADD UNIQUE (repo_id); + +ALTER TABLE RepoSize DROP primary key; +ALTER TABLE RepoSize ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoSize ADD UNIQUE (repo_id); + +ALTER TABLE RepoHistoryLimit DROP primary key; +ALTER TABLE RepoHistoryLimit ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoHistoryLimit ADD UNIQUE (repo_id); + +ALTER TABLE RepoValidSince DROP primary key; +ALTER TABLE RepoValidSince ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoValidSince ADD UNIQUE (repo_id); + +ALTER TABLE WebAP DROP primary key; +ALTER TABLE WebAP ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE WebAP ADD UNIQUE (repo_id); + +ALTER TABLE VirtualRepo DROP primary key; +ALTER TABLE VirtualRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE VirtualRepo ADD UNIQUE (repo_id); + +ALTER TABLE GarbageRepos DROP primary key; +ALTER TABLE GarbageRepos ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE GarbageRepos ADD UNIQUE (repo_id); + +ALTER TABLE RepoTrash DROP primary key; +ALTER TABLE RepoTrash ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoTrash ADD UNIQUE (repo_id); + +ALTER TABLE RepoFileCount DROP primary key; +ALTER TABLE RepoFileCount ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoFileCount ADD UNIQUE (repo_id); + +ALTER TABLE RepoInfo DROP primary key; +ALTER TABLE RepoInfo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE RepoInfo ADD UNIQUE (repo_id); + +ALTER TABLE UserQuota DROP primary key; +ALTER TABLE UserQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE UserQuota ADD UNIQUE (user); + +ALTER TABLE UserShareQuota DROP primary key; +ALTER TABLE UserShareQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE UserShareQuota ADD UNIQUE (user); + +ALTER TABLE OrgQuota DROP primary key; +ALTER TABLE OrgQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE OrgQuota ADD UNIQUE (org_id); + +ALTER TABLE OrgUserQuota DROP primary key; +ALTER TABLE OrgUserQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE OrgUserQuota ADD UNIQUE (org_id, user); + +ALTER TABLE SystemInfo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; + +ALTER TABLE Branch DROP primary key; +ALTER TABLE Branch ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE Branch ADD UNIQUE (repo_id, name); + +ALTER TABLE SeafileConf ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; diff --git a/scripts/upgrade/sql/6.3.0/mysql/seahub.sql b/scripts/upgrade/sql/6.3.0/mysql/seahub.sql new file mode 100644 index 0000000000..692d675039 --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/mysql/seahub.sql @@ -0,0 +1,170 @@ +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET 
@OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `auth_group` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `name` varchar(80) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `name` (`name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +/*!40000 ALTER TABLE `auth_group` DISABLE KEYS */; +/*!40000 ALTER TABLE `auth_group` ENABLE KEYS */; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `auth_group_permissions` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `group_id` int(11) NOT NULL, + `permission_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `auth_group_permissions_group_id_permission_id_0cd325b0_uniq` (`group_id`,`permission_id`), + KEY `auth_group_permissio_permission_id_84c5c92e_fk_auth_perm` (`permission_id`), + CONSTRAINT `auth_group_permissio_permission_id_84c5c92e_fk_auth_perm` FOREIGN KEY (`permission_id`) REFERENCES `auth_permission` (`id`), + CONSTRAINT `auth_group_permissions_group_id_b120cbf9_fk_auth_group_id` FOREIGN KEY (`group_id`) REFERENCES `auth_group` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +/*!40000 ALTER TABLE `auth_group_permissions` DISABLE KEYS */; +/*!40000 ALTER TABLE `auth_group_permissions` ENABLE KEYS */; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `auth_permission` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `name` varchar(255) NOT NULL, + `content_type_id` int(11) NOT NULL, + `codename` varchar(100) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `auth_permission_content_type_id_codename_01ab375a_uniq` (`content_type_id`,`codename`), + CONSTRAINT `auth_permission_content_type_id_2f476e4b_fk_django_co` FOREIGN KEY (`content_type_id`) REFERENCES `django_content_type` (`id`) +) ENGINE=InnoDB AUTO_INCREMENT=209 DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +/*!40000 ALTER TABLE `auth_permission` ENABLE KEYS */; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `auth_user` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `password` varchar(128) NOT NULL, + `last_login` datetime DEFAULT NULL, + `is_superuser` tinyint(1) NOT NULL, + `username` varchar(150) NOT NULL, + `first_name` varchar(30) NOT NULL, + `last_name` varchar(30) NOT NULL, + `email` varchar(254) NOT NULL, + `is_staff` tinyint(1) NOT NULL, + `is_active` tinyint(1) NOT NULL, + `date_joined` datetime NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `username` (`username`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +/*!40000 ALTER TABLE `auth_user` DISABLE KEYS */; +/*!40000 ALTER TABLE `auth_user` ENABLE KEYS */; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `auth_user_groups` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `user_id` int(11) NOT NULL, + `group_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE 
KEY `auth_user_groups_user_id_group_id_94350c0c_uniq` (`user_id`,`group_id`), + KEY `auth_user_groups_group_id_97559544_fk_auth_group_id` (`group_id`), + CONSTRAINT `auth_user_groups_group_id_97559544_fk_auth_group_id` FOREIGN KEY (`group_id`) REFERENCES `auth_group` (`id`), + CONSTRAINT `auth_user_groups_user_id_6a12ed8b_fk_auth_user_id` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +/*!40000 ALTER TABLE `auth_user_groups` DISABLE KEYS */; +/*!40000 ALTER TABLE `auth_user_groups` ENABLE KEYS */; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `auth_user_user_permissions` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `user_id` int(11) NOT NULL, + `permission_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `auth_user_user_permissions_user_id_permission_id_14a6b632_uniq` (`user_id`,`permission_id`), + KEY `auth_user_user_permi_permission_id_1fbb5f2c_fk_auth_perm` (`permission_id`), + CONSTRAINT `auth_user_user_permi_permission_id_1fbb5f2c_fk_auth_perm` FOREIGN KEY (`permission_id`) REFERENCES `auth_permission` (`id`), + CONSTRAINT `auth_user_user_permissions_user_id_a95ead1b_fk_auth_user_id` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + + +/*!40000 ALTER TABLE `wiki_personalwiki` DISABLE KEYS */; +/*!40000 ALTER TABLE `wiki_personalwiki` ENABLE KEYS */; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `wiki_wiki` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `username` varchar(255) NOT NULL, + `name` varchar(255) NOT NULL, + `slug` varchar(255) NOT NULL, + `repo_id` varchar(36) NOT NULL, + `permission` varchar(50) NOT NULL, + `created_at` datetime NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `slug` (`slug`), + UNIQUE KEY `wiki_wiki_username_3c0f83e1b93de663_uniq` (`username`,`repo_id`), + KEY `wiki_wiki_fde81f11` (`created_at`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +/*!40000 ALTER TABLE `wiki_wiki` DISABLE KEYS */; +/*!40000 ALTER TABLE `wiki_wiki` ENABLE KEYS */; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +CREATE TABLE IF NOT EXISTS `django_cas_ng_proxygrantingticket` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `session_key` varchar(255) DEFAULT NULL, + `pgtiou` varchar(255) DEFAULT NULL, + `pgt` varchar(255) DEFAULT NULL, + `date` datetime NOT NULL, + `user` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `django_cas_ng_proxygrant_session_key_user_id_4cd2ea19_uniq` (`session_key`,`user`), + KEY `django_cas_ng_proxyg_user_id_f833edd2_fk_auth_user` (`user`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +CREATE TABLE IF NOT EXISTS `django_cas_ng_sessionticket` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `session_key` varchar(255) NOT NULL, + `ticket` varchar(255) NOT NULL, + PRIMARY KEY (`id`) +) 
ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `organizations_orgmemberquota` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `org_id` int(11) NOT NULL, + `quota` int(11) NOT NULL, + PRIMARY KEY (`id`), + KEY `organizations_orgmemberquota_org_id_93dde51d` (`org_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +ALTER TABLE notifications_notification ADD INDEX `notifications_notification_386bba5a` (`primary`); + +ALTER TABLE institutions_institutionadmin ADD INDEX `institutions_institutionadmin_user_7560167c8413ff0e_uniq` (`user`); + +ALTER TABLE `post_office_attachment` add column `mimetype` varchar(255) NOT NULL; diff --git a/scripts/upgrade/sql/6.3.0/sqlite3/ccnet/groupmgr.sql b/scripts/upgrade/sql/6.3.0/sqlite3/ccnet/groupmgr.sql new file mode 100644 index 0000000000..e52ed988e7 --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/sqlite3/ccnet/groupmgr.sql @@ -0,0 +1,2 @@ +CREATE TABLE IF NOT EXISTS GroupStructure (group_id INTEGER PRIMARY KEY, path VARCHAR(1024)); +alter table `Group` add column parent_group_id INTEGER default 0; -- Replace `Group` if you configured table `Group` to another name. diff --git a/scripts/upgrade/sql/6.3.0/sqlite3/ccnet/usermgr.sql b/scripts/upgrade/sql/6.3.0/sqlite3/ccnet/usermgr.sql new file mode 100644 index 0000000000..830bfd3fbf --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/sqlite3/ccnet/usermgr.sql @@ -0,0 +1 @@ +CREATE TABLE IF NOT EXISTS LDAPConfig (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER); diff --git a/scripts/upgrade/sql/6.3.0/sqlite3/seafevents.sql b/scripts/upgrade/sql/6.3.0/sqlite3/seafevents.sql new file mode 100644 index 0000000000..9327a2206b --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/sqlite3/seafevents.sql @@ -0,0 +1,4 @@ +CREATE INDEX IF NOT EXISTS ix_event_timestamp ON Event (timestamp); +CREATE INDEX IF NOT EXISTS ix_FileAudit_timestamp ON FileAudit (timestamp); +CREATE INDEX IF NOT EXISTS ix_FileUpdate_timestamp ON FileUpdate (timestamp); +CREATE INDEX IF NOT EXISTS ix_UserTrafficStat_month ON UserTrafficStat (month); diff --git a/scripts/upgrade/sql/6.3.0/sqlite3/seafile.sql b/scripts/upgrade/sql/6.3.0/sqlite3/seafile.sql new file mode 100644 index 0000000000..b39a75cb12 --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/sqlite3/seafile.sql @@ -0,0 +1,3 @@ +CREATE TABLE IF NOT EXISTS SeafileConf (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER); + +CREATE TABLE IF NOT EXISTS RepoInfo (repo_id CHAR(36) PRIMARY KEY, name VARCHAR(255) NOT NULL, update_time INTEGER, version INTEGER, is_encrypted INTEGER, last_modifier VARCHAR(255)); diff --git a/scripts/upgrade/sql/6.3.0/sqlite3/seahub.sql b/scripts/upgrade/sql/6.3.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..ef01d251d6 --- /dev/null +++ b/scripts/upgrade/sql/6.3.0/sqlite3/seahub.sql @@ -0,0 +1,39 @@ +CREATE TABLE IF NOT EXISTS "auth_group" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "name" varchar(80) NOT NULL UNIQUE); +CREATE TABLE IF NOT EXISTS "auth_group_permissions" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "group_id" integer NOT NULL REFERENCES "auth_group" ("id"), "permission_id" integer NOT NULL REFERENCES "auth_permission" ("id")); +CREATE TABLE IF NOT EXISTS "auth_user_groups" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user_id" integer NOT NULL REFERENCES "auth_user" ("id"), "group_id" integer NOT NULL REFERENCES "auth_group" ("id")); +CREATE TABLE IF NOT EXISTS "auth_user_user_permissions" ("id" integer NOT NULL PRIMARY KEY 
AUTOINCREMENT, "user_id" integer NOT NULL REFERENCES "auth_user" ("id"), "permission_id" integer NOT NULL REFERENCES "auth_permission" ("id")); +CREATE TABLE IF NOT EXISTS "auth_permission" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "content_type_id" integer NOT NULL REFERENCES "django_content_type" ("id"), "codename" varchar(100) NOT NULL, "name" varchar(255) NOT NULL); +CREATE TABLE IF NOT EXISTS "auth_user" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "password" varchar(128) NOT NULL, "last_login" datetime NULL, "is_superuser" bool NOT NULL, "first_name" varchar(30) NOT NULL, "last_name" varchar(30) NOT NULL, "email" varchar(254) NOT NULL, "is_staff" bool NOT NULL, "is_active" bool NOT NULL, "date_joined" datetime NOT NULL, "username" varchar(150) NOT NULL UNIQUE); + +CREATE TABLE IF NOT EXISTS "organizations_orgmemberquota" ( + "id" integer NOT NULL PRIMARY KEY, + "org_id" integer NOT NULL, + "quota" integer NOT NULL +); +CREATE TABLE IF NOT EXISTS "django_cas_ng_sessionticket" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "session_key" varchar(255) NOT NULL, "ticket" varchar(255) NOT NULL); +CREATE TABLE IF NOT EXISTS "django_cas_ng_proxygrantingticket" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "session_key" varchar(255) NULL, "pgtiou" varchar(255) NULL, "pgt" varchar(255) NULL, "date" datetime NOT NULL, "user" varchar(255) NOT NULL); + +CREATE UNIQUE INDEX IF NOT EXISTS "auth_group_permissions_group_id_permission_id_0cd325b0_uniq" ON "auth_group_permissions" ("group_id", "permission_id"); +CREATE INDEX IF NOT EXISTS "auth_group_permissions_group_id_b120cbf9" ON "auth_group_permissions" ("group_id"); +CREATE INDEX IF NOT EXISTS "auth_group_permissions_permission_id_84c5c92e" ON "auth_group_permissions" ("permission_id"); +CREATE UNIQUE INDEX IF NOT EXISTS "auth_user_groups_user_id_group_id_94350c0c_uniq" ON "auth_user_groups" ("user_id", "group_id"); +CREATE INDEX IF NOT EXISTS "auth_user_groups_user_id_6a12ed8b" ON "auth_user_groups" ("user_id"); +CREATE INDEX IF NOT EXISTS "auth_user_groups_group_id_97559544" ON "auth_user_groups" ("group_id"); +CREATE UNIQUE INDEX IF NOT EXISTS "auth_user_user_permissions_user_id_permission_id_14a6b632_uniq" ON "auth_user_user_permissions" ("user_id", "permission_id"); +CREATE INDEX IF NOT EXISTS "auth_user_user_permissions_user_id_a95ead1b" ON "auth_user_user_permissions" ("user_id"); +CREATE INDEX IF NOT EXISTS "auth_user_user_permissions_permission_id_1fbb5f2c" ON "auth_user_user_permissions" ("permission_id"); +CREATE UNIQUE INDEX IF NOT EXISTS "auth_permission_content_type_id_codename_01ab375a_uniq" ON "auth_permission" ("content_type_id", "codename"); +CREATE INDEX IF NOT EXISTS "auth_permission_content_type_id_2f476e4b" ON "auth_permission" ("content_type_id"); + +CREATE TABLE IF NOT EXISTS "wiki_wiki" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL, "name" varchar(255) NOT NULL, "slug" varchar(255) NOT NULL UNIQUE, "repo_id" varchar(36) NOT NULL, "permission" varchar(50) NOT NULL, "created_at" datetime NOT NULL, UNIQUE ("username", "repo_id")); + +CREATE INDEX IF NOT EXISTS "wiki_wiki_fde81f11" ON "wiki_wiki" ("created_at"); + +CREATE INDEX IF NOT EXISTS "notifications_notification_386bba5a" ON "notifications_notification" ("primary"); +CREATE INDEX IF NOT EXISTS "institutions_institutionadmin_ee11cbb1" ON "institutions_institutionadmin" ("user"); + +CREATE INDEX IF NOT EXISTS "organizations_orgmemberquota_944dadb6" ON "organizations_orgmemberquota" ("org_id"); +CREATE UNIQUE INDEX 
IF NOT EXISTS "django_cas_ng_proxygrantingticket_session_key_user_8a4ec2bc_uniq" ON "django_cas_ng_proxygrantingticket" ("session_key", "user"); +CREATE INDEX IF NOT EXISTS "django_cas_ng_proxygrantingticket_user_1f42619d" ON "django_cas_ng_proxygrantingticket" ("user"); + +ALTER TABLE "post_office_attachment" add column "mimetype" varchar(255); diff --git a/scripts/upgrade/sql/7.0.0/mysql/ccnet.sql b/scripts/upgrade/sql/7.0.0/mysql/ccnet.sql new file mode 100644 index 0000000000..885c275b51 --- /dev/null +++ b/scripts/upgrade/sql/7.0.0/mysql/ccnet.sql @@ -0,0 +1 @@ +ALTER TABLE UserRole ADD COLUMN is_manual_set INTEGER DEFAULT 0; diff --git a/scripts/upgrade/sql/7.0.0/mysql/seafile.sql b/scripts/upgrade/sql/7.0.0/mysql/seafile.sql new file mode 100644 index 0000000000..4bae30fc6c --- /dev/null +++ b/scripts/upgrade/sql/7.0.0/mysql/seafile.sql @@ -0,0 +1,4 @@ +ALTER TABLE RepoInfo ADD COLUMN status INTEGER DEFAULT 0; +CREATE TABLE IF NOT EXISTS RepoSyncError (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, token CHAR(41), error_time BIGINT UNSIGNED, error_con VARCHAR(1024), UNIQUE INDEX(token)); +ALTER TABLE RepoSyncError MODIFY COLUMN error_con VARCHAR(1024); +CREATE TABLE IF NOT EXISTS WebUploadTempFiles (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(40) NOT NULL, file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL); diff --git a/scripts/upgrade/sql/7.0.0/mysql/seahub.sql b/scripts/upgrade/sql/7.0.0/mysql/seahub.sql new file mode 100644 index 0000000000..230b950d6b --- /dev/null +++ b/scripts/upgrade/sql/7.0.0/mysql/seahub.sql @@ -0,0 +1,125 @@ +CREATE TABLE IF NOT EXISTS `drafts_draft` ( + `id` int(11) NOT NULL, + `created_at` datetime(6) NOT NULL, + `updated_at` datetime(6) NOT NULL, + `username` varchar(255) NOT NULL, + `origin_repo_id` varchar(36) NOT NULL, + `origin_file_version` varchar(100) NOT NULL, + `draft_file_path` varchar(1024) NOT NULL, + `origin_file_uuid` char(32) NOT NULL, + `publish_file_version` varchar(100) DEFAULT NULL, + `status` varchar(20) NOT NULL, + PRIMARY KEY (`id`), + KEY `drafts_draft_origin_file_uuid_id_f150319e_fk_tags_file` (`origin_file_uuid`), + KEY `drafts_draft_created_at_e9f4523f` (`created_at`), + KEY `drafts_draft_updated_at_0a144b05` (`updated_at`), + KEY `drafts_draft_username_73e6738b` (`username`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `drafts_draftreviewer` ( + `id` int(11) NOT NULL, + `reviewer` varchar(255) NOT NULL, + `draft_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + KEY `drafts_draftreviewer_reviewer_e4c777ac` (`reviewer`), + KEY `drafts_draftreviewer_draft_id_4ea59775_fk_drafts_draft_id` (`draft_id`), + CONSTRAINT `drafts_draftreviewer_draft_id_4ea59775_fk_drafts_draft_id` FOREIGN KEY (`draft_id`) REFERENCES `drafts_draft` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `options_useroptions` ADD INDEX `options_useroptions_option_key_7bf7ae4b` (`option_key`); + +ALTER TABLE TotalStorageStat DROP primary key; +ALTER TABLE TotalStorageStat ADD `id` BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE TotalStorageStat ADD `org_id` INT NOT NULL DEFAULT -1; +ALTER TABLE TotalStorageStat ADD INDEX `idx_storage_time_org` (`timestamp`, `org_id`); + +ALTER TABLE FileOpsStat ADD `org_id` INT NOT NULL DEFAULT -1; +ALTER TABLE FileOpsStat ADD INDEX `idx_file_ops_time_org` (`timestamp`, `org_id`); + +ALTER TABLE UserActivityStat DROP primary key; +ALTER TABLE UserActivityStat ADD `id` BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST; +ALTER TABLE UserActivityStat ADD UNIQUE 
(name_time_md5); +ALTER TABLE UserActivityStat ADD `org_id` INT NOT NULL DEFAULT -1; +ALTER TABLE UserActivityStat ADD INDEX `idx_activity_time_org` (`timestamp`, `org_id`); + +DROP TABLE UserTrafficStat; + + + +CREATE TABLE IF NOT EXISTS `repo_tags_repotags` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `name` varchar(255) NOT NULL, + `color` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + KEY `repo_tags_repotags_repo_id_1163a48f` (`repo_id`), + KEY `repo_tags_repotags_name_3f4c9027` (`name`), + KEY `repo_tags_repotags_color_1292b6c1` (`color`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + + +CREATE TABLE IF NOT EXISTS `file_tags_filetags` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `file_uuid_id` char(32) NOT NULL, + `repo_tag_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + KEY `file_tags_filetags_file_uuid_id_e30f0ec8_fk_tags_file` (`file_uuid_id`), + KEY `file_tags_filetags_repo_tag_id_c39660cb_fk_repo_tags_repotags_id` (`repo_tag_id`), + CONSTRAINT `file_tags_filetags_file_uuid_id_e30f0ec8_fk_tags_file` FOREIGN KEY (`file_uuid_id`) REFERENCES `tags_fileuuidmap` (`uuid`), + CONSTRAINT `file_tags_filetags_repo_tag_id_c39660cb_fk_repo_tags_repotags_id` FOREIGN KEY (`repo_tag_id`) REFERENCES `repo_tags_repotags` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + + +CREATE TABLE IF NOT EXISTS `related_files_relatedfiles` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `o_uuid_id` char(32) NOT NULL, + `r_uuid_id` char(32) NOT NULL, + PRIMARY KEY (`id`), + KEY `related_files_relate_o_uuid_id_aaa8e613_fk_tags_file` (`o_uuid_id`), + KEY `related_files_relate_r_uuid_id_031751df_fk_tags_file` (`r_uuid_id`), + CONSTRAINT `related_files_relate_o_uuid_id_aaa8e613_fk_tags_file` FOREIGN KEY (`o_uuid_id`) REFERENCES `tags_fileuuidmap` (`uuid`), + CONSTRAINT `related_files_relate_r_uuid_id_031751df_fk_tags_file` FOREIGN KEY (`r_uuid_id`) REFERENCES `tags_fileuuidmap` (`uuid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + + +CREATE TABLE IF NOT EXISTS `organizations_orgsettings` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `org_id` int(11) NOT NULL, + `role` varchar(100) DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `organizations_orgsettings_org_id_630f6843_uniq` (`org_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +DROP INDEX `profile_profile_contact_email_0975e4bf_uniq` ON `profile_profile`; +ALTER TABLE `profile_profile` ADD CONSTRAINT `profile_profile_contact_email_0975e4bf_uniq` UNIQUE (`contact_email`); + +CREATE TABLE IF NOT EXISTS `social_auth_usersocialauth` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `username` varchar(255) NOT NULL, + `provider` varchar(32) NOT NULL, + `uid` varchar(150) NOT NULL, + `extra_data` longtext NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `social_auth_usersocialauth_provider_uid_e6b5e668_uniq` (`provider`,`uid`), + KEY `social_auth_usersocialauth_username_3f06b5cf` (`username`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + + +ALTER TABLE `base_filecomment` ADD `detail` LONGTEXT DEFAULT NULL; +ALTER TABLE `base_filecomment` ADD `resolved` TINYINT(1) NOT NULL DEFAULT 0; +ALTER TABLE `base_filecomment` ADD INDEX `resolved` (`resolved`); + + + +CREATE TABLE IF NOT EXISTS `base_reposecretkey` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `secret_key` varchar(44) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `repo_id` (`repo_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + diff --git a/scripts/upgrade/sql/7.0.0/sqlite3/ccnet/usermgr.sql b/scripts/upgrade/sql/7.0.0/sqlite3/ccnet/usermgr.sql new file mode 100644 index 
0000000000..885c275b51 --- /dev/null +++ b/scripts/upgrade/sql/7.0.0/sqlite3/ccnet/usermgr.sql @@ -0,0 +1 @@ +ALTER TABLE UserRole ADD COLUMN is_manual_set INTEGER DEFAULT 0; diff --git a/scripts/upgrade/sql/7.0.0/sqlite3/seafile.sql b/scripts/upgrade/sql/7.0.0/sqlite3/seafile.sql new file mode 100644 index 0000000000..7c82724772 --- /dev/null +++ b/scripts/upgrade/sql/7.0.0/sqlite3/seafile.sql @@ -0,0 +1,7 @@ +ALTER TABLE RepoInfo ADD COLUMN status INTEGER DEFAULT 0; +CREATE TABLE IF NOT EXISTS RepoSyncError (token CHAR(41) PRIMARY KEY, error_time BIGINT, error_con VARCHAR(1024)); +ALTER TABLE RepoSyncError RENAME TO TmpRepoSyncError; +CREATE TABLE RepoSyncError (token CHAR(41) PRIMARY KEY, error_time BIGINT, error_con VARCHAR(1024)); +INSERT INTO RepoSyncError SELECT * FROM TmpRepoSyncError; +DROP TABLE TmpRepoSyncError; +CREATE TABLE IF NOT EXISTS WebUploadTempFiles (repo_id CHAR(40) NOT NULL, file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL); diff --git a/scripts/upgrade/sql/7.0.0/sqlite3/seahub.sql b/scripts/upgrade/sql/7.0.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..a893b5b6a4 --- /dev/null +++ b/scripts/upgrade/sql/7.0.0/sqlite3/seahub.sql @@ -0,0 +1,40 @@ +CREATE TABLE IF NOT EXISTS "drafts_draft" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL, "username" varchar(255) NOT NULL, "origin_repo_id" varchar(36) NOT NULL, "origin_file_version" varchar(100) NOT NULL, "draft_file_path" varchar(1024) NOT NULL, "publish_file_version" varchar(100) NULL, "status" varchar(20) NOT NULL, "origin_file_uuid" char(32) NOT NULL); +CREATE INDEX IF NOT EXISTS "drafts_draft_created_at_e9f4523f" ON "drafts_draft" ("created_at"); +CREATE INDEX IF NOT EXISTS "drafts_draft_updated_at_0a144b05" ON "drafts_draft" ("updated_at"); +CREATE INDEX IF NOT EXISTS "drafts_draft_username_73e6738b" ON "drafts_draft" ("username"); +CREATE INDEX IF NOT EXISTS "drafts_draft_origin_file_uuid_7c003c98" ON "drafts_draft" ("origin_file_uuid"); + +CREATE TABLE IF NOT EXISTS "drafts_draftreviewer" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "reviewer" varchar(255) NOT NULL, "draft_id" integer NOT NULL REFERENCES "drafts_draft" ("id")); +CREATE INDEX IF NOT EXISTS "drafts_draftreviewer_reviewer_e4c777ac" ON "drafts_draftreviewer" ("reviewer"); +CREATE INDEX IF NOT EXISTS "drafts_draftreviewer_draft_id_4ea59775" ON "drafts_draftreviewer" ("draft_id"); + +CREATE TABLE IF NOT EXISTS "social_auth_association" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "server_url" varchar(255) NOT NULL, "handle" varchar(255) NOT NULL, "secret" varchar(255) NOT NULL, "issued" integer NOT NULL, "lifetime" integer NOT NULL, "assoc_type" varchar(64) NOT NULL); +CREATE TABLE IF NOT EXISTS "social_auth_code" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "email" varchar(254) NOT NULL, "code" varchar(32) NOT NULL, "verified" bool NOT NULL, "timestamp" datetime NOT NULL); +CREATE TABLE IF NOT EXISTS "social_auth_nonce" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "server_url" varchar(255) NOT NULL, "timestamp" integer NOT NULL, "salt" varchar(65) NOT NULL); +CREATE TABLE IF NOT EXISTS "social_auth_partial" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "token" varchar(32) NOT NULL, "next_step" smallint unsigned NOT NULL, "backend" varchar(32) NOT NULL, "data" text NOT NULL, "timestamp" datetime NOT NULL); +CREATE TABLE IF NOT EXISTS "social_auth_usersocialauth" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL, 
"provider" varchar(32) NOT NULL, "uid" varchar(255) NOT NULL, "extra_data" text NOT NULL); + + +CREATE TABLE IF NOT EXISTS "repo_tags_repotags" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "name" varchar(255) NOT NULL, "color" varchar(255) NOT NULL); +CREATE INDEX IF NOT EXISTS "repo_tags_repotags_repo_id_1163a48f" ON "repo_tags_repotags" ("repo_id"); +CREATE INDEX IF NOT EXISTS "repo_tags_repotags_name_3f4c9027" ON "repo_tags_repotags" ("name"); +CREATE INDEX IF NOT EXISTS "repo_tags_repotags_color_1292b6c1" ON "repo_tags_repotags" ("color"); + + +CREATE TABLE IF NOT EXISTS "file_tags_filetags" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "file_uuid_id" char(32) NOT NULL REFERENCES "tags_fileuuidmap" ("uuid"), "repo_tag_id" integer NOT NULL REFERENCES "repo_tags_repotags" ("id")); +CREATE INDEX IF NOT EXISTS "file_tags_filetags_file_uuid_id_e30f0ec8" ON "file_tags_filetags" ("file_uuid_id"); +CREATE INDEX IF NOT EXISTS "file_tags_filetags_repo_tag_id_c39660cb" ON "file_tags_filetags" ("repo_tag_id"); + + +CREATE TABLE IF NOT EXISTS "related_files_relatedfiles" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "o_uuid_id" char(32) NOT NULL REFERENCES "tags_fileuuidmap" ("uuid"), "r_uuid_id" char(32) NOT NULL REFERENCES "tags_fileuuidmap" ("uuid")); +CREATE INDEX IF NOT EXISTS "related_files_relatedfiles_o_uuid_id_aaa8e613" ON "related_files_relatedfiles" ("o_uuid_id"); +CREATE INDEX IF NOT EXISTS "related_files_relatedfiles_r_uuid_id_031751df" ON "related_files_relatedfiles" ("r_uuid_id"); + + +ALTER TABLE "base_filecomment" ADD COLUMN "detail" text DEFAULT NULL; +ALTER TABLE "base_filecomment" ADD COLUMN "resolved" bool NOT NULL DEFAULT 0; +CREATE INDEX IF NOT EXISTS "base_filecomment_resolved_e0717eca" ON "base_filecomment" ("resolved"); + + +CREATE TABLE IF NOT EXISTS "base_reposecretkey" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL UNIQUE, "secret_key" varchar(44) NOT NULL); + diff --git a/scripts/upgrade/sql/7.1.0/mysql/seahub.sql b/scripts/upgrade/sql/7.1.0/mysql/seahub.sql new file mode 100644 index 0000000000..c6bb448dc1 --- /dev/null +++ b/scripts/upgrade/sql/7.1.0/mysql/seahub.sql @@ -0,0 +1,73 @@ +CREATE TABLE IF NOT EXISTS `base_reposecretkey` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `secret_key` varchar(44) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `repo_id` (`repo_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +ALTER TABLE `constance_config` MODIFY `value` longtext DEFAULT NULL; +ALTER TABLE `constance_config` CHANGE `key` `constance_key` varchar(255) NOT NULL; + +DROP INDEX `drafts_draft_origin_file_uuid_7c003c98_uniq` ON `drafts_draft`; +ALTER TABLE `drafts_draft` ADD CONSTRAINT `drafts_draft_origin_file_uuid_7c003c98_uniq` UNIQUE (`origin_file_uuid`); +CREATE INDEX `drafts_draft_origin_repo_id_8978ca2c` ON `drafts_draft` (`origin_repo_id`); + + +CREATE TABLE IF NOT EXISTS `file_participants_fileparticipant` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `username` varchar(255) NOT NULL, + `uuid_id` char(32) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `file_participants_fileparticipant_uuid_id_username_c747dd36_uniq` (`uuid_id`,`username`), + CONSTRAINT `file_participants_fi_uuid_id_861b7339_fk_tags_file` FOREIGN KEY (`uuid_id`) REFERENCES `tags_fileuuidmap` (`uuid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +CREATE TABLE IF NOT EXISTS `repo_api_tokens` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `app_name` varchar(255) NOT 
NULL, + `token` varchar(40) NOT NULL, + `generated_at` datetime NOT NULL, + `generated_by` varchar(255) NOT NULL, + `last_access` datetime NOT NULL, + `permission` varchar(15) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `token` (`token`), + KEY `repo_api_tokens_repo_id_47a50fef` (`repo_id`), + KEY `repo_api_tokens_app_name_7c395c31` (`app_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +CREATE TABLE IF NOT EXISTS `abuse_reports_abusereport` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `reporter` longtext DEFAULT NULL, + `repo_id` varchar(36) NOT NULL, + `repo_name` varchar(255) NOT NULL, + `file_path` longtext DEFAULT NULL, + `abuse_type` varchar(255) NOT NULL, + `description` longtext DEFAULT NULL, + `handled` tinyint(1) NOT NULL, + `time` datetime(6) NOT NULL, + PRIMARY KEY (`id`), + KEY `abuse_reports_abusereport_abuse_type_703d5335` (`abuse_type`), + KEY `abuse_reports_abusereport_handled_94b8304c` (`handled`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +CREATE TABLE IF NOT EXISTS `repo_share_invitation` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `path` longtext NOT NULL, + `permission` varchar(50) NOT NULL, + `invitation_id` int(11) NOT NULL, + PRIMARY KEY (`id`), + KEY `repo_share_invitatio_invitation_id_b71effd2_fk_invitatio` (`invitation_id`), + KEY `repo_share_invitation_repo_id_7bcf84fa` (`repo_id`), + CONSTRAINT `repo_share_invitatio_invitation_id_b71effd2_fk_invitatio` FOREIGN KEY (`invitation_id`) REFERENCES `invitations_invitation` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `post_office_attachment` add column `headers` longtext DEFAULT NULL; + diff --git a/scripts/upgrade/sql/7.1.0/sqlite3/seahub.sql b/scripts/upgrade/sql/7.1.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..4af2aad3a0 --- /dev/null +++ b/scripts/upgrade/sql/7.1.0/sqlite3/seahub.sql @@ -0,0 +1,43 @@ +CREATE TABLE IF NOT EXISTS "base_reposecretkey" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL UNIQUE, "secret_key" varchar(44) NOT NULL); + + +DROP TABLE IF EXISTS "constance_config_old"; +ALTER TABLE "constance_config" RENAME TO "constance_config_old"; +CREATE TABLE IF NOT EXISTS "constance_config" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "constance_key" varchar(255) NOT NULL UNIQUE, "value" text NULL); +INSERT INTO "constance_config" ("id", "constance_key", "value") SELECT "id", "key", "value" FROM "constance_config_old"; +DROP TABLE "constance_config_old"; + + + +DROP TABLE IF EXISTS "drafts_draft_old"; +ALTER TABLE "drafts_draft" RENAME TO "drafts_draft_old"; +CREATE TABLE IF NOT EXISTS "drafts_draft" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL, "username" varchar(255) NOT NULL, "origin_file_version" varchar(100) NOT NULL, "draft_file_path" varchar(1024) NOT NULL, "origin_file_uuid" char(32) NOT NULL UNIQUE, "publish_file_version" varchar(100) NULL, "status" varchar(20) NOT NULL, "origin_repo_id" varchar(36) NOT NULL); +INSERT INTO "drafts_draft" ("id", "created_at", "updated_at", "username", "origin_file_version", "draft_file_path", "origin_file_uuid", "publish_file_version", "status", "origin_repo_id") SELECT "id", "created_at", "updated_at", "username", "origin_file_version", "draft_file_path", "origin_file_uuid", "publish_file_version", "status", "origin_repo_id" FROM "drafts_draft_old"; +DROP TABLE "drafts_draft_old"; + +CREATE INDEX IF NOT EXISTS "drafts_draft_created_at_e9f4523f" ON "drafts_draft" ("created_at"); +CREATE INDEX 
IF NOT EXISTS "drafts_draft_origin_repo_id_8978ca2c" ON "drafts_draft" ("origin_repo_id"); +CREATE INDEX IF NOT EXISTS "drafts_draft_updated_at_0a144b05" ON "drafts_draft" ("updated_at"); +CREATE INDEX IF NOT EXISTS "drafts_draft_username_73e6738b" ON "drafts_draft" ("username"); + + +CREATE TABLE IF NOT EXISTS "abuse_reports_abusereport" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "reporter" text NULL, "repo_id" varchar(36) NOT NULL, "repo_name" varchar(255) NOT NULL, "file_path" text NULL, "abuse_type" varchar(255) NOT NULL, "description" text NULL, "handled" bool NOT NULL, "time" datetime NOT NULL); +CREATE INDEX IF NOT EXISTS "abuse_reports_abusereport_abuse_type_703d5335" ON "abuse_reports_abusereport" ("abuse_type"); +CREATE INDEX IF NOT EXISTS "abuse_reports_abusereport_handled_94b8304c" ON "abuse_reports_abusereport" ("handled"); + + +CREATE TABLE IF NOT EXISTS "file_participants_fileparticipant" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL, "uuid_id" char(32) NOT NULL REFERENCES "tags_fileuuidmap" ("uuid")); +CREATE UNIQUE INDEX IF NOT EXISTS "file_participants_fileparticipant_uuid_id_username_c747dd36_uniq" ON "file_participants_fileparticipant" ("uuid_id", "username"); +CREATE INDEX IF NOT EXISTS "file_participants_fileparticipant_uuid_id_861b7339" ON "file_participants_fileparticipant" ("uuid_id"); + + +CREATE TABLE IF NOT EXISTS "repo_share_invitation" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "path" text NOT NULL, "permission" varchar(50) NOT NULL, "invitation_id" integer NOT NULL REFERENCES "invitations_invitation" ("id")); +CREATE INDEX IF NOT EXISTS "repo_share_invitation_repo_id_7bcf84fa" ON "repo_share_invitation" ("repo_id"); +CREATE INDEX IF NOT EXISTS "repo_share_invitation_invitation_id_b71effd2" ON "repo_share_invitation" ("invitation_id"); + +CREATE TABLE IF NOT EXISTS "repo_api_tokens" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "app_name" varchar(255) NOT NULL, "token" varchar(40) NOT NULL UNIQUE, "generated_at" datetime NOT NULL, "generated_by" varchar(255) NOT NULL, "last_access" datetime NOT NULL, "permission" varchar(15) NOT NULL); +CREATE INDEX IF NOT EXISTS "repo_api_tokens_repo_id_47a50fef" ON "repo_api_tokens" ("repo_id"); +CREATE INDEX IF NOT EXISTS "repo_api_tokens_app_name_7c395c31" ON "repo_api_tokens" ("app_name"); + +ALTER TABLE "post_office_attachment" add column "headers" text DEFAULT NULL; + diff --git a/scripts/upgrade/sql/8.0.0/mysql/seafevents.sql b/scripts/upgrade/sql/8.0.0/mysql/seafevents.sql new file mode 100644 index 0000000000..57611397d1 --- /dev/null +++ b/scripts/upgrade/sql/8.0.0/mysql/seafevents.sql @@ -0,0 +1,4 @@ +ALTER TABLE `VirusFile` ADD COLUMN `has_ignored` TINYINT(1) NOT NULL DEFAULT 0; +ALTER TABLE `VirusFile` CHANGE `has_handle` `has_deleted` TINYINT(1); +ALTER TABLE `VirusFile` ADD INDEX `has_deleted` (`has_deleted`); +ALTER TABLE `VirusFile` ADD INDEX `has_ignored` (`has_ignored`); diff --git a/scripts/upgrade/sql/8.0.0/mysql/seahub.sql b/scripts/upgrade/sql/8.0.0/mysql/seahub.sql new file mode 100644 index 0000000000..2c4a48ae10 --- /dev/null +++ b/scripts/upgrade/sql/8.0.0/mysql/seahub.sql @@ -0,0 +1,57 @@ +CREATE TABLE IF NOT EXISTS `ocm_share` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `shared_secret` varchar(36) NOT NULL, + `from_user` varchar(255) NOT NULL, + `to_user` varchar(255) NOT NULL, + `to_server_url` varchar(200) NOT NULL, + `repo_id` varchar(36) NOT NULL, + `repo_name` varchar(255) 
NOT NULL, + `permission` varchar(50) NOT NULL, + `path` longtext NOT NULL, + `ctime` datetime(6) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `shared_secret` (`shared_secret`), + KEY `ocm_share_from_user_7fbb7bb6` (`from_user`), + KEY `ocm_share_to_user_4e255523` (`to_user`), + KEY `ocm_share_to_server_url_43f0e89b` (`to_server_url`), + KEY `ocm_share_repo_id_51937581` (`repo_id`) +) ENGINE = InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `ocm_share_received` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `shared_secret` varchar(36) NOT NULL, + `from_user` varchar(255) NOT NULL, + `to_user` varchar(255) NOT NULL, + `from_server_url` varchar(200) NOT NULL, + `repo_id` varchar(36) NOT NULL, + `repo_name` varchar(255) NOT NULL, + `permission` varchar(50) NOT NULL, + `path` longtext NOT NULL, + `provider_id` varchar(40) NOT NULL, + `ctime` datetime(6) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `shared_secret` (`shared_secret`), + KEY `ocm_share_received_from_user_8137d8eb` (`from_user`), + KEY `ocm_share_received_to_user_0921d09a` (`to_user`), + KEY `ocm_share_received_from_server_url_10527b80` (`from_server_url`), + KEY `ocm_share_received_repo_id_9e77a1b9` (`repo_id`), + KEY `ocm_share_received_provider_id_60c873e0` (`provider_id`) +) ENGINE = InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `repo_auto_delete` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `days` int(11) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `repo_id` (`repo_id`) +) ENGINE = InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `external_department` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `group_id` int(11) NOT NULL, + `provider` varchar(32) NOT NULL, + `outer_id` bigint(20) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `group_id` (`group_id`), + UNIQUE KEY `external_department_provider_outer_id_8dns6vkw_uniq` (`provider`,`outer_id`) +) ENGINE = InnoDB DEFAULT CHARSET=utf8; diff --git a/scripts/upgrade/sql/8.0.0/sqlite3/seafevents.sql b/scripts/upgrade/sql/8.0.0/sqlite3/seafevents.sql new file mode 100644 index 0000000000..2bc16950e1 --- /dev/null +++ b/scripts/upgrade/sql/8.0.0/sqlite3/seafevents.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS "VirusFile_old"; +ALTER TABLE "VirusFile" RENAME TO "VirusFile_old"; +CREATE TABLE IF NOT EXISTS "VirusFile" ("vid" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "commit_id" varchar(40) NOT NULL, "file_path" text NOT NULL, "has_deleted" tinyint(1) NOT NULL, "has_ignored" TINYINT(1) NOT NULL DEFAULT 0); +INSERT INTO "VirusFile" ("vid", "repo_id", "commit_id", "file_path", "has_deleted") SELECT "vid", "repo_id", "commit_id", "file_path", "has_handle" FROM "VirusFile_old"; +DROP TABLE "VirusFile_old"; + +CREATE INDEX IF NOT EXISTS "VirusFile_repo_id_yewnci4gd" ON "VirusFile" ("repo_id"); +CREATE INDEX IF NOT EXISTS "VirusFile_has_deleted_834ndyts" ON "VirusFile" ("has_deleted"); +CREATE INDEX IF NOT EXISTS "VirusFile_has_ignored_d84tvuwg" ON "VirusFile" ("has_ignored"); diff --git a/scripts/upgrade/sql/8.0.0/sqlite3/seahub.sql b/scripts/upgrade/sql/8.0.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..0d6a52b24e --- /dev/null +++ b/scripts/upgrade/sql/8.0.0/sqlite3/seahub.sql @@ -0,0 +1,17 @@ +CREATE TABLE IF NOT EXISTS "ocm_share" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "shared_secret" varchar(36) NOT NULL UNIQUE, "from_user" varchar(255) NOT NULL, "to_user" varchar(255) NOT NULL, "to_server_url" varchar(200) NOT NULL, "repo_id" varchar(36) NOT NULL, "repo_name" varchar(255) 
NOT NULL, "permission" varchar(50) NOT NULL, "path" text NOT NULL, "ctime" datetime(6) NOT NULL); +CREATE INDEX IF NOT EXISTS "ocm_share_from_user_7fbb7bb6" ON "ocm_share" ("from_user"); +CREATE INDEX IF NOT EXISTS "ocm_share_to_user_4e255523" ON "ocm_share" ("to_user"); +CREATE INDEX IF NOT EXISTS "ocm_share_to_server_url_43f0e89b" ON "ocm_share" ("to_server_url"); +CREATE INDEX IF NOT EXISTS "ocm_share_repo_id_51937581" ON "ocm_share" ("repo_id"); + +CREATE TABLE IF NOT EXISTS "ocm_share_received" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "shared_secret" varchar(36) NOT NULL UNIQUE, "from_user" varchar(255) NOT NULL, "to_user" varchar(255) NOT NULL, "from_server_url" varchar(200) NOT NULL, "repo_id" varchar(36) NOT NULL, "repo_name" varchar(255) NOT NULL, "permission" varchar(50) NOT NULL, "path" text NOT NULL, "provider_id" varchar(40) NOT NULL, "ctime" datetime(6) NOT NULL); +CREATE INDEX IF NOT EXISTS "ocm_share_received_from_user_8137d8eb" ON "ocm_share_received" ("from_user"); +CREATE INDEX IF NOT EXISTS "ocm_share_received_to_user_0921d09a" ON "ocm_share_received" ("to_user"); +CREATE INDEX IF NOT EXISTS "ocm_share_received_from_server_url_10527b80" ON "ocm_share_received" ("from_server_url"); +CREATE INDEX IF NOT EXISTS "ocm_share_received_repo_id_9e77a1b9" ON "ocm_share_received" ("repo_id"); +CREATE INDEX IF NOT EXISTS "ocm_share_received_provider_id_60c873e0" ON "ocm_share_received" ("provider_id"); + +CREATE TABLE IF NOT EXISTS "repo_auto_delete" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL UNIQUE, "days" integer NOT NULL); + +CREATE TABLE IF NOT EXISTS "external_department" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "group_id" integer NOT NULL UNIQUE, "provider" varchar(32) NOT NULL, "outer_id" bigint NOT NULL); +CREATE UNIQUE INDEX IF NOT EXISTS "external_department_provider_outer_id_8dns6vkw_uniq" ON "external_department" (`provider`,`outer_id`); diff --git a/scripts/upgrade/sql/9.0.0/mysql/seafevents.sql b/scripts/upgrade/sql/9.0.0/mysql/seafevents.sql new file mode 100644 index 0000000000..2d9ef35863 --- /dev/null +++ b/scripts/upgrade/sql/9.0.0/mysql/seafevents.sql @@ -0,0 +1,2 @@ +ALTER TABLE `FileAudit` ADD INDEX `ix_FileAudit_user` (`user`); +ALTER TABLE `FileAudit` ADD INDEX `ix_FileAudit_repo_id` (`repo_id`); diff --git a/scripts/upgrade/sql/9.0.0/mysql/seafile.sql b/scripts/upgrade/sql/9.0.0/mysql/seafile.sql new file mode 100644 index 0000000000..aed07d16d2 --- /dev/null +++ b/scripts/upgrade/sql/9.0.0/mysql/seafile.sql @@ -0,0 +1,2 @@ +ALTER TABLE `RepoUserToken` ADD INDEX `RepoUserToken_token` (`token`); +ALTER TABLE `RepoTokenPeerInfo` ADD INDEX `RepoTokenPeerInfo_peer_id` (`peer_id`); diff --git a/scripts/upgrade/sql/9.0.0/mysql/seahub.sql b/scripts/upgrade/sql/9.0.0/mysql/seahub.sql new file mode 100644 index 0000000000..a74835d22a --- /dev/null +++ b/scripts/upgrade/sql/9.0.0/mysql/seahub.sql @@ -0,0 +1,51 @@ +ALTER TABLE `api2_tokenv2` CHANGE COLUMN `device_name` `device_name` varchar(40) CHARACTER SET 'utf8mb4' COLLATE utf8mb4_unicode_ci NOT NULL; + +CREATE TABLE `custom_share_permission` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `repo_id` varchar(36) NOT NULL, + `name` varchar(255) NOT NULL, + `description` varchar(500) NOT NULL, + `permission` longtext NOT NULL, + PRIMARY KEY (`id`), + KEY `custom_share_permission_repo_id_578fe49f` (`repo_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `ocm_via_webdav_received_shares` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `description` varchar(255) 
DEFAULT NULL, + `name` varchar(255) NOT NULL, + `owner` varchar(255) NOT NULL, + `owner_display_name` varchar(255) DEFAULT NULL, + `protocol_name` varchar(255) NOT NULL, + `shared_secret` varchar(255) NOT NULL, + `permissions` varchar(255) NOT NULL, + `provider_id` varchar(255) NOT NULL, + `resource_type` varchar(255) NOT NULL, + `share_type` varchar(255) NOT NULL, + `share_with` varchar(255) NOT NULL, + `shared_by` varchar(255) NOT NULL, + `shared_by_display_name` varchar(255) DEFAULT NULL, + `ctime` datetime(6) NOT NULL, + `is_dir` tinyint(1) NOT NULL, + PRIMARY KEY (`id`), + KEY `ocm_via_webdav_share_received_owner_261eaa70` (`owner`), + KEY `ocm_via_webdav_share_received_shared_secret_fbb6be5a` (`shared_secret`), + KEY `ocm_via_webdav_share_received_provider_id_a55680e9` (`provider_id`), + KEY `ocm_via_webdav_share_received_resource_type_a3c71b57` (`resource_type`), + KEY `ocm_via_webdav_share_received_share_type_7615aaab` (`share_type`), + KEY `ocm_via_webdav_share_received_share_with_5a23eb17` (`share_with`), + KEY `ocm_via_webdav_share_received_shared_by_1786d580` (`shared_by`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `onlyoffice_onlyofficedockey` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `doc_key` varchar(36) NOT NULL, + `username` varchar(255) NOT NULL, + `repo_id` varchar(36) NOT NULL, + `file_path` longtext NOT NULL, + `repo_id_file_path_md5` varchar(100) NOT NULL, + `created_time` datetime(6) NOT NULL, + PRIMARY KEY (`id`), + KEY `onlyoffice_onlyofficedockey_doc_key_edba1352` (`doc_key`), + KEY `onlyoffice_onlyofficedockey_repo_id_file_path_md5_52002073` (`repo_id_file_path_md5`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/scripts/upgrade/sql/9.0.0/sqlite3/seafevents.sql b/scripts/upgrade/sql/9.0.0/sqlite3/seafevents.sql new file mode 100644 index 0000000000..5e67711757 --- /dev/null +++ b/scripts/upgrade/sql/9.0.0/sqlite3/seafevents.sql @@ -0,0 +1,2 @@ +CREATE INDEX IF NOT EXISTS "ix_FileAudit_user" ON "FileAudit" ("user"); +CREATE INDEX IF NOT EXISTS "ix_FileAudit_repo_id" ON "FileAudit" ("repo_id"); diff --git a/scripts/upgrade/sql/9.0.0/sqlite3/seafile.sql b/scripts/upgrade/sql/9.0.0/sqlite3/seafile.sql new file mode 100644 index 0000000000..ed5e0fbcad --- /dev/null +++ b/scripts/upgrade/sql/9.0.0/sqlite3/seafile.sql @@ -0,0 +1,2 @@ +CREATE INDEX IF NOT EXISTS "RepoUserToken_token" ON "RepoUserToken" ("token"); +CREATE INDEX IF NOT EXISTS "RepoTokenPeerInfo_peer_id" ON "RepoTokenPeerInfo" ("peer_id"); diff --git a/scripts/upgrade/sql/9.0.0/sqlite3/seahub.sql b/scripts/upgrade/sql/9.0.0/sqlite3/seahub.sql new file mode 100644 index 0000000000..fcec8940a7 --- /dev/null +++ b/scripts/upgrade/sql/9.0.0/sqlite3/seahub.sql @@ -0,0 +1,15 @@ +CREATE TABLE IF NOT EXISTS "custom_share_permission" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "name" varchar(255) NOT NULL, "description" varchar(500) NOT NULL, "permission" text NOT NULL); +CREATE INDEX IF NOT EXISTS "custom_share_permission_repo_id_578fe49f" ON "custom_share_permission" ("repo_id"); + +CREATE TABLE IF NOT EXISTS "ocm_via_webdav_received_shares" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "description" varchar(255) NULL, "name" varchar(255) NOT NULL, "owner" varchar(255) NOT NULL, "owner_display_name" varchar(255) NULL, "protocol_name" varchar(255) NOT NULL, "shared_secret" varchar(255) NOT NULL, "permissions" varchar(255) NOT NULL, "provider_id" varchar(255) NOT NULL, "resource_type" varchar(255) NOT NULL, "share_type" varchar(255) NOT NULL,
"share_with" varchar(255) NOT NULL, "shared_by" varchar(255) NOT NULL, "shared_by_display_name" varchar(255) NOT NULL, "ctime" datetime NOT NULL, "is_dir" bool NOT NULL); +CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_owner_261eaa70" ON "ocm_via_webdav_received_shares" ("owner"); +CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_shared_secret_fbb6be5a" ON "ocm_via_webdav_received_shares" ("shared_secret"); +CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_provider_id_a55680e9" ON "ocm_via_webdav_received_shares" ("provider_id"); +CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_resource_type_a3c71b57" ON "ocm_via_webdav_received_shares" ("resource_type"); +CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_share_type_7615aaab" ON "ocm_via_webdav_received_shares" ("share_type"); +CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_share_with_5a23eb17" ON "ocm_via_webdav_received_shares" ("share_with"); +CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_shared_by_1786d580" ON "ocm_via_webdav_received_shares" ("shared_by"); + +CREATE TABLE IF NOT EXISTS "onlyoffice_onlyofficedockey" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "doc_key" varchar(36) NOT NULL, "username" varchar(255) NOT NULL, "repo_id" varchar(36) NULL, "file_path" TEXT NOT NULL, "repo_id_file_path_md5" varchar(100) NOT NULL, "created_time" datetime NOT NULL); +CREATE INDEX IF NOT EXISTS "onlyoffice_onlyofficedockey_doc_key_edba1352" ON "onlyoffice_onlyofficedockey" ("doc_key"); +CREATE INDEX IF NOT EXISTS "onlyoffice_onlyofficedockey_repo_id_file_path_md5_52002073" ON "onlyoffice_onlyofficedockey" ("repo_id_file_path_md5"); diff --git a/scripts/upgrade/upgrade_1.2_1.3.sh b/scripts/upgrade/upgrade_1.2_1.3.sh new file mode 100755 index 0000000000..3bceb1e363 --- /dev/null +++ b/scripts/upgrade/upgrade_1.2_1.3.sh @@ -0,0 +1,121 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH + +prev_version=1.2.0 +current_version=1.3.0 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." 
+ echo "Or if you installed it in a non-standard PATH, set the PYTHON environment variable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quitting." + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exist." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +check_python_executable +read_seafile_data_dir + +export SEAFILE_CONF_DIR=$seafile_data_dir + +# test whether seafile server has been stopped. +if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running!" + echo "stop it using scripts before upgrade." + echo + exit 1 +elif pgrep -f "manage.py run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running!" + echo "stop it before upgrade." + echo + exit 1 +fi + +# run django syncdb command +echo "------------------------------" +echo "updating seahub database ... " +echo +manage_py=${INSTALLPATH}/seahub/manage.py +pushd "${INSTALLPATH}/seahub" 2>/dev/null 1>&2 +if ! $PYTHON manage.py syncdb 2>/dev/null 1>&2; then + echo "failed" + exit -1 +fi +popd 2>/dev/null 1>&2 + +echo "DONE" +echo "------------------------------" +echo + +echo "------------------------------" +echo "migrating avatars ..." +echo +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars + +# move "media/avatars" directory outside +if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars ${media_dir} + +elif [[ !
-L ${orig_avatar_dir} ]]; then + mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars ${media_dir} +fi + +echo "DONE" +echo "------------------------------" +echo \ No newline at end of file diff --git a/scripts/upgrade/upgrade_1.3_1.4.sh b/scripts/upgrade/upgrade_1.3_1.4.sh new file mode 100755 index 0000000000..cbdc24646a --- /dev/null +++ b/scripts/upgrade/upgrade_1.3_1.4.sh @@ -0,0 +1,119 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH + +prev_version=1.3 +current_version=1.4.0 + +echo +echo "-------------------------------------------------------------" +echo "This script will upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to continue" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continuing." + echo "Or if you installed it in a non-standard PATH, set the PYTHON environment variable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quitting." + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exist." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +check_python_executable +read_seafile_data_dir + +export SEAFILE_CONF_DIR=$seafile_data_dir + +# test whether seafile server has been stopped. +if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running!" + echo "stop it using scripts before upgrade." + echo + exit 1 +elif pgrep -f "manage.py run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running!" + echo "stop it before upgrade." + echo + exit 1 +fi + +echo "------------------------------" +echo "migrating avatars ..." +echo +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars + +# move "media/avatars" directory outside +if [[ !
-d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars ${media_dir} + +elif [[ ! -L ${orig_avatar_dir} ]]; then + mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars ${media_dir} +fi + +echo "DONE" +echo "------------------------------" +echo + +# update database +echo "------------------------------" +echo "updating seahub database ... " +echo + +db_update_py=$UPGRADE_DIR/db_update_1.3_1.4.py +if ! $PYTHON $db_update_py $default_seahub_db 1>/dev/null; then + echo "failed" +fi + +echo "DONE" +echo "------------------------------" +echo diff --git a/scripts/upgrade/upgrade_1.4_1.5.sh b/scripts/upgrade/upgrade_1.4_1.5.sh new file mode 100755 index 0000000000..7cc7ab550f --- /dev/null +++ b/scripts/upgrade/upgrade_1.4_1.5.sh @@ -0,0 +1,106 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH + +prev_version=1.4 +current_version=1.5 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +check_python_executable +read_seafile_data_dir + +export SEAFILE_CONF_DIR=$seafile_data_dir + +# test whether seafile server has been stopped. +if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." 
+ echo + exit 1 +elif pgrep -f "manage.py run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +fi + +echo +echo "------------------------------" +echo "migrating avatars ..." +echo +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars + +# move "media/avatars" directory outside +if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars ${media_dir} + +elif [[ ! -L ${orig_avatar_dir} ]]; then + mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars ${media_dir} +fi + +echo "DONE" +echo "------------------------------" +echo \ No newline at end of file diff --git a/scripts/upgrade/upgrade_1.5_1.6.sh b/scripts/upgrade/upgrade_1.5_1.6.sh new file mode 100755 index 0000000000..9028108281 --- /dev/null +++ b/scripts/upgrade/upgrade_1.5_1.6.sh @@ -0,0 +1,122 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH + +prev_version=1.5 +current_version=1.6 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +check_python_executable +read_seafile_data_dir + +export SEAFILE_CONF_DIR=$seafile_data_dir + +# test whether seafile server has been stopped. 
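+# Note: pgrep matches on the process name, and "pgrep -f" on the full command
+# line, so this check only passes once both daemons are down. Assuming the
+# stock control scripts shipped alongside this package, a typical way to stop
+# them first is, for example:
+#
+#     ./seafile.sh stop
+#     ./seahub.sh stop
+#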
+if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 +elif pgrep -f "manage.py run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +fi + +echo +echo "------------------------------" +echo "migrating avatars ..." +echo +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars + +# move "media/avatars" directory outside +if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars ${media_dir} + +elif [[ ! -L ${orig_avatar_dir} ]]; then + mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars ${media_dir} +fi + +echo "DONE" +echo "------------------------------" +echo + +echo +echo "------------------------------" +echo "Updating seahub database ..." +echo + +seahub_db=${TOPDIR}/seahub.db +seahub_sql=${UPGRADE_DIR}/sql/1.6.0/sqlite3/seahub.sql +if ! sqlite3 "${seahub_db}" < "${seahub_sql}"; then + echo "Failed to update seahub database" + exit 1 +fi + +echo "DONE" +echo "------------------------------" +echo \ No newline at end of file diff --git a/scripts/upgrade/upgrade_1.6_1.7.sh b/scripts/upgrade/upgrade_1.6_1.7.sh new file mode 100755 index 0000000000..b9d1dc0395 --- /dev/null +++ b/scripts/upgrade/upgrade_1.6_1.7.sh @@ -0,0 +1,137 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH + +prev_version=1.6 +current_version=1.7 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! 
-f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +check_python_executable +read_seafile_data_dir + +export SEAFILE_CONF_DIR=$seafile_data_dir + +# test whether seafile server has been stopped. +if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 +elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +elif pgrep -f "${manage_py} run_fcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +fi + +echo +echo "------------------------------" +echo "migrating avatars ..." +echo +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars + +# move "media/avatars" directory outside +if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars ${media_dir} + +elif [[ ! -L ${orig_avatar_dir} ]]; then + mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars ${media_dir} +fi + +echo "DONE" +echo "------------------------------" +echo + +echo +echo "------------------------------" +echo "Updating seafile/seahub database ..." +echo + +seahub_db=${TOPDIR}/seahub.db +seahub_sql=${UPGRADE_DIR}/sql/1.7.0/sqlite3/seahub.sql +if ! sqlite3 "${seahub_db}" < "${seahub_sql}"; then + echo "Failed to update seahub database" + exit 1 +fi + +seafile_db=${seafile_data_dir}/seafile.db +seafile_sql=${UPGRADE_DIR}/sql/1.7.0/sqlite3/seafile.sql +if ! 
sqlite3 "${seafile_db}" < "${seafile_sql}"; then + echo "Failed to update seafile database" + exit 1 +fi + +echo "DONE" +echo "------------------------------" +echo diff --git a/scripts/upgrade/upgrade_1.7_1.8.sh b/scripts/upgrade/upgrade_1.7_1.8.sh new file mode 100755 index 0000000000..f2c94c0e75 --- /dev/null +++ b/scripts/upgrade/upgrade_1.7_1.8.sh @@ -0,0 +1,130 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH + +prev_version=1.7 +current_version=1.8 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +check_python_executable +read_seafile_data_dir + +export SEAFILE_CONF_DIR=$seafile_data_dir + +# test whether seafile server has been stopped. +if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 +elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +elif pgrep -f "${manage_py} run_fcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +fi + +echo +echo "------------------------------" +echo "migrating avatars ..." 
+echo +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars + +# move "media/avatars" directory outside +if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars ${media_dir} + +elif [[ ! -L ${orig_avatar_dir} ]]; then + mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars ${media_dir} +fi + +echo "DONE" +echo "------------------------------" +echo + +echo +echo "------------------------------" +echo "Updating seafile/seahub database ..." +echo + +seahub_db=${TOPDIR}/seahub.db +seahub_sql=${UPGRADE_DIR}/sql/1.8.0/sqlite3/seahub.sql +if ! sqlite3 "${seahub_db}" < "${seahub_sql}"; then + echo "Failed to update seahub database" + exit 1 +fi + +echo "DONE" +echo "------------------------------" +echo diff --git a/scripts/upgrade/upgrade_1.8_2.0.sh b/scripts/upgrade/upgrade_1.8_2.0.sh new file mode 100755 index 0000000000..c5cdbafe6a --- /dev/null +++ b/scripts/upgrade/upgrade_1.8_2.0.sh @@ -0,0 +1,137 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH + +prev_version=1.8 +current_version=2.0 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." 
+ echo "" + exit 1; + fi +} + +check_python_executable +read_seafile_data_dir + +export SEAFILE_CONF_DIR=$seafile_data_dir + +# test whether seafile server has been stopped. +if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 +elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +elif pgrep -f "${manage_py} run_fcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 +fi + +echo +echo "------------------------------" +echo "migrating avatars ..." +echo +media_dir=${INSTALLPATH}/seahub/media +orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars +dest_avatar_dir=${TOPDIR}/seahub-data/avatars + +# move "media/avatars" directory outside +if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars ${media_dir} + +elif [[ ! -L ${orig_avatar_dir} ]]; then + mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars ${media_dir} +fi + +echo "DONE" +echo "------------------------------" +echo + +echo +echo "------------------------------" +echo "Updating seafile/seahub database ..." +echo + +seahub_db=${TOPDIR}/seahub.db +seahub_sql=${UPGRADE_DIR}/sql/2.0.0/sqlite3/seahub.sql +if ! sqlite3 "${seahub_db}" < "${seahub_sql}"; then + echo "Failed to update seahub database" + exit 1 +fi + +add_collate_script=${UPGRADE_DIR}/add_collate.sh +echo "fix seafile database case issues..." +if ! ${add_collate_script} ${default_ccnet_conf_dir} ${seafile_data_dir} ${seahub_db}; then + echo "Failed." 
+ exit 1 +fi + +echo "DONE" +echo "------------------------------" +echo diff --git a/scripts/upgrade/upgrade_2.0_2.1.sh b/scripts/upgrade/upgrade_2.0_2.1.sh new file mode 100755 index 0000000000..5f5dbb4eb0 --- /dev/null +++ b/scripts/upgrade/upgrade_2.0_2.1.sh @@ -0,0 +1,206 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH + +prev_version=2.0 +current_version=2.1 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_fcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." + echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! 
-d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 2.1.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + seafile_server_symlink=${TOPDIR}/seafile-server-latest + if [[ -L "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function gen_seafdav_conf() { + echo + echo "generating seafdav.conf ..." + echo + seafdav_conf=${default_conf_dir}/seafdav.conf + mkdir -p "${default_conf_dir}" + if ! $(cat > "${seafdav_conf}" </dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_fcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." + echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! 
-d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 2.2.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + seafile_server_symlink=${TOPDIR}/seafile-server-latest + if [[ -L "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +export SEAFILE_CONF_DIR=$seafile_data_dir + +migrate_avatars; + +update_database; + +upgrade_seafile_server_latest_symlink; + + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." +echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_2.2_3.0.sh b/scripts/upgrade/upgrade_2.2_3.0.sh new file mode 100755 index 0000000000..d0643d3a65 --- /dev/null +++ b/scripts/upgrade/upgrade_2.2_3.0.sh @@ -0,0 +1,191 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=2.2 +current_version=3.0 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif 
which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_fcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." + echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 3.0.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + seafile_server_symlink=${TOPDIR}/seafile-server-latest + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function migrate_seafile_data_format() { + seaf_migrate=${INSTALLPATH}/seafile/bin/seaf-migrate + echo + echo "now migrating seafile data to 3.0 format" + echo + if ! 
LD_LIBRARY_PATH=${SEAFILE_LD_LIBRARY_PATH} ${seaf_migrate} \ + -c "${default_ccnet_conf_dir}" -d "${seafile_data_dir}"; then + echo + echo "Failed to migrate seafile data to 3.0 format" + echo + exit 1; + fi + echo + echo "Successfully migrated seafile data to 3.0 format" + echo +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +export SEAFILE_CONF_DIR=$seafile_data_dir + +migrate_seafile_data_format; + +migrate_avatars; + +update_database; + +upgrade_seafile_server_latest_symlink; + + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." +echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_3.0_3.1.sh b/scripts/upgrade/upgrade_3.0_3.1.sh new file mode 100755 index 0000000000..d3b7f4f2f8 --- /dev/null +++ b/scripts/upgrade/upgrade_3.0_3.1.sh @@ -0,0 +1,215 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=3.0 +current_version=3.1 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." 
+ echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." + echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 3.1.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! 
-e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +migrate_avatars; + +update_database; + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." +echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_3.1_4.0.sh b/scripts/upgrade/upgrade_3.1_4.0.sh new file mode 100755 index 0000000000..6a6bc9a9e3 --- /dev/null +++ b/scripts/upgrade/upgrade_3.1_4.0.sh @@ -0,0 +1,215 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=3.1 +current_version=4.0 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. 
+ if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." + echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 4.0.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +migrate_avatars; + +update_database; + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_4.0_4.1.sh b/scripts/upgrade/upgrade_4.0_4.1.sh new file mode 100755 index 0000000000..e3eb848d00 --- /dev/null +++ b/scripts/upgrade/upgrade_4.0_4.1.sh @@ -0,0 +1,234 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=4.0 +current_version=4.1 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 4.1.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function fix_mysql_user() { + + fix_script=${UPGRADE_DIR}/fix_mysql_user.py + if ! $PYTHON "${fix_script}"; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +fix_mysql_user; +update_database; + +migrate_avatars; + + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + +chmod 0600 "$seahub_settings_py" +chmod 0700 "$seafile_data_dir" +chmod 0700 "$default_ccnet_conf_dir" +chmod 0700 "$default_conf_dir" + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_4.1_4.2.sh b/scripts/upgrade/upgrade_4.1_4.2.sh new file mode 100755 index 0000000000..f1754387b4 --- /dev/null +++ b/scripts/upgrade/upgrade_4.1_4.2.sh @@ -0,0 +1,216 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=4.1 +current_version=4.2 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 4.2.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +update_database; + +migrate_avatars; + + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_4.2_4.3.sh b/scripts/upgrade/upgrade_4.2_4.3.sh new file mode 100755 index 0000000000..b79035d2e9 --- /dev/null +++ b/scripts/upgrade/upgrade_4.2_4.3.sh @@ -0,0 +1,226 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=4.2 +current_version=4.3 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 4.3.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +function regenerate_secret_key() { + regenerate_secret_key_script=$UPGRADE_DIR/regenerate_secret_key.sh + if ! $regenerate_secret_key_script ; then + echo "Failed to regenerate the seahub secret key" + exit 1 + fi +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +regenerate_secret_key; + +update_database; + +migrate_avatars; + + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_4.3_4.4.sh b/scripts/upgrade/upgrade_4.3_4.4.sh new file mode 100755 index 0000000000..c92b5b37d8 --- /dev/null +++ b/scripts/upgrade/upgrade_4.3_4.4.sh @@ -0,0 +1,216 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_seafile_data_dir=${TOPDIR}/seafile-data +default_seahub_db=${TOPDIR}/seahub.db +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=4.3 +current_version=4.4 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 4.4.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +update_database; + +migrate_avatars; + + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_4.4_5.0.sh b/scripts/upgrade/upgrade_4.4_5.0.sh new file mode 100755 index 0000000000..dbec11d02d --- /dev/null +++ b/scripts/upgrade/upgrade_4.4_5.0.sh @@ -0,0 +1,243 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=4.4 +current_version=5.0 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + elif which python2.6 2>/dev/null 1>&2; then + PYTHON=python2.6 + elif which python26 2>/dev/null 1>&2; then + PYTHON=python26 + else + echo + echo "Can't find a python executable of version 2.6 or above in PATH" + echo "Install python 2.6+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 5.0.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +function regenerate_secret_key() { + regenerate_secret_key_script=$UPGRADE_DIR/regenerate_secret_key.sh + if ! $regenerate_secret_key_script ; then + echo "Failed to regenerate the seahub secret key" + exit 1 + fi +} + +# copy ccnet.conf/seafile.conf etc. to conf/ dir, and make the original files read-only +function copy_confs_to_central_conf_dir() { + local confs=( + $default_ccnet_conf_dir/ccnet.conf + $seafile_data_dir/seafile.conf + $seahub_settings_py + ) + for conffile in ${confs[*]}; do + if grep -q "This file has been moved" $conffile; then + continue + fi + cp $conffile $conffile.seafile-5.0.0-bak + cp -av $conffile $default_conf_dir/ + cat >$conffile</dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + else + echo + echo "Can't find a python executable of version 2.7 or above in PATH" + echo "Install python 2.7+ before continue." 
+ echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." + echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 5.1.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! 
-e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +function regenerate_secret_key() { + regenerate_secret_key_script=$UPGRADE_DIR/regenerate_secret_key.sh + if ! $regenerate_secret_key_script ; then + echo "Failed to regenerate the seahub secret key" + exit 1 + fi +} + +# copy ccnet.conf/seafile.conf etc. to conf/ dir, and make the original files read-only +function copy_confs_to_central_conf_dir() { + local confs=( + $default_ccnet_conf_dir/ccnet.conf + $seafile_data_dir/seafile.conf + $seahub_settings_py + ) + for conffile in ${confs[*]}; do + if grep -q "This file has been moved" $conffile; then + continue + fi + cp $conffile $conffile.seafile-5.0.0-bak + cp -av $conffile $default_conf_dir/ + cat >$conffile</dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + else + echo + echo "Can't find a python executable of version 2.7 or above in PATH" + echo "Install python 2.7+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." + echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! 
-L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 6.0.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +update_database; +migrate_avatars; + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_6.0_6.1.sh b/scripts/upgrade/upgrade_6.0_6.1.sh new file mode 100755 index 0000000000..4545092311 --- /dev/null +++ b/scripts/upgrade/upgrade_6.0_6.1.sh @@ -0,0 +1,210 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=6.0 +current_version=6.1 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + else + echo + echo "Can't find a python executable of version 2.7 or above in PATH" + echo "Install python 2.7+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 6.1.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +update_database; +migrate_avatars; + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_6.1_6.2.sh b/scripts/upgrade/upgrade_6.1_6.2.sh new file mode 100755 index 0000000000..b963b3ad3b --- /dev/null +++ b/scripts/upgrade/upgrade_6.1_6.2.sh @@ -0,0 +1,210 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_conf_dir=${TOPDIR}/conf +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=6.1 +current_version=6.2 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + else + echo + echo "Can't find a python executable of version 2.7 or above in PATH" + echo "Install python 2.7+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 6.2.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +ensure_server_not_running; + +update_database; +migrate_avatars; + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_6.2_6.3.sh b/scripts/upgrade/upgrade_6.2_6.3.sh new file mode 100755 index 0000000000..1a0a167643 --- /dev/null +++ b/scripts/upgrade/upgrade_6.2_6.3.sh @@ -0,0 +1,239 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_conf_dir=${TOPDIR}/conf +default_pids_dir=${TOPDIR}/pids +default_logs_dir=${TOPDIR}/logs +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=6.2 +current_version=6.3 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python2.7 2>/dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + else + echo + echo "Can't find a python executable of version 2.7 or above in PATH" + echo "Install python 2.7+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 6.3.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +function add_gunicorn_conf() { + gunicorn_conf=${default_conf_dir}/gunicorn.conf + if ! $(cat > ${gunicorn_conf} </dev/null 1>&2; then + PYTHON=python2.7 + elif which python27 2>/dev/null 1>&2; then + PYTHON=python27 + else + echo + echo "Can't find a python executable of version 2.7 or above in PATH" + echo "Install python 2.7+ before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." 
+ echo "" + exit 1; + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." + echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 7.0.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +function add_gunicorn_conf() { + gunicorn_conf=${default_conf_dir}/gunicorn.conf + if ! 
$(cat > ${gunicorn_conf} </dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function read_seafile_data_dir () { + seafile_ini=${default_ccnet_conf_dir}/seafile.ini + if [[ ! -f ${seafile_ini} ]]; then + echo "${seafile_ini} not found. Now quit" + exit 1 + fi + seafile_data_dir=$(cat "${seafile_ini}") + if [[ ! -d ${seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + else + if [[ ${seafile_data_dir} != ${TOPDIR}/seafile-data ]]; then + if [[ ! -L ${TOPDIR}/seafile-data ]]; then + ln -s ${seafile_data_dir} ${TOPDIR}/seafile-data + echo "Created the symlink ${TOPDIR}/seafile-data for ${seafile_data_dir}." + fi + fi + fi + + export SEAFILE_CONF_DIR=$seafile_data_dir +} + +function rename_gunicorn_config() { + echo + echo "renaming the gunicorn.conf to gunicorn.conf.py ..." + echo + if [[ -f "${default_conf_dir}/gunicorn.conf" ]]; then + mv "${default_conf_dir}/gunicorn.conf" "${default_conf_dir}/gunicorn.conf.py" 1>/dev/null + fi + + if [[ -f "${default_conf_dir}/gunicorn.conf.py" ]]; then + echo 'Done' + else + echo "Failed to renamed the gunicorn.conf to gunicorn.conf.py." + exit 1 + fi +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." + echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 7.1.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! 
-e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +read_seafile_data_dir; +rename_gunicorn_config; +ensure_server_not_running; + +update_database; +migrate_avatars; + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_7.1_8.0.sh b/scripts/upgrade/upgrade_7.1_8.0.sh new file mode 100755 index 0000000000..793dac162a --- /dev/null +++ b/scripts/upgrade/upgrade_7.1_8.0.sh @@ -0,0 +1,212 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_conf_dir=${TOPDIR}/conf +default_pids_dir=${TOPDIR}/pids +default_logs_dir=${TOPDIR}/logs +default_seafile_data_dir=${TOPDIR}/seafile-data +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export SEAFILE_CONF_DIR=${default_seafile_data_dir} +export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3.6/site-packages:${INSTALLPATH}/seafile/lib64/python3.6/site-packages:${INSTALLPATH}/seahub:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=7.1 +current_version=8.0 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function check_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${default_seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 8.0.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +check_seafile_data_dir; +ensure_server_not_running; + +update_database; +migrate_avatars; + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/upgrade_8.0_9.0.sh b/scripts/upgrade/upgrade_8.0_9.0.sh new file mode 100755 index 0000000000..3d09b11189 --- /dev/null +++ b/scripts/upgrade/upgrade_8.0_9.0.sh @@ -0,0 +1,220 @@ +#!/bin/bash + +SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh +UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/ +INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/ +TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/ +default_ccnet_conf_dir=${TOPDIR}/ccnet +default_conf_dir=${TOPDIR}/conf +default_pids_dir=${TOPDIR}/pids +default_logs_dir=${TOPDIR}/logs +default_seafile_data_dir=${TOPDIR}/seafile-data +seafile_server_symlink=${TOPDIR}/seafile-server-latest +seahub_data_dir=${TOPDIR}/seahub-data +seahub_settings_py=${TOPDIR}/seahub_settings.py + +manage_py=${INSTALLPATH}/seahub/manage.py + +export CCNET_CONF_DIR=${default_ccnet_conf_dir} +export SEAFILE_CONF_DIR=${default_seafile_data_dir} +export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir} +export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH +export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH} + +prev_version=8.0 +current_version=9.0 + +echo +echo "-------------------------------------------------------------" +echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}" +echo "Press [ENTER] to contiune" +echo "-------------------------------------------------------------" +echo +read dummy + +function check_python_executable() { + if [[ "$PYTHON" != "" && -x $PYTHON ]]; then + return 0 + fi + + if which python3 2>/dev/null 1>&2; then + PYTHON=python3 + elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then + echo + echo "The current version of python is not 3.x.x, please use Python 3.x.x ." + echo + exit 1 + else + PYTHON="python"$(python --version | cut -b 8-10) + if !which $PYTHON 2>/dev/null 1>&2; then + echo + echo "Can't find a python executable of $PYTHON in PATH" + echo "Install $PYTHON before continue." + echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it" + echo + exit 1 + fi + fi +} + +function check_seafile_data_dir () { + if [[ ! -d ${default_seafile_data_dir} ]]; then + echo "Your seafile server data directory \"${default_seafile_data_dir}\" is invalid or doesn't exits." + echo "Please check it first, or create this directory yourself." + echo "" + exit 1; + fi +} + +function ensure_server_not_running() { + # test whether seafile server has been stopped. + if pgrep seaf-server 2>/dev/null 1>&2 ; then + echo + echo "seafile server is still running !" + echo "stop it using scripts before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 \ + || pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + elif pgrep -f "${manage_py} runfcgi" 2>/dev/null 1>&2 ; then + echo + echo "seahub server is still running !" + echo "stop it before upgrade." + echo + exit 1 + fi +} + +function migrate_avatars() { + echo + echo "migrating avatars ..." 
+ echo + media_dir=${INSTALLPATH}/seahub/media + orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars + dest_avatar_dir=${TOPDIR}/seahub-data/avatars + + # move "media/avatars" directory outside + if [[ ! -d ${dest_avatar_dir} ]]; then + mkdir -p "${TOPDIR}/seahub-data" + mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2 + ln -s ../../../seahub-data/avatars "${media_dir}" + + elif [[ ! -L ${orig_avatar_dir} ]]; then + mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2 + rm -rf "${orig_avatar_dir}" + ln -s ../../../seahub-data/avatars "${media_dir}" + fi + echo "Done" +} + +function update_database() { + echo + echo "Updating seafile/seahub database ..." + echo + + db_update_helper=${UPGRADE_DIR}/db_update_helper.py + if ! $PYTHON "${db_update_helper}" 9.0.0; then + echo + echo "Failed to upgrade your database" + echo + exit 1 + fi + echo "Done" +} + +function upgrade_seafile_server_latest_symlink() { + # update the symlink seafile-server to the new server version + if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then + echo + printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n" + echo + if ! rm -f "${seafile_server_symlink}"; then + echo "Failed to remove ${seafile_server_symlink}" + echo + exit 1; + fi + + if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then + echo "Failed to update ${seafile_server_symlink} symbolic link." + echo + exit 1; + fi + fi +} + +function make_media_custom_symlink() { + media_symlink=${INSTALLPATH}/seahub/media/custom + if [[ -L "${media_symlink}" ]]; then + return + + elif [[ ! -e "${media_symlink}" ]]; then + ln -s ../../../seahub-data/custom "${media_symlink}" + return + + + elif [[ -d "${media_symlink}" ]]; then + cp -rf "${media_symlink}" "${seahub_data_dir}/" + rm -rf "${media_symlink}" + ln -s ../../../seahub-data/custom "${media_symlink}" + fi + +} + +function move_old_customdir_outside() { + # find the path of the latest seafile server folder + if [[ -L ${seafile_server_symlink} ]]; then + latest_server=$(readlink -f "${seafile_server_symlink}") + else + return + fi + + old_customdir=${latest_server}/seahub/media/custom + + # old customdir is already a symlink, do nothing + if [[ -L "${old_customdir}" ]]; then + return + fi + + # old customdir does not exist, do nothing + if [[ ! -e "${old_customdir}" ]]; then + return + fi + + # media/custom exist and is not a symlink + cp -rf "${old_customdir}" "${seahub_data_dir}/" +} + +function update_seahub_settings () { + service_url=`awk -F '=' '/\[General\]/{a=1}a==1&&$1~/SERVICE_URL/{print $2;exit}' ${default_conf_dir}/ccnet.conf` + service_url=$(echo $service_url) + echo "SERVICE_URL = '${service_url}'">>${default_conf_dir}/seahub_settings.py +} + +################# +# The main execution flow of the script +################ + +check_python_executable; +check_seafile_data_dir; +ensure_server_not_running; + +update_database; +migrate_avatars; + +update_seahub_settings; + +move_old_customdir_outside; +make_media_custom_symlink; +upgrade_seafile_server_latest_symlink; + + +echo +echo "-----------------------------------------------------------------" +echo "Upgraded your seafile server successfully." 
+echo "-----------------------------------------------------------------" +echo diff --git a/scripts/upgrade/win32/py/add_collate.py b/scripts/upgrade/win32/py/add_collate.py new file mode 100644 index 0000000000..b861d2dfb9 --- /dev/null +++ b/scripts/upgrade/win32/py/add_collate.py @@ -0,0 +1,180 @@ +# coding: UTF-8 + +''' +Database Upgrade scripts for seafile windows server 2.0.2 +''' + +import os +import sys +import re +import sqlite3 +import logging +import shutil + +from upgrade_common import seafserv_dir, ccnet_dir, seafile_dir + +# seafserv_dir = '/tmp/haiwen' +# ccnet_dir = os.path.join(seafserv_dir, 'ccnet') +# seafile_dir = os.path.join(seafserv_dir, 'seafile-data') + +def error_exit(msg): + print 'Error: %s' % msg + sys.exit(1) + +class Pattern(object): + def __init__(self, old, new): + self.old = old + self.new = new + +class AbstractDBUpdater(object): + '''Base class to update a database''' + + name = '' + patterns = [] + + def __init__(self, db_path): + self.db_path = db_path + self.lines = [] + self.tmp = self.db_path + '.tmp' + + try: + if os.path.exists(self.tmp): + os.remove(self.tmp) + except: + logging.exception('Error when delete temporary database %s' % self.tmp) + sys.exit(1) + + def do_update(self): + print 'updating %s' % self.name + self.dump_db() + self.update_schema() + self.write_db() + + def dump_db(self): + '''Dump all the schema and data''' + with sqlite3.connect(self.db_path) as conn: + for line in conn.iterdump(): + self.lines.append(line.replace('\n', ' ')) + + def update_schema(self): + '''Update schema of tables in this database to add "collate nocase"''' + new_lines = [] + for line in self.lines: + new_line = line + if line.lower().startswith("create table"): + for pattern in self.patterns: + new_line = re.sub(pattern.old, pattern.new, new_line) + new_lines.append(new_line) + + self.lines = new_lines + + def write_db(self): + with sqlite3.connect(self.tmp) as conn: + cursor = conn.cursor() + for line in self.lines: + if line.lower().strip().strip(';') in ('begin transaction', 'commit'): + continue + cursor.execute(line) + + shutil.copy(self.tmp, self.db_path) + + try: + if os.path.exists(self.tmp): + os.remove(self.tmp) + except: + pass + +class CcnetUserDBUpdater(AbstractDBUpdater): + name = 'user database' + patterns = [ + Pattern(r'(CREATE TABLE EmailUser.*)email TEXT,(.*)', + r'\1email TEXT COLLATE NOCASE,\2'), + + Pattern(r'(CREATE TABLE Binding.*)email TEXT,(.*)', + r'\1email TEXT COLLATE NOCASE,\2'), + ] + + def __init__(self, user_db): + AbstractDBUpdater.__init__(self, user_db) + +class CcnetGroupDBUpdater(AbstractDBUpdater): + name = 'group database' + patterns = [ + Pattern(r'(CREATE TABLE `Group`.*)`creator_name` VARCHAR\(255\),(.*)', + r'\1`creator_name` VARCHAR(255) COLLATE NOCASE,\2'), + Pattern(r'(CREATE TABLE `GroupUser`.*)`user_name` VARCHAR\(255\),(.*)', + r'\1`user_name` VARCHAR(255) COLLATE NOCASE,\2'), + ] + + def __init__(self, group_db): + AbstractDBUpdater.__init__(self, group_db) + +class SeafileDBUpdater(AbstractDBUpdater): + name = 'seafile database' + patterns = [ + Pattern(r'(CREATE TABLE RepoOwner.*)owner_id TEXT(.*)', + r'\1owner_id TEXT COLLATE NOCASE\2'), + + Pattern(r'(CREATE TABLE RepoGroup.*)user_name TEXT,(.*)', + r'\1user_name TEXT COLLATE NOCASE,\2'), + + Pattern(r'(CREATE TABLE RepoUserToken.*)email VARCHAR\(255\),(.*)', + r'\1email VARCHAR(255) COLLATE NOCASE,\2'), + + Pattern(r'(CREATE TABLE UserQuota.*)user VARCHAR\(255\),(.*)', + r'\1user VARCHAR(255) COLLATE NOCASE,\2' ), + + Pattern(r'(CREATE TABLE 
SharedRepo.*)from_email VARCHAR\(512\), to_email VARCHAR\(512\),(.*)', + r'\1from_email VARCHAR(512), to_email VARCHAR(512) COLLATE NOCASE,\2'), + ] + + def __init__(self, seafile_db): + AbstractDBUpdater.__init__(self, seafile_db) + +class SeahubDBUpdater(AbstractDBUpdater): + name = 'seahub database' + patterns = [ + Pattern(r'(CREATE TABLE "notifications_usernotification".*)"to_user" varchar\(255\) NOT NULL,(.*)', + r'\1"to_user" varchar(255) NOT NULL COLLATE NOCASE,\2'), + + Pattern(r'(CREATE TABLE "profile_profile".*)"user" varchar\(75\) NOT NULL UNIQUE,(.*)', + r'\1"user" varchar(75) NOT NULL UNIQUE COLLATE NOCASE,\2'), + + Pattern(r'(CREATE TABLE "share_fileshare".*)"username" varchar\(255\) NOT NULL,(.*)', + r'\1"username" varchar(255) NOT NULL COLLATE NOCASE,\2'), + + Pattern(r'(CREATE TABLE "api2_token".*)"user" varchar\(255\) NOT NULL UNIQUE,(.*)', + r'\1"user" varchar(255) NOT NULL UNIQUE COLLATE NOCASE,\2'), + + Pattern(r'(CREATE TABLE "wiki_personalwiki".*)"username" varchar\(255\) NOT NULL UNIQUE,(.*)', + r'\1"username" varchar(255) NOT NULL UNIQUE COLLATE NOCASE,\2'), + + Pattern(r'(CREATE TABLE "message_usermessage".*)"from_email" varchar\(75\) NOT NULL,\s*"to_email" varchar\(75\) NOT NULL,(.*)', + r'\1"from_email" varchar(75) NOT NULL COLLATE NOCASE, "to_email" varchar(75) NOT NULL COLLATE NOCASE,\2'), + + Pattern(r'(CREATE TABLE "avatar_avatar".*)"emailuser" varchar\(255\) NOT NULL,(.*)', + r'\1"emailuser" varchar(255) NOT NULL COLLATE NOCASE,\2'), + ] + + def __init__(self, seahub_db): + AbstractDBUpdater.__init__(self, seahub_db) + +def upgrade_collate(): + '''Update database schema to add "COLLATE NOCASE" of email field''' + user_db = os.path.join(ccnet_dir, 'PeerMgr', 'usermgr.db') + group_db = os.path.join(ccnet_dir, 'GroupMgr', 'groupmgr.db') + seafile_db = os.path.join(seafile_dir, 'seafile.db') + seahub_db = os.path.join(seafserv_dir, 'seahub.db') + updaters = [ + CcnetUserDBUpdater(user_db), + CcnetGroupDBUpdater(group_db), + SeafileDBUpdater(seafile_db), + SeahubDBUpdater(seahub_db), + ] + + for updater in updaters: + updater.do_update() + + +if __name__ == '__main__': + upgrade_collate() diff --git a/scripts/upgrade/win32/py/gc.py b/scripts/upgrade/win32/py/gc.py new file mode 100644 index 0000000000..c609f4a105 --- /dev/null +++ b/scripts/upgrade/win32/py/gc.py @@ -0,0 +1,40 @@ +# coding: UTF-8 + +import os +import sys +import traceback +import ccnet + +from upgrade_common import install_path, seafile_dir, ccnet_dir, run_argv, ensure_server_not_running, central_config_dir + + +def call_seafserv_gc(): + args = [ + os.path.join(install_path, 'seafile', 'bin', 'seafserv-gc.exe'), + '-c', + ccnet_dir, + '-d', + seafile_dir, + '-F', + central_config_dir, + ] + + print 'Starting gc...\n' + run_argv(args) + + +def main(): + try: + ensure_server_not_running() + call_seafserv_gc() + except Exception, e: + print 'Error:\n', e + else: + print '\ndone\n' + finally: + print '\nprint ENTER to exit\n' + raw_input() + + +if __name__ == '__main__': + main() diff --git a/scripts/upgrade/win32/py/upgrade_1.7_1.8.py b/scripts/upgrade/win32/py/upgrade_1.7_1.8.py new file mode 100644 index 0000000000..23bd1dc74b --- /dev/null +++ b/scripts/upgrade/win32/py/upgrade_1.7_1.8.py @@ -0,0 +1,17 @@ +# coding: UTF-8 + +from upgrade_common import upgrade_db + +def main(): + try: + upgrade_db('1.8.0') + except Exception, e: + print 'Error:\n', e + else: + print '\ndone\n' + finally: + print '\nprint ENTER to exit\n' + raw_input() + +if __name__ == '__main__': + main() diff --git 
a/scripts/upgrade/win32/py/upgrade_1.8_2.0.py b/scripts/upgrade/win32/py/upgrade_1.8_2.0.py new file mode 100644 index 0000000000..99aac3dd60 --- /dev/null +++ b/scripts/upgrade/win32/py/upgrade_1.8_2.0.py @@ -0,0 +1,19 @@ +# coding: UTF-8 + +from upgrade_common import upgrade_db +from add_collate import upgrade_collate + +def main(): + try: + upgrade_db('2.0.0') + upgrade_collate() + except Exception, e: + print 'Error:\n', e + else: + print '\ndone\n' + finally: + print '\nprint ENTER to exit\n' + raw_input() + +if __name__ == '__main__': + main() diff --git a/scripts/upgrade/win32/py/upgrade_2.0_2.1.py b/scripts/upgrade/win32/py/upgrade_2.0_2.1.py new file mode 100644 index 0000000000..a7f76e1891 --- /dev/null +++ b/scripts/upgrade/win32/py/upgrade_2.0_2.1.py @@ -0,0 +1,31 @@ +# coding: UTF-8 + +import os +import glob +import shutil + +from upgrade_common import install_path, seafile_dir, upgrade_db + +def copy_template_library(): + src_docs_dir = os.path.join(install_path, 'seafile', 'docs') + library_template_dir= os.path.join(seafile_dir, 'library-template') + if not os.path.exists(library_template_dir): + os.mkdir(library_template_dir) + + for fn in glob.glob(os.path.join(src_docs_dir, '*.doc')): + shutil.copy(fn, library_template_dir) + +def main(): + try: + upgrade_db('2.1.0') + copy_template_library() + except Exception, e: + print 'Error:\n', e + else: + print '\ndone\n' + finally: + print '\nprint ENTER to exit\n' + raw_input() + +if __name__ == '__main__': + main() diff --git a/scripts/upgrade/win32/py/upgrade_2.1_3.0.py b/scripts/upgrade/win32/py/upgrade_2.1_3.0.py new file mode 100644 index 0000000000..dc11f0b4c8 --- /dev/null +++ b/scripts/upgrade/win32/py/upgrade_2.1_3.0.py @@ -0,0 +1,32 @@ +# coding: UTF-8 + +import os + +from upgrade_common import install_path, ccnet_dir, seafile_dir, upgrade_db, run_argv + +def do_migrate_storage(): + '''use seaf-migrate to migrate objects from the 2.1 layout to 3.0 layout''' + args = [ + os.path.join(install_path, 'seafile', 'bin', 'seaf-migrate.exe'), + '-c', ccnet_dir, + '-d', seafile_dir, + ] + + print 'Starting migrate your data...\n' + if run_argv(args) != 0: + raise Exception('failed to migrate seafile data to 3.0 format') + +def main(): + try: + upgrade_db('3.0.0') + do_migrate_storage() + except Exception, e: + print 'Error:\n', e + else: + print '\ndone\n' + finally: + print '\nprint ENTER to exit\n' + raw_input() + +if __name__ == '__main__': + main() diff --git a/scripts/upgrade/win32/py/upgrade_3.0_3.1.py b/scripts/upgrade/win32/py/upgrade_3.0_3.1.py new file mode 100644 index 0000000000..8bb2b89bbf --- /dev/null +++ b/scripts/upgrade/win32/py/upgrade_3.0_3.1.py @@ -0,0 +1,19 @@ +# coding: UTF-8 + +import os + +from upgrade_common import install_path, ccnet_dir, seafile_dir, upgrade_db, run_argv + +def main(): + try: + upgrade_db('3.1.0') + except Exception, e: + print 'Error:\n', e + else: + print '\ndone\n' + finally: + print '\nprint ENTER to exit\n' + raw_input() + +if __name__ == '__main__': + main() diff --git a/scripts/upgrade/win32/py/upgrade_3.1_4.0.py b/scripts/upgrade/win32/py/upgrade_3.1_4.0.py new file mode 100644 index 0000000000..acdda5362e --- /dev/null +++ b/scripts/upgrade/win32/py/upgrade_3.1_4.0.py @@ -0,0 +1,19 @@ +# coding: UTF-8 + +import os + +from upgrade_common import install_path, ccnet_dir, seafile_dir, upgrade_db, run_argv + +def main(): + try: + upgrade_db('4.0.0') + except Exception, e: + print 'Error:\n', e + else: + print '\ndone\n' + finally: + print '\nprint ENTER to exit\n' + raw_input() 
+ +if __name__ == '__main__': + main() diff --git a/scripts/upgrade/win32/py/upgrade_4.0_4.1.py b/scripts/upgrade/win32/py/upgrade_4.0_4.1.py new file mode 100644 index 0000000000..6eb3f9e296 --- /dev/null +++ b/scripts/upgrade/win32/py/upgrade_4.0_4.1.py @@ -0,0 +1,19 @@ +# coding: UTF-8 + +import os + +from upgrade_common import install_path, ccnet_dir, seafile_dir, upgrade_db, run_argv + +def main(): + try: + upgrade_db('4.1.0') + except Exception, e: + print 'Error:\n', e + else: + print '\ndone\n' + finally: + print '\nprint ENTER to exit\n' + raw_input() + +if __name__ == '__main__': + main() diff --git a/scripts/upgrade/win32/py/upgrade_4.1_4.2.py b/scripts/upgrade/win32/py/upgrade_4.1_4.2.py new file mode 100644 index 0000000000..43d9986b60 --- /dev/null +++ b/scripts/upgrade/win32/py/upgrade_4.1_4.2.py @@ -0,0 +1,19 @@ +# coding: UTF-8 + +import os + +from upgrade_common import install_path, ccnet_dir, seafile_dir, upgrade_db, run_argv + +def main(): + try: + upgrade_db('4.2.0') + except Exception, e: + print 'Error:\n', e + else: + print '\ndone\n' + finally: + print '\nprint ENTER to exit\n' + raw_input() + +if __name__ == '__main__': + main() diff --git a/scripts/upgrade/win32/py/upgrade_4.2_4.3.py b/scripts/upgrade/win32/py/upgrade_4.2_4.3.py new file mode 100644 index 0000000000..17829180d7 --- /dev/null +++ b/scripts/upgrade/win32/py/upgrade_4.2_4.3.py @@ -0,0 +1,19 @@ +# coding: UTF-8 + +import os + +from upgrade_common import install_path, ccnet_dir, seafile_dir, upgrade_db, run_argv + +def main(): + try: + upgrade_db('4.3.0') + except Exception, e: + print 'Error:\n', e + else: + print '\ndone\n' + finally: + print '\nprint ENTER to exit\n' + raw_input() + +if __name__ == '__main__': + main() diff --git a/scripts/upgrade/win32/py/upgrade_4.3_5.0.py b/scripts/upgrade/win32/py/upgrade_4.3_5.0.py new file mode 100644 index 0000000000..0c80731478 --- /dev/null +++ b/scripts/upgrade/win32/py/upgrade_4.3_5.0.py @@ -0,0 +1,49 @@ +# coding: UTF-8 + +import shutil +import os +import traceback +from os.path import abspath, basename, exists, dirname, join +from upgrade_common import (install_path, seafserv_dir, ccnet_dir, seafile_dir, + upgrade_db, run_argv) + + +def move_all_conf_to_central_config_dir(): + central_config_dir = join(seafserv_dir, 'conf') + if not exists(central_config_dir): + os.mkdir(central_config_dir) + files = [ + join(ccnet_dir, 'ccnet.conf'), + join(seafile_dir, 'seafile.conf'), + join(seafserv_dir, 'seahub_settings.py'), + ] + for fn in files: + if not exists(fn): + raise RuntimeError('file %s does not exist' % fn) + for fn in files: + with open(fn, 'r') as fp: + if 'This file has been moved' in fp.read(): + return + dstfile = join(central_config_dir, basename(fn)) + shutil.copyfile(fn, dstfile) + with open(fn, 'w') as fp: + content = '# This file has been moved to %s in seafile 5.0.0' % dstfile + fp.write(content) + + +def main(): + try: + upgrade_db('5.0.0') + move_all_conf_to_central_config_dir() + except Exception, e: + traceback.print_exc() + print 'Error:\n', e + else: + print '\ndone\n' + finally: + print '\nprint ENTER to exit\n' + raw_input() + + +if __name__ == '__main__': + main() diff --git a/scripts/upgrade/win32/py/upgrade_5.0_5.1.py b/scripts/upgrade/win32/py/upgrade_5.0_5.1.py new file mode 100644 index 0000000000..298c07e986 --- /dev/null +++ b/scripts/upgrade/win32/py/upgrade_5.0_5.1.py @@ -0,0 +1,24 @@ +# coding: UTF-8 + +import shutil +import os +import traceback +from os.path import abspath, basename, exists, dirname, join +from 
upgrade_common import (install_path, seafserv_dir, ccnet_dir, seafile_dir, + upgrade_db, run_argv) + +def main(): + try: + upgrade_db('5.1.0') + except Exception, e: + traceback.print_exc() + print 'Error:\n', e + else: + print '\ndone\n' + finally: + print '\nprint ENTER to exit\n' + raw_input() + + +if __name__ == '__main__': + main() diff --git a/scripts/upgrade/win32/py/upgrade_5.1_6.0.py b/scripts/upgrade/win32/py/upgrade_5.1_6.0.py new file mode 100644 index 0000000000..ad667c249c --- /dev/null +++ b/scripts/upgrade/win32/py/upgrade_5.1_6.0.py @@ -0,0 +1,24 @@ +# coding: UTF-8 + +import shutil +import os +import traceback +from os.path import abspath, basename, exists, dirname, join +from upgrade_common import (install_path, seafserv_dir, ccnet_dir, seafile_dir, + upgrade_db, run_argv) + +def main(): + try: + upgrade_db('6.0.0') + except Exception, e: + traceback.print_exc() + print 'Error:\n', e + else: + print '\ndone\n' + finally: + print '\nprint ENTER to exit\n' + raw_input() + + +if __name__ == '__main__': + main() diff --git a/scripts/upgrade/win32/py/upgrade_common.py b/scripts/upgrade/win32/py/upgrade_common.py new file mode 100644 index 0000000000..5698b6fd8e --- /dev/null +++ b/scripts/upgrade/win32/py/upgrade_common.py @@ -0,0 +1,155 @@ +# coding: UTF-8 + +import os +import sys +import sqlite3 +import subprocess +import ccnet +import glob + + +# Directory layout: +# +# - SeafileProgram/ +# - seafserv.ini +# - seafile-server-1.7.0/ +# - seafile-server-1.8.0/ +# - seafile-server-1.9.0/ +# - upgrade/ +# - sql/ +# - 1.8.0/ +# - sqlite3 +# - ccnet.sql +# - seafile.sql +# - seahub.sql +# - upgrade_1.7_1.8.bat +# - upgrade_1.8_1.9.bat +# - py/ +# - upgrade_1.7_1.8.py +# - upgrade_1.8_1.9.py + +pyscript_dir = os.path.dirname(os.path.abspath(__file__)) +upgrade_dir = os.path.dirname(pyscript_dir) +sql_dir = os.path.join(upgrade_dir, 'sql') +install_path = os.path.dirname(upgrade_dir) +program_top_dir = os.path.dirname(install_path) + +seafserv_dir = '' +ccnet_dir = '' +seafile_dir = '' +central_config_dir = '' + +def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): + '''Run a program and wait it to finish, and return its exit code. The + standard output of this program is supressed. 
+ + ''' + with open(os.devnull, 'w') as devnull: + if suppress_stdout: + stdout = devnull + else: + stdout = sys.stdout + + if suppress_stderr: + stderr = devnull + else: + stderr = sys.stderr + + proc = subprocess.Popen(argv, + cwd=cwd, + stdout=stdout, + stderr=stderr, + env=env) + return proc.wait() + +def error(message): + print message + sys.exit(1) + +def read_seafserv_dir(): + global seafserv_dir, ccnet_dir, seafile_dir, central_config_dir + seafserv_ini = os.path.join(program_top_dir, 'seafserv.ini') + if not os.path.exists(seafserv_ini): + error('%s not found' % seafserv_ini) + + with open(seafserv_ini, 'r') as fp: + seafserv_dir = fp.read().strip() + + ccnet_dir = os.path.join(seafserv_dir, 'ccnet') + seafile_dir = os.path.join(seafserv_dir, 'seafile-data') + central_config_dir = os.path.join(seafserv_dir, 'conf') + +def apply_sqls(db_path, sql_path): + with open(sql_path, 'r') as fp: + lines = fp.read().split(';') + + with sqlite3.connect(db_path) as conn: + for line in lines: + line = line.strip() + if not line: + continue + else: + conn.execute(line) + +def _get_ccnet_db(ccnet_dir, dbname): + dbs = ( + 'ccnet.db', + 'GroupMgr/groupmgr.db', + 'misc/config.db', + 'OrgMgr/orgmgr.db', + ) + for db in dbs: + if os.path.splitext(os.path.basename(db))[0] == dbname: + return os.path.join(ccnet_dir, db) + +def _handle_ccnet_sqls(version): + for sql_path in glob.glob(os.path.join(sql_dir, version, 'sqlite3', 'ccnet', '*.sql')): + dbname = os.path.splitext(os.path.basename(sql_path))[0] + apply_sqls(_get_ccnet_db(ccnet_dir, dbname), sql_path) + +def upgrade_db(version): + ensure_server_not_running() + print 'upgrading databases ...' + ccnet_db = os.path.join(ccnet_dir, 'ccnet.db') + seafile_db = os.path.join(seafile_dir, 'seafile.db') + seahub_db = os.path.join(seafserv_dir, 'seahub.db') + + def get_sql(prog): + ret = os.path.join(sql_dir, version, 'sqlite3', '%s.sql' % prog) + return ret + + ccnet_sql = get_sql('ccnet') + seafile_sql = get_sql('seafile') + seahub_sql = get_sql('seahub') + + if os.path.exists(ccnet_sql): + print ' upgrading ccnet databases ...' + apply_sqls(ccnet_db, ccnet_sql) + _handle_ccnet_sqls(version) + + if os.path.exists(seafile_sql): + print ' upgrading seafile databases ...' + apply_sqls(seafile_db, seafile_sql) + + if os.path.exists(seahub_sql): + print ' upgrading seahub databases ...' + apply_sqls(seahub_db, seahub_sql) + +def get_current_version(): + return os.path.basename(install_path).split('-')[-1] + +def ensure_server_not_running(): + if os.path.exists(os.path.join(central_config_dir, 'ccnet.conf')): + client = ccnet.SyncClient(ccnet_dir, + central_config_dir=central_config_dir) + else: + client = ccnet.SyncClient(ccnet_dir) + try: + client.connect_daemon() + except ccnet.NetworkError: + pass + else: + raise Exception('Seafile server is running! 
You must turn it off before running this script!') + + +read_seafserv_dir() diff --git a/scripts/upgrade/win32/upgrade_1.7_1.8.bat b/scripts/upgrade/win32/upgrade_1.7_1.8.bat new file mode 100644 index 0000000000..058b6e089e --- /dev/null +++ b/scripts/upgrade/win32/upgrade_1.7_1.8.bat @@ -0,0 +1,4 @@ +@echo off +cd /d %~dp0 +set PYTHONPATH=%PYTHONPATH%;%~dp0\..\seahub\thirdpart +start python py/upgrade_1.7_1.8.py diff --git a/scripts/upgrade/win32/upgrade_1.8_2.0.bat b/scripts/upgrade/win32/upgrade_1.8_2.0.bat new file mode 100644 index 0000000000..1a9f09becc --- /dev/null +++ b/scripts/upgrade/win32/upgrade_1.8_2.0.bat @@ -0,0 +1,4 @@ +@echo off +cd /d %~dp0 +set PYTHONPATH=%PYTHONPATH%;%~dp0\..\seahub\thirdpart +start python py/upgrade_1.8_2.0.py diff --git a/scripts/upgrade/win32/upgrade_2.0_2.1.bat b/scripts/upgrade/win32/upgrade_2.0_2.1.bat new file mode 100644 index 0000000000..043be48019 --- /dev/null +++ b/scripts/upgrade/win32/upgrade_2.0_2.1.bat @@ -0,0 +1,4 @@ +@echo off +cd /d %~dp0 +set PYTHONPATH=%PYTHONPATH%;%~dp0\..\seahub\thirdpart +start python py/upgrade_2.0_2.1.py diff --git a/scripts/upgrade/win32/upgrade_2.1_3.0.bat b/scripts/upgrade/win32/upgrade_2.1_3.0.bat new file mode 100644 index 0000000000..cb07cabb1a --- /dev/null +++ b/scripts/upgrade/win32/upgrade_2.1_3.0.bat @@ -0,0 +1,4 @@ +@echo off +cd /d %~dp0 +set PYTHONPATH=%PYTHONPATH%;%~dp0\..\seahub\thirdpart +start python py/upgrade_2.1_3.0.py diff --git a/scripts/upgrade/win32/upgrade_3.0_3.1.bat b/scripts/upgrade/win32/upgrade_3.0_3.1.bat new file mode 100644 index 0000000000..a26c33c7f6 --- /dev/null +++ b/scripts/upgrade/win32/upgrade_3.0_3.1.bat @@ -0,0 +1,4 @@ +@echo off +cd /d %~dp0 +set PYTHONPATH=%PYTHONPATH%;%~dp0\..\seahub\thirdpart +start python py/upgrade_3.0_3.1.py diff --git a/scripts/upgrade/win32/upgrade_3.1_4.0.bat b/scripts/upgrade/win32/upgrade_3.1_4.0.bat new file mode 100644 index 0000000000..7b9afac7ad --- /dev/null +++ b/scripts/upgrade/win32/upgrade_3.1_4.0.bat @@ -0,0 +1,4 @@ +@echo off +cd /d %~dp0 +set PYTHONPATH=%PYTHONPATH%;%~dp0\..\seahub\thirdpart +start python py/upgrade_3.1_4.0.py diff --git a/scripts/upgrade/win32/upgrade_4.0_4.1.bat b/scripts/upgrade/win32/upgrade_4.0_4.1.bat new file mode 100644 index 0000000000..023dd505d7 --- /dev/null +++ b/scripts/upgrade/win32/upgrade_4.0_4.1.bat @@ -0,0 +1,4 @@ +@echo off +cd /d %~dp0 +set PYTHONPATH=%PYTHONPATH%;%~dp0\..\seahub\thirdpart +start python py/upgrade_4.0_4.1.py diff --git a/scripts/upgrade/win32/upgrade_4.1_4.2.bat b/scripts/upgrade/win32/upgrade_4.1_4.2.bat new file mode 100644 index 0000000000..064a43a469 --- /dev/null +++ b/scripts/upgrade/win32/upgrade_4.1_4.2.bat @@ -0,0 +1,4 @@ +@echo off +cd /d %~dp0 +set PYTHONPATH=%PYTHONPATH%;%~dp0\..\seahub\thirdpart +start python py/upgrade_4.1_4.2.py diff --git a/scripts/upgrade/win32/upgrade_4.2_4.3.bat b/scripts/upgrade/win32/upgrade_4.2_4.3.bat new file mode 100644 index 0000000000..550bc61fb5 --- /dev/null +++ b/scripts/upgrade/win32/upgrade_4.2_4.3.bat @@ -0,0 +1,4 @@ +@echo off +cd /d %~dp0 +set PYTHONPATH=%PYTHONPATH%;%~dp0\..\seahub\thirdpart +start python py/upgrade_4.2_4.3.py diff --git a/scripts/upgrade/win32/upgrade_4.3_5.0.bat b/scripts/upgrade/win32/upgrade_4.3_5.0.bat new file mode 100644 index 0000000000..cb4d01d99f --- /dev/null +++ b/scripts/upgrade/win32/upgrade_4.3_5.0.bat @@ -0,0 +1,4 @@ +@echo off +cd /d %~dp0 +set PYTHONPATH=%PYTHONPATH%;%~dp0\..\seahub\thirdpart +start python py/upgrade_4.3_5.0.py diff --git a/scripts/upgrade/win32/upgrade_5.0_5.1.bat 
b/scripts/upgrade/win32/upgrade_5.0_5.1.bat new file mode 100644 index 0000000000..2c9e64a65c --- /dev/null +++ b/scripts/upgrade/win32/upgrade_5.0_5.1.bat @@ -0,0 +1,4 @@ +@echo off +cd /d %~dp0 +set PYTHONPATH=%PYTHONPATH%;%~dp0\..\seahub\thirdpart +start python py/upgrade_5.0_5.1.py diff --git a/scripts/upgrade/win32/upgrade_5.1_6.0.bat b/scripts/upgrade/win32/upgrade_5.1_6.0.bat new file mode 100644 index 0000000000..f73ba04f4b --- /dev/null +++ b/scripts/upgrade/win32/upgrade_5.1_6.0.bat @@ -0,0 +1,4 @@ +@echo off +cd /d %~dp0 +set PYTHONPATH=%PYTHONPATH%;%~dp0\..\seahub\thirdpart +start python py/upgrade_5.1_6.0.py
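
Note on the add_collate.py updaters: every updater in that file follows one pattern — dump the SQLite schema and data with Connection.iterdump(), rewrite the matching CREATE TABLE statements with a regular expression, replay every statement into a temporary database, and copy the temporary file back over the original. Below is a minimal, self-contained Python 3 sketch of that pattern, not the patch's exact code; the database path in the usage comment is a placeholder, and the regex is simply the first CcnetUserDBUpdater pattern reused for illustration.

import os
import re
import shutil
import sqlite3

def add_collate_nocase(db_path, old_pattern, new_pattern):
    '''Rewrite CREATE TABLE statements of a SQLite database and rebuild it in place.'''
    tmp_path = db_path + '.tmp'

    # 1. Dump schema and data as single-line SQL statements.
    with sqlite3.connect(db_path) as src:
        statements = [line.replace('\n', ' ') for line in src.iterdump()]

    # 2. Apply the regex only to CREATE TABLE statements.
    rewritten = []
    for stmt in statements:
        if stmt.lower().startswith('create table'):
            stmt = re.sub(old_pattern, new_pattern, stmt)
        rewritten.append(stmt)

    # 3. Replay everything into a fresh database. iterdump() emits its own
    #    BEGIN TRANSACTION/COMMIT, which conflict with the sqlite3 module's
    #    implicit transaction handling, so they are skipped (as in the patch).
    if os.path.exists(tmp_path):
        os.remove(tmp_path)
    with sqlite3.connect(tmp_path) as dst:
        for stmt in rewritten:
            if stmt.lower().strip().strip(';') in ('begin transaction', 'commit'):
                continue
            dst.execute(stmt)

    # 4. Swap the rebuilt file into place.
    shutil.copy(tmp_path, db_path)
    os.remove(tmp_path)

# Example (hypothetical db path, regex taken from CcnetUserDBUpdater):
# add_collate_nocase('usermgr.db',
#                    r'(CREATE TABLE EmailUser.*)email TEXT,(.*)',
#                    r'\1email TEXT COLLATE NOCASE,\2')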
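
Note on update_seahub_settings() in upgrade_8.0_9.0.sh: the awk one-liner is terse. The -F '=' option splits each line of ccnet.conf on '='; the /\[General\]/{a=1} rule raises a flag at the [General] section header; a==1&&$1~/SERVICE_URL/{print $2;exit} prints the value of the first SERVICE_URL key found from that point on and stops (only the second '='-separated field, so a value containing '=' would be truncated); the follow-up service_url=$(echo $service_url) merely trims surrounding whitespace before the value is appended to seahub_settings.py. The sketch below is a rough configparser-based equivalent for readers more comfortable with Python; it is an illustration only, assuming ccnet.conf parses as plain INI, and the two paths in the usage comment stand in for ${default_conf_dir}/ccnet.conf and ${default_conf_dir}/seahub_settings.py.

import configparser

def copy_service_url(ccnet_conf, seahub_settings):
    # ccnet.conf is INI-style, so configparser can usually read it directly;
    # interpolation is disabled in case the URL ever contains '%'.
    cp = configparser.ConfigParser(interpolation=None)
    cp.read(ccnet_conf)
    service_url = cp.get('General', 'SERVICE_URL', fallback='').strip()
    if service_url:
        with open(seahub_settings, 'a') as fp:
            fp.write("\nSERVICE_URL = '%s'\n" % service_url)

# Example (placeholder paths):
# copy_service_url('conf/ccnet.conf', 'conf/seahub_settings.py')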