diff --git a/scripts/build/build-pro.py b/scripts/build/build-pro.py
new file mode 100755
index 0000000000..f95d5d22b9
--- /dev/null
+++ b/scripts/build/build-pro.py
@@ -0,0 +1,1127 @@
+#!/usr/bin/env python3
+# coding: UTF-8
+'''This script builds the Seafile Server Professional tarball.
+
+Some notes:
+
+1. The working directory is always the 'builddir'. 'os.chdir' is only called
+to change to the 'builddir'. We make use of the 'cwd' argument in
+'subprocess.Popen' to run a command in a specific directory.
+
+2. django/djangorestframework/djblets/gunicorn/flup must be easy_install-ed to
+a directory before running this script. That directory is passed in as the
+'--thirdpartdir' argument.
+
+3. These components must be easy_installed to a --prolibsdir
+      - sqlalchemy
+      - thrift
+      - elasticsearch
+      - elasticsearch-dsl
+      - argparse
+      - python-daemon
+      - lockfile
+'''
+import sys
+
+####################
+### Requires Python 3
+####################
+# Fail fast before any Python-3-only syntax/APIs below are reached.
+if sys.version_info[0] != 3:
+    print('Python 3 is required. Quit now.')
+    sys.exit(1)
+
+import os
+import glob
+import subprocess
+import tempfile
+import shutil
+import re
+import subprocess
+# NOTE(review): 'subprocess' is imported twice above — the second import is
+# harmless but redundant and can be removed.
+import optparse
+import atexit
+
+####################
+### Global variables
+####################
+
+# command line configuration, populated by validate_args() and read by the
+# rest of the script
+conf = {}
+
+# key names in the conf dictionary.
+CONF_VERSION = 'version'
+CONF_SEAFILE_VERSION = 'seafile_version'
+CONF_LIBSEARPC_VERSION = 'libsearpc_version'
+CONF_CCNET_VERSION = 'ccnet_version'
+CONF_SRCDIR = 'srcdir'
+CONF_KEEP = 'keep'
+CONF_BUILDDIR = 'builddir'
+CONF_OUTPUTDIR = 'outputdir'
+CONF_THIRDPARTDIR = 'thirdpartdir'
+CONF_PROLIBSDIR = 'prolibsdir'
+CONF_NO_STRIP = 'nostrip'
+# NOTE(review): the constant is named NO_CEPH but its value (and the CLI
+# option it creates) is 'no-s3', and the option help says it disables storage
+# backends — confirm which name is actually intended.
+CONF_NO_CEPH = 'no-s3'
+CONF_YES = 'yes'
+CONF_JOBS = 'jobs'
+CONF_MYSQL_CONFIG       = 'mysql_config'
+CONF_BRAND = 'brand'
+
+####################
+### Common helper functions
+####################
+def highlight(content, is_error=False):
+    '''Add ANSI color to content to get it highlighted on terminal.
+
+    Errors are rendered bold red; everything else bold green.
+    '''
+    if is_error:
+        return '\x1b[1;31m%s\x1b[m' % content
+    else:
+        return '\x1b[1;32m%s\x1b[m' % content
+
+
+def info(msg):
+    '''Print msg to stdout with a highlighted [INFO] prefix.'''
+    print(highlight('[INFO] ') + msg)
+
+
+def find_in_path(prog):
+    '''Search each directory in $PATH for a file named ``prog``.
+
+    Returns the path of the first match, or None if not found.
+    '''
+    dirs = os.environ['PATH'].split(':')
+    for d in dirs:
+        # skip empty entries produced by leading/trailing/double colons
+        if d == '':
+            continue
+        path = os.path.join(d, prog)
+        if os.path.exists(path):
+            return path
+
+    return None
+
+
+def error(msg=None, usage=None):
+    '''Print an optional error message and/or usage text, then exit(1).
+
+    This never returns; it is the script's single failure path.
+    '''
+    if msg:
+        print(highlight('[ERROR] ') + msg)
+    if usage:
+        print(usage)
+    sys.exit(1)
+
+
+def run_argv(argv,
+             cwd=None,
+             env=None,
+             suppress_stdout=False,
+             suppress_stderr=False):
+    '''Run a program (argv list) and wait for it to finish; return its exit
+    code.  stdout/stderr are redirected to /dev/null only when the
+    corresponding suppress_* flag is set, otherwise they are inherited.
+
+    '''
+    with open(os.devnull, 'w') as devnull:
+        if suppress_stdout:
+            stdout = devnull
+        else:
+            stdout = sys.stdout
+
+        if suppress_stderr:
+            stderr = devnull
+        else:
+            stderr = sys.stderr
+
+        proc = subprocess.Popen(argv,
+                                cwd=cwd,
+                                stdout=stdout,
+                                stderr=stderr,
+                                env=env)
+        return proc.wait()
+
+
+def run(cmdline,
+        cwd=None,
+        env=None,
+        suppress_stdout=False,
+        suppress_stderr=False):
+    '''Like run_argv but takes a command line string, executed via the
+    shell (shell=True).  Returns the process exit code.'''
+    with open(os.devnull, 'w') as devnull:
+        if suppress_stdout:
+            stdout = devnull
+        else:
+            stdout = sys.stdout
+
+        if suppress_stderr:
+            stderr = devnull
+        else:
+            stderr = sys.stderr
+
+        proc = subprocess.Popen(cmdline,
+                                cwd=cwd,
+                                stdout=stdout,
+                                stderr=stderr,
+                                env=env,
+                                shell=True)
+        return proc.wait()
+
+
+def must_mkdir(path):
+    '''Create a directory (with parents), exit on failure.
+
+    A no-op when the path already exists.
+    '''
+    if os.path.exists(path):
+        return
+
+    try:
+        os.makedirs(path)
+    except OSError as e:
+        error('failed to create directory %s:%s' % (path, e))
+
+
+def must_copy(src, dst):
+    '''Copy src to dst, exit on failure'''
+    try:
+        shutil.copy(src, dst)
+    except Exception as e:
+        error('failed to copy %s to %s: %s' % (src, dst, e))
+
+
+def must_copytree(src, dst):
+    '''must_copytree(a, b) copies every file/dir under a/ to b/.
+
+    Unlike shutil.copytree, dst must already exist; only its *entries*
+    are created here.  Exits via error() on any failure.
+    '''
+    try:
+        for name in os.listdir(src):
+            src_path = os.path.join(src, name)
+            target_path = os.path.join(dst, name)
+            if os.path.isdir(src_path):
+                shutil.copytree(src_path, target_path)
+            else:
+                shutil.copy(src_path, target_path)
+    except Exception as e:
+        # NOTE(review): message mentions 'seahub thirdpart libs' but this
+        # helper is generic (also used for pro libs) — message may mislead.
+        error('failed to copy seahub thirdpart libs: %s' % e)
+
+
+class Project(object):
+    '''Base class for a project'''
+    # Probject name, i.e. libseaprc/ccnet/seafile/seahub
+    name = ''
+
+    # A list of shell commands to configure/build the project
+    build_commands = []
+
+    def __init__(self):
+        # the path to pass to --prefix=/<prefix>
+        self.prefix = os.path.join(conf[CONF_BUILDDIR], 'seafile-server',
+                                   'seafile')
+        self.version = self.get_version()
+        self.src_tarball = os.path.join(conf[CONF_SRCDIR], '%s-%s.tar.gz' %
+                                        (self.name, self.version))
+        # project dir, like <builddir>/seafile-1.2.2/
+        self.projdir = os.path.join(conf[CONF_BUILDDIR], '%s-%s' %
+                                    (self.name, self.version))
+
+    def get_version(self):
+        # libsearpc and ccnet can have different versions from seafile.
+        raise NotImplementedError
+
+    def uncompress(self):
+        '''Uncompress the source from the tarball'''
+        info('Uncompressing %s' % self.name)
+
+        if run('tar xf %s' % self.src_tarball) < 0:
+            error('failed to uncompress source of %s' % self.name)
+
+    def build(self):
+        '''Build the source'''
+        info('Building %s' % self.name)
+        for cmd in self.build_commands:
+            if run(cmd, cwd=self.projdir) != 0:
+                error('error when running command:\n\t%s\n' % cmd)
+
+
+class Libsearpc(Project):
+    '''libsearpc: configure/make/make-install into the shared prefix.'''
+    name = 'libsearpc'
+
+    def __init__(self):
+        Project.__init__(self)
+        self.build_commands = [
+            './configure --prefix=%s' % self.prefix,
+            'make -j%s' % conf[CONF_JOBS], 'make install'
+        ]
+
+    def get_version(self):
+        # version comes from the --libsearpc_version CLI option
+        return conf[CONF_LIBSEARPC_VERSION]
+
+
+class Ccnet(Project):
+    '''ccnet: built with LDAP enabled, optionally against a given mysql.'''
+    name = 'ccnet'
+
+    def __init__(self):
+        Project.__init__(self)
+        configure_command = './configure --prefix=%s --enable-ldap' % self.prefix
+        if conf[CONF_MYSQL_CONFIG]:
+            # point configure at a specific mysql_config/mariadb_config
+            configure_command += ' --with-mysql=%s' % conf[CONF_MYSQL_CONFIG]
+        self.build_commands = [
+            configure_command,
+            'make -j%s' % conf[CONF_JOBS],
+            'make install'
+        ]
+
+    def get_version(self):
+        # version comes from the --ccnet_version CLI option
+        return conf[CONF_CCNET_VERSION]
+
+
+class Seafile(Project):
+    '''seafile server: built with cluster, S3 and Ceph backends enabled.'''
+    name = 'seafile'
+
+    def __init__(self):
+        Project.__init__(self)
+
+        configure_command = './configure --prefix=%s --enable-cluster --enable-s3 --enable-ceph' % self.prefix
+        if conf[CONF_MYSQL_CONFIG]:
+            configure_command += ' --with-mysql=%s' % conf[CONF_MYSQL_CONFIG]
+        self.build_commands = [
+            configure_command,
+            'make -j%s' % conf[CONF_JOBS],
+            'make install'
+        ]
+
+    def get_version(self):
+        return conf[CONF_SEAFILE_VERSION]
+
+
+class Seahub(Project):
+    '''seahub (web frontend): no compilation, only stamps the version.'''
+    name = 'seahub'
+
+    def __init__(self):
+        Project.__init__(self)
+        # nothing to do for seahub
+        self.build_commands = []
+
+    def get_version(self):
+        # seahub is versioned together with seafile
+        return conf[CONF_SEAFILE_VERSION]
+
+    def build(self):
+        self.write_version_to_settings_py()
+
+        Project.build(self)
+
+    def write_version_to_settings_py(self):
+        '''Write the version of current seafile server to seahub'''
+        settings_py = os.path.join(self.projdir, 'seahub', 'settings.py')
+
+        # append so any existing SEAFILE_VERSION assignment is overridden
+        # (later assignment wins when settings.py is imported)
+        line = '\nSEAFILE_VERSION = "%s"\n' % conf[CONF_VERSION]
+        with open(settings_py, 'a+') as fp:
+            fp.write(line)
+
+
+def check_seahub_thirdpart(thirdpartdir):
+    '''The ${thirdpartdir} must have django/djblets/gunicorn pre-installed. So
+    we can copy it to seahub/thirdpart
+
+    Exits via error() when any required lib is missing.
+    '''
+    thirdpart_libs = [
+        'Django',
+#        'Djblets',
+        'gunicorn',
+        #'flup',
+        'chardet',
+        'python_dateutil',
+        #'django_picklefield',
+        #'django_constance',
+        # 'SQLAlchemy',
+        # 'python_daemon',
+        # 'lockfile',
+        'six',
+    ]
+
+    def check_thirdpart_lib(name):
+        # easy_install lays out '<Name>-<version>...' entries, hence the glob
+        name += '*'
+        if not glob.glob(os.path.join(thirdpartdir, name)):
+            error('%s not find in %s' % (name, thirdpartdir))
+
+    for lib in thirdpart_libs:
+        check_thirdpart_lib(lib)
+
+
+def check_pro_libs(prolibsdir):
+    '''The ${prolibsdir} must have pro libs installed.
+
+    Exits via error() when any required lib is missing.
+    '''
+    pro_libs = [
+        'argparse',
+        'elasticsearch_dsl',
+        'SQLAlchemy',
+        'thrift',
+    ]
+
+    def check_pro_lib(name):
+        # installed eggs are named '<Name>-<version>...', hence the glob
+        name += '*'
+        if not glob.glob(os.path.join(prolibsdir, name)):
+            error('%s not find in %s' % (name, prolibsdir))
+
+    for lib in pro_libs:
+        check_pro_lib(lib)
+
+
+def check_targz_src(proj, version, srcdir):
+    '''Exit unless ${srcdir}/${proj}-${version}.tar.gz exists.'''
+    src_tarball = os.path.join(srcdir, '%s-%s.tar.gz' % (proj, version))
+    if not os.path.exists(src_tarball):
+        error('%s not exists' % src_tarball)
+
+
+def check_targz_src_no_version(proj, srcdir):
+    '''Exit unless ${srcdir}/${proj}.tar.gz (unversioned tarball) exists.'''
+    src_tarball = os.path.join(srcdir, '%s.tar.gz' % proj)
+    if not os.path.exists(src_tarball):
+        error('%s not exists' % src_tarball)
+
+
+def check_pdf2htmlEX():
+    '''Exit unless the pdf2htmlEX executable is available on $PATH.'''
+    pdf2htmlEX_executable = find_in_path('pdf2htmlEX')
+    if pdf2htmlEX_executable is None:
+        error('pdf2htmlEX not found')
+
+
+def validate_args(usage, options):
+    '''Validate the parsed CLI options, populate the global ``conf`` dict,
+    print the build summary and create the build directory.
+
+    Exits via error() on any invalid/missing option or missing tarball.
+    '''
+    required_args = [
+        CONF_VERSION,
+        CONF_LIBSEARPC_VERSION,
+        CONF_CCNET_VERSION,
+        CONF_SEAFILE_VERSION,
+        CONF_SRCDIR,
+        CONF_THIRDPARTDIR,
+        CONF_PROLIBSDIR,
+    ]
+
+    # first check required args
+    for optname in required_args:
+        # NOTE(review): '== None' works here but 'is None' is the idiomatic
+        # comparison.
+        if getattr(options, optname, None) == None:
+            error('%s must be specified' % optname, usage=usage)
+
+    def get_option(optname):
+        return getattr(options, optname)
+
+    # [ version ]
+    def check_project_version(version):
+        '''A valid version must be like 1.2.2, 1.3'''
+        if not re.match('^([0-9])+(\.([0-9])+)+$', version):
+            error('%s is not a valid version' % version, usage=usage)
+
+    version = get_option(CONF_VERSION)
+    seafile_version = get_option(CONF_SEAFILE_VERSION)
+    libsearpc_version = get_option(CONF_LIBSEARPC_VERSION)
+    ccnet_version = get_option(CONF_CCNET_VERSION)
+
+    check_project_version(version)
+    check_project_version(libsearpc_version)
+    check_project_version(ccnet_version)
+    check_project_version(seafile_version)
+
+    # [ srcdir ] -- every required source tarball must be present
+    srcdir = get_option(CONF_SRCDIR)
+    check_targz_src('libsearpc', libsearpc_version, srcdir)
+    check_targz_src('ccnet', ccnet_version, srcdir)
+    check_targz_src('seafile', seafile_version, srcdir)
+    check_targz_src('seahub', seafile_version, srcdir)
+
+    check_targz_src_no_version('seafes', srcdir)
+    check_targz_src_no_version('seafevents', srcdir)
+    check_targz_src_no_version('seahub-extra', srcdir)
+    check_targz_src_no_version('libevent', srcdir)
+    check_targz_src_no_version('elasticsearch', srcdir)
+    check_targz_src_no_version('seafdav', srcdir)
+    check_targz_src_no_version('seafobj', srcdir)
+
+    check_pdf2htmlEX()
+
+    # [ builddir ]  the user-supplied parent directory must exist; the actual
+    # build happens in a 'seafile-pro-server-build' subdirectory of it
+    builddir = get_option(CONF_BUILDDIR)
+    if not os.path.exists(builddir):
+        error('%s does not exist' % builddir, usage=usage)
+
+    builddir = os.path.join(builddir, 'seafile-pro-server-build')
+
+    # [ thirdpartdir ]
+    thirdpartdir = get_option(CONF_THIRDPARTDIR)
+    check_seahub_thirdpart(thirdpartdir)
+
+    # [ prolibsdir ]
+    prolibsdir = get_option(CONF_PROLIBSDIR)
+    check_pro_libs(prolibsdir)
+
+    # [ outputdir ]  defaults to the current directory
+    outputdir = get_option(CONF_OUTPUTDIR)
+    if outputdir:
+        if not os.path.exists(outputdir):
+            error('outputdir %s does not exist' % outputdir, usage=usage)
+    else:
+        outputdir = os.getcwd()
+
+    # [ keep ]
+    keep = get_option(CONF_KEEP)
+
+    # [ no strip]
+    nostrip = get_option(CONF_NO_STRIP)
+
+    # [ YES ]
+    yes = get_option(CONF_YES)
+
+    # [ JOBS ]
+    jobs = get_option(CONF_JOBS)
+
+    # [no ceph]
+    no_ceph = get_option(CONF_NO_CEPH)
+
+    mysql_config_path = get_option(CONF_MYSQL_CONFIG)
+
+    brand = get_option(CONF_BRAND)
+
+    conf[CONF_VERSION] = version
+    conf[CONF_LIBSEARPC_VERSION] = libsearpc_version
+    conf[CONF_SEAFILE_VERSION] = seafile_version
+    conf[CONF_CCNET_VERSION] = ccnet_version
+
+    conf[CONF_BUILDDIR] = builddir
+    conf[CONF_SRCDIR] = srcdir
+    conf[CONF_OUTPUTDIR] = outputdir
+    conf[CONF_KEEP] = keep
+    conf[CONF_THIRDPARTDIR] = thirdpartdir
+    conf[CONF_PROLIBSDIR] = prolibsdir
+    conf[CONF_NO_STRIP] = nostrip
+    conf[CONF_YES] = yes
+    conf[CONF_JOBS] = jobs
+    conf[CONF_NO_CEPH] = no_ceph
+    conf[CONF_MYSQL_CONFIG] = mysql_config_path
+    conf[CONF_BRAND] = brand
+
+    # refuse to reuse a stale build directory from a previous run
+    if os.path.exists(builddir):
+        error('the builddir %s already exists' % builddir)
+
+    show_build_info()
+    prepare_builddir(builddir)
+
+
+def show_build_info():
+    '''Print all conf information. Confirm before continue.
+
+    Skips the confirmation prompt when --yes was given.
+    '''
+    info('------------------------------------------')
+    info('Seafile Server Professional %s: BUILD INFO' % conf[CONF_VERSION])
+    info('------------------------------------------')
+    info('seafile:          %s' % conf[CONF_SEAFILE_VERSION])
+    info('ccnet:            %s' % conf[CONF_CCNET_VERSION])
+    info('libsearpc:        %s' % conf[CONF_LIBSEARPC_VERSION])
+    info('builddir:         %s' % conf[CONF_BUILDDIR])
+    info('outputdir:        %s' % conf[CONF_OUTPUTDIR])
+    info('source dir:       %s' % conf[CONF_SRCDIR])
+    info('thirdpart dir:    %s' % conf[CONF_THIRDPARTDIR])
+    info('pro libs dir:     %s' % conf[CONF_PROLIBSDIR])
+    info('ceph support:     %s' % (not conf[CONF_NO_CEPH]))
+    info('strip symbols:    %s' % (not conf[CONF_NO_STRIP]))
+    info('jobs:             %s' % conf[CONF_JOBS])
+    info('clean on exit:    %s' % (not conf[CONF_KEEP]))
+    if conf[CONF_YES]:
+        return
+    info('------------------------------------------')
+    # NOTE(review): input() actually waits for Enter, not 'any key'
+    info('press any key to continue ')
+    info('------------------------------------------')
+    dummy = input()
+
+
+def prepare_builddir(builddir):
+    '''Create the build dir skeleton and chdir into it.
+
+    Unless --keep was given, registers an atexit hook that deletes the
+    whole build dir when the script terminates (success or failure).
+    '''
+    must_mkdir(builddir)
+
+    if not conf[CONF_KEEP]:
+
+        def remove_builddir():
+            '''Remove the builddir when exit'''
+            info('remove builddir before exit')
+            shutil.rmtree(builddir, ignore_errors=True)
+
+        atexit.register(remove_builddir)
+
+    # all subsequent relative paths in the script resolve against builddir
+    os.chdir(builddir)
+
+    must_mkdir(os.path.join(builddir, 'seafile-server'))
+    must_mkdir(os.path.join(builddir, 'seafile-server', 'seafile'))
+
+
+def parse_args():
+    '''Define and parse the command line options, then hand off to
+    validate_args() which fills the global ``conf`` dict.
+    '''
+    parser = optparse.OptionParser()
+
+    def long_opt(opt):
+        # turn a conf key like 'version' into the option string '--version'
+        return '--' + opt
+
+    parser.add_option(
+        long_opt(CONF_THIRDPARTDIR),
+        dest=CONF_THIRDPARTDIR,
+        nargs=1,
+        help='where to find the thirdpart libs for seahub')
+
+    parser.add_option(
+        long_opt(CONF_PROLIBSDIR),
+        dest=CONF_PROLIBSDIR,
+        nargs=1,
+        help='where to find the python libs for seafile professional')
+
+    parser.add_option(
+        long_opt(CONF_VERSION),
+        dest=CONF_VERSION,
+        nargs=1,
+        help=
+        'the version to build. Must be digits delimited by dots, like 1.3.0')
+
+    parser.add_option(
+        long_opt(CONF_SEAFILE_VERSION),
+        dest=CONF_SEAFILE_VERSION,
+        nargs=1,
+        help=
+        'the version of seafile as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')
+
+    parser.add_option(
+        long_opt(CONF_LIBSEARPC_VERSION),
+        dest=CONF_LIBSEARPC_VERSION,
+        nargs=1,
+        help=
+        'the version of libsearpc as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')
+
+    parser.add_option(
+        long_opt(CONF_CCNET_VERSION),
+        dest=CONF_CCNET_VERSION,
+        nargs=1,
+        help=
+        'the version of ccnet as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')
+
+    parser.add_option(
+        long_opt(CONF_BUILDDIR),
+        dest=CONF_BUILDDIR,
+        nargs=1,
+        help='the directory to build the source. Defaults to /tmp',
+        default=tempfile.gettempdir())
+
+    parser.add_option(
+        long_opt(CONF_OUTPUTDIR),
+        dest=CONF_OUTPUTDIR,
+        nargs=1,
+        help=
+        'the output directory to put the generated server tarball. Defaults to the current directory.',
+        default=os.getcwd())
+
+    parser.add_option(
+        long_opt(CONF_SRCDIR),
+        dest=CONF_SRCDIR,
+        nargs=1,
+        help='''Source tarballs must be placed in this directory.''')
+
+    parser.add_option(
+        long_opt(CONF_KEEP),
+        dest=CONF_KEEP,
+        action='store_true',
+        help=
+        '''keep the build directory after the script exits. By default, the script would delete the build directory at exit.''')
+
+    parser.add_option(
+        long_opt(CONF_NO_STRIP),
+        dest=CONF_NO_STRIP,
+        action='store_true',
+        help='''do not strip debug symbols''')
+
+    parser.add_option(
+        long_opt(CONF_YES),
+        dest=CONF_YES,
+        action='store_true',
+        help='''assume yes to all questions''')
+
+    parser.add_option(long_opt(CONF_JOBS), dest=CONF_JOBS, default=2, type=int)
+
+    parser.add_option(
+        long_opt(CONF_NO_CEPH),
+        dest=CONF_NO_CEPH,
+        action='store_true',
+        help='''do not enable storage backends''')
+
+    parser.add_option(long_opt(CONF_MYSQL_CONFIG),
+                      dest=CONF_MYSQL_CONFIG,
+                      nargs=1,
+                      help='''Absolute path to mysql_config or mariadb_config program.''')
+
+    parser.add_option(long_opt(CONF_BRAND),
+                      dest=CONF_BRAND,
+                      default='',
+                      help='''brand name of the package''')
+
+    usage = parser.format_help()
+    options, remain = parser.parse_args()
+    # any positional (non-option) argument is a usage error
+    if remain:
+        error(usage=usage)
+
+    validate_args(usage, options)
+
+
+def setup_build_env():
+    '''Setup environment variables, such as export PATH=$BUILDDDIR/bin:$PATH,
+    so that configure/make of each project finds the headers, libs and
+    pkg-config files already installed into the shared prefix.
+    '''
+    prefix = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seafile')
+
+    # NOTE(review): parameter is spelled 'seperator' (sic) — kept as-is since
+    # it is referenced by keyword below.
+    def prepend_env_value(name, value, seperator=':'):
+        '''Prepend ``value`` to the environment variable ``name``.'''
+        try:
+            current_value = os.environ[name]
+        except KeyError:
+            current_value = ''
+
+        new_value = value
+        if current_value:
+            new_value += seperator + current_value
+
+        os.environ[name] = new_value
+
+    prepend_env_value('CPPFLAGS',
+                      '-I%s' % os.path.join(prefix, 'include'),
+                      seperator=' ')
+
+    # keep debug info and disable optimization when symbols are not stripped
+    if conf[CONF_NO_STRIP]:
+        prepend_env_value('CPPFLAGS', '-g -O0', seperator=' ')
+
+    prepend_env_value('LDFLAGS',
+                      '-L%s' % os.path.join(prefix, 'lib'),
+                      seperator=' ')
+
+    prepend_env_value('LDFLAGS',
+                      '-L%s' % os.path.join(prefix, 'lib64'),
+                      seperator=' ')
+
+    prepend_env_value('PATH', os.path.join(prefix, 'bin'))
+    prepend_env_value('PKG_CONFIG_PATH', os.path.join(prefix, 'lib',
+                                                      'pkgconfig'))
+    prepend_env_value('PKG_CONFIG_PATH', os.path.join(prefix, 'lib64',
+                                                      'pkgconfig'))
+
+
+def copy_pro_libs():
+    '''Copy pro.py and python libs for Seafile Professional to
+    seafile-server/pro/python, plus the seahub-extra SQL scripts to
+    seafile-server/pro/misc.
+
+    '''
+    builddir = conf[CONF_BUILDDIR]
+    pro_program_dir = os.path.join(builddir, 'seafile-server', 'pro')
+    if not os.path.exists(pro_program_dir):
+        must_mkdir(pro_program_dir)
+
+    pro_misc_dir = os.path.join(pro_program_dir, 'misc')
+    if not os.path.exists(pro_misc_dir):
+        must_mkdir(pro_misc_dir)
+
+    pro_libs_dir = os.path.join(pro_program_dir, 'python')
+    must_mkdir(pro_libs_dir)
+
+    # pre-installed pro python deps (sqlalchemy, thrift, elasticsearch, ...)
+    must_copytree(conf[CONF_PROLIBSDIR], pro_libs_dir)
+
+    pro_py = os.path.join(Seafile().projdir, 'scripts', 'pro.py')
+    must_copy(pro_py, pro_program_dir)
+
+    seahub_extra_sql_sqlite3 = os.path.join(Seafile().projdir, 'scripts',
+                                            'seahub_extra.sqlite3.sql')
+    seahub_extra_sql_mysql = os.path.join(Seafile().projdir, 'scripts',
+                                          'seahub_extra.mysql.sql')
+    must_copy(seahub_extra_sql_sqlite3, pro_misc_dir)
+    must_copy(seahub_extra_sql_mysql, pro_misc_dir)
+
+    uncompress_seafes_seafevents()
+
+
+def uncompress_seafes_seafevents():
+    '''Extract seafes.tar.gz and seafevents.tar.gz, libevent.tar.gz to
+    seafile-server/pro/python
+
+    Exits via error() when any extraction fails.
+    '''
+    builddir = conf[CONF_BUILDDIR]
+    pro_libs_dir = os.path.join(builddir, 'seafile-server', 'pro', 'python')
+
+    tarball = os.path.join(conf[CONF_SRCDIR], 'seafes.tar.gz')
+    if run('tar xf %s -C %s' % (tarball, pro_libs_dir)) != 0:
+        error('failed to uncompress %s' % tarball)
+
+    tarball = os.path.join(conf[CONF_SRCDIR], 'seafevents.tar.gz')
+    if run('tar xf %s -C %s' % (tarball, pro_libs_dir)) != 0:
+        error('failed to uncompress %s' % tarball)
+
+    tarball = os.path.join(conf[CONF_SRCDIR], 'libevent.tar.gz')
+    if run('tar xf %s -C %s' % (tarball, pro_libs_dir)) != 0:
+        error('failed to uncompress %s' % tarball)
+
+
+def copy_seafdav():
+    '''Extract seafdav.tar.gz AND seafobj.tar.gz into seahub/thirdpart.
+
+    (Despite the name, this handles seafobj too.)
+    '''
+    dst_dir = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seahub',
+                           'thirdpart')
+    tarball = os.path.join(conf[CONF_SRCDIR], 'seafdav.tar.gz')
+    if run('tar xf %s -C %s' % (tarball, dst_dir)) != 0:
+        error('failed to uncompress %s' % tarball)
+
+    # NOTE(review): same destination as above — this reassignment is redundant
+    dst_dir = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seahub',
+                           'thirdpart')
+    tarball = os.path.join(conf[CONF_SRCDIR], 'seafobj.tar.gz')
+    if run('tar xf %s -C %s' % (tarball, dst_dir)) != 0:
+        error('failed to uncompress %s' % tarball)
+
+
+def copy_elasticsearch():
+    '''Extract elasticsearch to seafile-server/pro/'''
+    builddir = conf[CONF_BUILDDIR]
+    pro_dir = os.path.join(builddir, 'seafile-server', 'pro')
+    es_tarball = os.path.join(conf[CONF_SRCDIR], 'elasticsearch.tar.gz')
+
+    if run('tar xf %s -C %s' % (es_tarball, pro_dir)) != 0:
+        error('failed to uncompress elasticsearch')
+
+
+def copy_user_manuals():
+    '''Copy the seafile tutorial doc(s) into seafile/docs.'''
+    builddir = conf[CONF_BUILDDIR]
+    # NOTE(review): projdir is already rooted at conf[CONF_BUILDDIR]; if it is
+    # absolute, os.path.join ignores the leading builddir here — presumably
+    # harmless, but worth confirming.
+    src_pattern = os.path.join(builddir, Seafile().projdir, 'doc',
+                               'seafile-tutorial.doc')
+    dst_dir = os.path.join(builddir, 'seafile-server', 'seafile', 'docs')
+
+    must_mkdir(dst_dir)
+
+    # glob tolerates the doc being absent (loop simply does nothing)
+    for path in glob.glob(src_pattern):
+        must_copy(path, dst_dir)
+
+
+def copy_scripts_and_libs():
+    '''Copy server release scripts and shared libs, as well as seahub
+    thirdpart libs.  This is the main assembly step that populates the
+    seafile-server/ directory from the built projects.
+
+    '''
+    builddir = conf[CONF_BUILDDIR]
+    scripts_srcdir = os.path.join(builddir, Seafile().projdir, 'scripts')
+    serverdir = os.path.join(builddir, 'seafile-server')
+
+    # top-level setup/control scripts
+    must_copy(os.path.join(scripts_srcdir, 'setup-seafile.sh'), serverdir)
+    must_copy(
+        os.path.join(scripts_srcdir, 'setup-seafile-mysql.sh'), serverdir)
+    must_copy(
+        os.path.join(scripts_srcdir, 'setup-seafile-mysql.py'), serverdir)
+    must_copy(os.path.join(scripts_srcdir, 'seafile.sh'), serverdir)
+    must_copy(os.path.join(scripts_srcdir, 'seahub.sh'), serverdir)
+    must_copy(os.path.join(scripts_srcdir, 'reset-admin.sh'), serverdir)
+    must_copy(os.path.join(scripts_srcdir, 'seaf-fuse.sh'), serverdir)
+    must_copy(os.path.join(scripts_srcdir, 'seaf-gc.sh'), serverdir)
+    must_copy(os.path.join(scripts_srcdir, 'seaf-fsck.sh'), serverdir)
+    must_copy(
+        os.path.join(scripts_srcdir, 'seafile-background-tasks.sh'), serverdir)
+    must_copy(os.path.join(scripts_srcdir, 'check_init_admin.py'), serverdir)
+    must_copy(os.path.join(scripts_srcdir, 'check-db-type.py'), serverdir)
+
+    # Command line for real-time backup server
+    must_copy(os.path.join(scripts_srcdir, 'seaf-backup-cmd.py'), serverdir)
+    must_copy(os.path.join(scripts_srcdir, 'seaf-backup-cmd.sh'), serverdir)
+    # copy seaf-import, store_encrypt related scripts
+    must_copy(os.path.join(scripts_srcdir, 'seaf-import.sh'), serverdir)
+    must_copy(os.path.join(scripts_srcdir, 'seaf-gen-key.sh'), serverdir)
+    must_copy(os.path.join(scripts_srcdir, 'seaf-encrypt.sh'), serverdir)
+
+    # general migrate script
+    must_copy(os.path.join(scripts_srcdir, 'migrate.py'), serverdir)
+    must_copy(os.path.join(scripts_srcdir, 'migrate.sh'), serverdir)
+
+    # general migrate repo script
+    must_copy(os.path.join(scripts_srcdir, 'migrate-repo.py'), serverdir)
+    must_copy(os.path.join(scripts_srcdir, 'migrate-repo.sh'), serverdir)
+
+    # general seafes script
+    must_copy(os.path.join(scripts_srcdir, 'run_index_master.sh'), serverdir)
+    must_copy(os.path.join(scripts_srcdir, 'run_index_worker.sh'), serverdir)
+    must_copy(os.path.join(scripts_srcdir, 'index_op.py'), serverdir)
+
+    # copy update scripts
+    update_scriptsdir = os.path.join(scripts_srcdir, 'upgrade')
+    dst_update_scriptsdir = os.path.join(serverdir, 'upgrade')
+    try:
+        shutil.copytree(update_scriptsdir, dst_update_scriptsdir)
+    except Exception as e:
+        error('failed to copy upgrade scripts: %s' % e)
+
+    # copy sql scripts
+    sql_scriptsdir = os.path.join(scripts_srcdir, 'sql')
+    dst_sql_scriptsdir = os.path.join(serverdir, 'sql')
+    try:
+        shutil.copytree(sql_scriptsdir, dst_sql_scriptsdir)
+    except Exception as e:
+        error('failed to copy sql scripts: %s' % e)
+
+    # copy create db sql scripts
+    create_db_scriptsdir = os.path.join(scripts_srcdir, 'create-db')
+    dst_create_db_scriptsdir = os.path.join(serverdir, 'create-db')
+    try:
+        shutil.copytree(create_db_scriptsdir, dst_create_db_scriptsdir)
+    except Exception as e:
+        error('failed to copy create db scripts: %s' % e)
+
+    # seahub ships its own oracle schema; install it as seahub_db.sql
+    seahub_oracle_sql_script = os.path.join(Seahub().projdir, 'sql', 'oracle.sql')
+    must_copy(seahub_oracle_sql_script, os.path.join(dst_create_db_scriptsdir, 'oracle', 'seahub_db.sql'))
+
+    # copy runtime/seahub.conf
+    runtimedir = os.path.join(serverdir, 'runtime')
+    must_mkdir(runtimedir)
+    must_copy(os.path.join(scripts_srcdir, 'seahub.conf'), runtimedir)
+
+    # move seahub to seafile-server/seahub
+    src_seahubdir = Seahub().projdir
+    dst_seahubdir = os.path.join(serverdir, 'seahub')
+    try:
+        shutil.move(src_seahubdir, dst_seahubdir)
+    except Exception as e:
+        error('failed to move seahub to seafile-server/seahub: %s' % e)
+
+    # copy seahub thirdpart libs
+    seahub_thirdpart = os.path.join(dst_seahubdir, 'thirdpart')
+    copy_seahub_thirdpart_libs(seahub_thirdpart)
+    copy_seafdav()
+    copy_seahub_extra()
+
+    # copy pro libs & elasticsearch
+    copy_pro_libs()
+    copy_elasticsearch()
+    copy_pdf2htmlex()
+
+    # copy shared c libs
+    copy_shared_libs()
+    copy_user_manuals()
+
+
+def copy_pdf2htmlex():
+    '''Copy pdf2htmlEX executable and its dependent shared libs into the
+    bundled seafile bin/ and lib/ directories.'''
+    pdf2htmlEX_executable = find_in_path('pdf2htmlEX')
+    libs = get_dependent_libs(pdf2htmlEX_executable)
+
+    builddir = conf[CONF_BUILDDIR]
+    dst_lib_dir = os.path.join(builddir, 'seafile-server', 'seafile', 'lib')
+
+    dst_bin_dir = os.path.join(builddir, 'seafile-server', 'seafile', 'bin')
+
+    for lib in libs:
+        dst_file = os.path.join(dst_lib_dir, os.path.basename(lib))
+        # a lib already bundled (e.g. by copy_shared_libs) is not re-copied
+        if os.path.exists(dst_file):
+            continue
+        info('Copying %s' % lib)
+        must_copy(lib, dst_lib_dir)
+
+    must_copy(pdf2htmlEX_executable, dst_bin_dir)
+
+
+def get_dependent_libs(executable):
+    '''Return the set of non-system shared-library paths that ``executable``
+    links against, as reported by ldd.  Exits when ldd reports any
+    unresolved dependency.
+    '''
+    # libraries assumed present on the target system (or bundled separately)
+    syslibs = ['libsearpc', 'libccnet', 'libseafile', 'libpthread.so',
+               'libc.so', 'libm.so', 'librt.so', 'libdl.so', 'libselinux.so',
+               'libresolv.so', 'libnss3.so', 'libnssutil3.so', 'libssl3.so']
+
+    def is_syslib(lib):
+        for syslib in syslibs:
+            if syslib in lib:
+                return True
+        return False
+
+    ldd_output = subprocess.getoutput('ldd %s' % executable)
+    if 'not found' in ldd_output:
+        print(ldd_output)
+        error('some deps of %s not found' % executable)
+    ret = set()
+    for line in ldd_output.splitlines():
+        # only parse lines of the form 'libfoo.so => /path/libfoo.so (0x...)'
+        tokens = line.split()
+        if len(tokens) != 4:
+            continue
+        if is_syslib(tokens[0]):
+            continue
+
+        # tokens[2] is the resolved absolute path after '=>'
+        ret.add(tokens[2])
+
+    return ret
+
+
+def copy_shared_libs():
+    '''copy shared c libs, such as libevent, glib, libmysqlclient, needed by
+    the bundled seaf-server/ccnet-server/seaf-fuse binaries into seafile/lib'''
+    builddir = conf[CONF_BUILDDIR]
+
+    dst_dir = os.path.join(builddir, 'seafile-server', 'seafile', 'lib')
+
+    seafile_path = os.path.join(builddir, 'seafile-server', 'seafile', 'bin',
+                                'seaf-server')
+
+    ccnet_server_path = os.path.join(builddir, 'seafile-server', 'seafile',
+                                     'bin', 'ccnet-server')
+
+    seaf_fuse_path = os.path.join(builddir, 'seafile-server', 'seafile', 'bin',
+                                  'seaf-fuse')
+
+    # union of the three binaries' dependencies, deduplicated
+    libs = set()
+    libs.update(get_dependent_libs(ccnet_server_path))
+    libs.update(get_dependent_libs(seafile_path))
+    libs.update(get_dependent_libs(seaf_fuse_path))
+
+    for lib in libs:
+        dst_file = os.path.join(dst_dir, os.path.basename(lib))
+        if os.path.exists(dst_file):
+            continue
+        info('Copying %s' % lib)
+        # NOTE(review): plain shutil.copy here (raises on failure) while the
+        # rest of the file uses must_copy (exits) — confirm intended.
+        shutil.copy(lib, dst_dir)
+
+
+def copy_seahub_thirdpart_libs(seahub_thirdpart):
+    '''copy django/djblets/gunicorn from ${thirdpartdir} to
+    seahub/thirdpart
+
+    '''
+    src = conf[CONF_THIRDPARTDIR]
+    dst = seahub_thirdpart
+
+    must_copytree(src, dst)
+
+
+def copy_seahub_extra():
+    '''uncompress seahub-extra.tar.gz to seafile-server/seahub-extra'''
+    tarball = os.path.join(conf[CONF_SRCDIR], 'seahub-extra.tar.gz')
+    builddir = conf[CONF_BUILDDIR]
+    seahub_dir = os.path.join(builddir, 'seafile-server')
+
+    if run('tar xf %s -C %s' % (tarball, seahub_dir)) != 0:
+        error('failed to uncompress elasticsearch')
+
+
+def strip_symbols():
+    '''Walk the bundled seafile-server/seafile tree (relative to the cwd,
+    i.e. the builddir): delete static libs (.a/.la) and strip debug symbols
+    from every unstripped binary.'''
+    def do_strip(fn):
+        # some installed files are read-only; make them writable first
+        run('chmod u+w %s' % fn)
+        info('stripping:    %s' % fn)
+        run('strip "%s"' % fn)
+
+    def remove_static_lib(fn):
+        info('removing:     %s' % fn)
+        os.remove(fn)
+
+    for parent, dnames, fnames in os.walk('seafile-server/seafile'):
+        dummy = dnames  # avoid pylint 'unused' warning
+        for fname in fnames:
+            fn = os.path.join(parent, fname)
+            if os.path.isdir(fn):
+                continue
+
+            if fn.endswith(".a") or fn.endswith(".la"):
+                remove_static_lib(fn)
+                continue
+
+            # skip symlinks so the real file is only stripped once
+            if os.path.islink(fn):
+                continue
+
+            # use file(1) to detect binaries that still carry symbols
+            finfo = subprocess.getoutput('file "%s"' % fn)
+
+            if 'not stripped' in finfo:
+                do_strip(fn)
+
+
def create_tarball(tarball_name):
    '''Rename seafile-server to its versioned name and pack it into a
    gzipped tarball named `tarball_name`, excluding development leftovers.
    '''
    version = conf[CONF_VERSION]

    serverdir = 'seafile-server'
    versioned_serverdir = 'seafile-pro-server-' + version

    # The tarball must unpack into a versioned top-level directory.
    try:
        shutil.move(serverdir, versioned_serverdir)
    except Exception as e:
        error('failed to move %s to %s: %s' %
              (serverdir, versioned_serverdir, e))

    join = os.path.join
    ignored_patterns = [
        # common ignored files
        '*.pyc',
        '*~',
        '*#',

        # seahub
        join(versioned_serverdir, 'seahub', '.git*'),
        join(versioned_serverdir, 'seahub', 'media', 'flexpaper*'),
        join(versioned_serverdir, 'seahub', 'avatar', 'testdata*'),

        # seafile
        join(versioned_serverdir, 'seafile', 'share*'),
        join(versioned_serverdir, 'seafile', 'include*'),
        join(versioned_serverdir, 'seafile', 'lib', 'pkgconfig*'),
        join(versioned_serverdir, 'seafile', 'lib64', 'pkgconfig*'),
        join(versioned_serverdir, 'seafile', 'bin', 'ccnet-demo*'),
        join(versioned_serverdir, 'seafile', 'bin', 'ccnet-tool'),
        join(versioned_serverdir, 'seafile', 'bin', 'ccnet-servtool'),
        join(versioned_serverdir, 'seafile', 'bin', 'searpc-codegen.py'),
        join(versioned_serverdir, 'seafile', 'bin', 'seafile-admin'),
        join(versioned_serverdir, 'seafile', 'bin', 'seafile'),
    ]

    excludes = ' '.join('--exclude=%s' % pattern
                        for pattern in ignored_patterns)

    tar_cmd = 'tar czvf %s %s %s' % (tarball_name, versioned_serverdir,
                                     excludes)

    if run(tar_cmd, suppress_stdout=True) != 0:
        error('failed to generate the tarball')
+
+
def gen_tarball():
    '''Strip symbols (unless --nostrip), build the versioned tarball and
    copy it into the output directory (--outputdir).
    '''
    # strip symbols of libraries to reduce size
    if not conf[CONF_NO_STRIP]:
        try:
            strip_symbols()
        except Exception as e:
            error('failed to strip symbols: %s' % e)

    # determine the output name
    # 64-bit: seafile-server_1.2.2_x86-64.tar.gz
    # 32-bit: seafile-server_1.2.2_i386.tar.gz
    version = conf[CONF_VERSION]
    # NOTE(review): anything that is not x86_64 is labelled i386 here
    # (including ARM hosts) -- kept as-is; confirm if other architectures
    # are ever built.
    arch = os.uname()[-1].replace('_', '-')
    if arch != 'x86-64':
        arch = 'i386'

    dbg = ''
    if conf[CONF_NO_STRIP]:
        dbg = '.dbg'

    no_ceph = ''
    if conf[CONF_NO_CEPH]:
        no_ceph = '.no-ceph'

    brand = ''
    if conf[CONF_BRAND]:
        brand = '-%s' % conf[CONF_BRAND]

    tarball_name = 'seafile-pro-server_%(version)s_%(arch)s%(brand)s%(no_ceph)s%(dbg)s.tar.gz' \
                   % dict(version=version, arch=arch, dbg=dbg, no_ceph=no_ceph, brand=brand)
    dst_tarball = os.path.join(conf[CONF_OUTPUTDIR], tarball_name)

    # generate the tarball
    try:
        create_tarball(tarball_name)
    except Exception as e:
        error('failed to generate tarball: %s' % e)

    # move tarball to outputdir
    try:
        shutil.copy(tarball_name, dst_tarball)
    except Exception as e:
        error('failed to copy %s to %s: %s' % (tarball_name, dst_tarball, e))

    print('---------------------------------------------')
    # Bug fix: grammar in the success message ("is successfully").
    print('The build is successful. Output is:\t%s' % dst_tarball)
    print('---------------------------------------------')
+
+
def main():
    '''Entry point: parse options, prepare the build directory, build each
    component in dependency order, then produce the final tarball.
    '''
    parse_args()
    setup_build_env()

    # Dependency order matters: libsearpc -> ccnet -> seafile -> seahub.
    for component in (Libsearpc(), Ccnet(), Seafile(), Seahub()):
        component.uncompress()
        component.build()

    copy_scripts_and_libs()
    gen_tarball()


if __name__ == '__main__':
    main()
diff --git a/scripts/build/build-server.py b/scripts/build/build-server.py
index dbe8355abc..a479b6c75c 100755
--- a/scripts/build/build-server.py
+++ b/scripts/build/build-server.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # coding: UTF-8
 
 '''This script builds the seafile server tarball.
diff --git a/scripts/build/office.py b/scripts/build/office.py
new file mode 100644
index 0000000000..9e036aac9a
--- /dev/null
+++ b/scripts/build/office.py
@@ -0,0 +1,69 @@
+import os
+import sys
+import subprocess
+import shutil
+
def get_dependent_libs(executable):
    '''Return the resolved paths of the non-system shared libraries that
    `executable` links against, as reported by ldd.
    '''
    syslibs = ['libsearpc', 'libccnet', 'libseafile', 'libpthread.so', 'libc.so', 'libm.so', 'librt.so', 'libdl.so', 'libselinux.so', 'libresolv.so' ]

    def is_syslib(lib):
        return any(syslib in lib for syslib in syslibs)

    ret = []
    # Interesting ldd lines look like: "libfoo.so => /path/libfoo.so (0x...)"
    # i.e. exactly four whitespace-separated tokens.
    for line in subprocess.getoutput('ldd %s' % executable).splitlines():
        tokens = line.split()
        if len(tokens) == 4 and not is_syslib(tokens[0]):
            ret.append(tokens[2])
    return ret
+
def prepend_env_value(name, value, seperator=':'):
    '''Prepend `value` to the separator-joined environment variable `name`.

    If the variable is unset or empty it becomes exactly `value`; otherwise
    the result is value + seperator + old_value.  (The old docstring wrongly
    said "append a new value to a list".)
    '''
    current_value = os.environ.get(name, '')

    new_value = value
    if current_value:
        new_value += seperator + current_value

    os.environ[name] = new_value
+
def main():
    '''Copy pdf2htmlEX and the shared libraries it needs (beyond the ones
    httpserver already ships) into the destination given as sys.argv[1].
    '''
    # NOTE(review): this path is hard-coded to version 1.6.5 of the pro
    # server build tree -- confirm it matches the current build.
    seafile_dir = '/tmp/seafile-pro-server-build/seafile-pro-server-1.6.5/seafile'
    prepend_env_value ('LD_LIBRARY_PATH', os.path.join(seafile_dir, 'lib'))

    destdir = sys.argv[1]
    dest_libdir = os.path.join(destdir, 'lib')
    dest_bindir = os.path.join(destdir, 'bin')

    # Make sure bin/ and lib/ exist under the destination directory.
    for d in (dest_bindir, dest_libdir):
        if os.path.isdir(d):
            continue
        if os.path.exists(d):
            raise RuntimeError('"%s" is not a directory!' % d)
        os.makedirs(d)

    bindir = os.path.join(seafile_dir, 'bin')
    httpserver = os.path.join(bindir, 'httpserver')
    pdf2htmlEX = os.path.join(bindir, 'pdf2htmlEX')

    # Ship only the libs pdf2htmlEX needs that httpserver does not provide.
    needed_libs = set(get_dependent_libs(pdf2htmlEX)) - set(get_dependent_libs(httpserver))
    for lib in needed_libs:
        dest = os.path.join(dest_libdir, os.path.basename(lib))
        if not os.path.exists(dest):
            shutil.copy(lib, dest)

    shutil.copy(pdf2htmlEX, dest_bindir)

if __name__ == '__main__':
    main()
diff --git a/scripts/check-db-type.py b/scripts/check-db-type.py
new file mode 100644
index 0000000000..daf2f67703
--- /dev/null
+++ b/scripts/check-db-type.py
@@ -0,0 +1,23 @@
import sys
from configparser import ConfigParser

# Print the database backend ('sqlite'/'mysql'/'pgsql'/'unknown') configured
# in the given seafile config file; callers (shell scripts) read stdout.
if len(sys.argv) != 2:
    print('check-db-type.py <seafile-config-file>', file=sys.stderr)
    # Bug fix: the script used to fall through and crash with an IndexError
    # on sys.argv[1]; exit with a usage error instead.
    sys.exit(1)

seafile_conf_file = sys.argv[1]

parser = ConfigParser()
parser.read(seafile_conf_file)

# A missing [database] type option means the default sqlite backend.
if not parser.has_option('database', 'type'):
    print('sqlite')
else:
    db_type = parser.get('database', 'type')
    if db_type in ('sqlite', 'mysql', 'pgsql'):
        print(db_type)
    else:
        print('unknown')
diff --git a/scripts/create-db/oracle/ccnet_db.sql b/scripts/create-db/oracle/ccnet_db.sql
new file mode 100644
index 0000000000..b42e5c03a7
--- /dev/null
+++ b/scripts/create-db/oracle/ccnet_db.sql
@@ -0,0 +1,71 @@
-- Ccnet database schema (Oracle dialect).
-- Oracle has no AUTO_INCREMENT, so id columns are paired with explicitly
-- created SEQUENCEs that callers use to generate new ids.

-- User db tables
CREATE TABLE EmailUser (
  id int PRIMARY KEY,
  email varchar2(255),
  passwd varchar2(256),
  is_staff int,
  is_active int,
  ctime number,
  UNIQUE (email)
);
CREATE SEQUENCE EmailUsersIdSeq MINVALUE 1 START WITH 1 INCREMENT BY 1;
CREATE TABLE LDAPUsers (
  id int PRIMARY KEY,
  email varchar2(255),
  password varchar2(256),
  is_staff int,
  is_active int,
  extra_attrs varchar2(1024),
  UNIQUE (email)
);
CREATE SEQUENCE LDAPUsersIdSeq MINVALUE 1 START WITH 1 INCREMENT BY 1;
CREATE TABLE UserRole (
  email varchar2(255),
  role varchar2(255),
  UNIQUE (email, role)
);

-- Group db tables
-- "Group" must stay quoted: GROUP is a reserved word in Oracle.
CREATE TABLE "Group" (
  group_id int PRIMARY KEY,
  group_name varchar2(255),
  creator_name varchar2(255),
  timestamp number,
  type varchar2(32)
);
CREATE SEQUENCE GroupIdSeq MINVALUE 1 START WITH 1 INCREMENT BY 1;
CREATE TABLE GroupUser (
  group_id int,
  user_name varchar2(255),
  is_staff int,
  UNIQUE (group_id, user_name)
);
CREATE INDEX GroupUserNameIndex ON GroupUser (user_name);
CREATE TABLE GroupDNPair (
  group_id int,
  dn varchar2(255)
);

-- Org db tables
CREATE TABLE Organization (
  org_id int PRIMARY KEY,
  org_name varchar2(255),
  url_prefix varchar2(255),
  creator varchar2(255),
  ctime number,
  UNIQUE (url_prefix)
);
CREATE SEQUENCE OrgIdSeq MINVALUE 1 START WITH 1 INCREMENT BY 1;
CREATE TABLE OrgUser (
  org_id int,
  email varchar2(255),
  is_staff int,
  UNIQUE (org_id, email)
);
CREATE INDEX OrgUserEmailIndex ON OrgUser (email);
CREATE TABLE OrgGroup (
  org_id int,
  group_id int,
  UNIQUE (org_id, group_id)
);
CREATE INDEX OrgGroupIdIndex ON OrgGroup (group_id);
diff --git a/scripts/create-db/oracle/seafile_db.sql b/scripts/create-db/oracle/seafile_db.sql
new file mode 100644
index 0000000000..efe22392ab
--- /dev/null
+++ b/scripts/create-db/oracle/seafile_db.sql
@@ -0,0 +1,228 @@
-- Seafile server database schema (Oracle dialect).
-- repo_id columns are 36-char UUID strings; commit/object ids are 40-char
-- SHA-1 hex strings.  Oracle reserved words ("user", "size") are quoted,
-- and SEQUENCEs stand in for AUTO_INCREMENT.
CREATE TABLE Branch (
  name varchar2(10),
  repo_id char(36),
  commit_id char(40),
  PRIMARY KEY (repo_id,name)
);
CREATE TABLE FileLockTimestamp (
  repo_id char(36),
  update_time number,
  PRIMARY KEY (repo_id)
);
CREATE TABLE FileLocks (
  repo_id char(36),
  path varchar2(1024 char),
  user_name varchar2(255),
  lock_time number,
  expire number
);
CREATE INDEX FileLocksIndex ON FileLocks (repo_id);
CREATE TABLE FolderGroupPerm (
  repo_id char(36),
  path varchar2(1024 char),
  permission varchar2(15),
  group_id int
);
CREATE INDEX FolderGroupPermIndex ON FolderGroupPerm (repo_id);
CREATE TABLE FolderPermTimestamp (
  repo_id char(36),
  timestamp number,
  PRIMARY KEY (repo_id)
);
CREATE TABLE FolderUserPerm (
  repo_id char(36),
  path varchar2(1024 char),
  permission varchar2(15),
  "user" varchar2(255)
);
CREATE INDEX FolderUserPermIndex ON FolderUserPerm (repo_id);
CREATE TABLE GCID (
  repo_id char(36),
  gc_id char(36),
  PRIMARY KEY (repo_id)
);
CREATE TABLE GarbageRepos (
  repo_id char(36),
  PRIMARY KEY (repo_id)
);
CREATE TABLE InnerPubRepo (
  repo_id char(36),
  permission varchar2(15),
  PRIMARY KEY (repo_id)
);
CREATE TABLE LastGCID (
  repo_id char(36),
  client_id varchar2(128),
  gc_id char(36),
  PRIMARY KEY (repo_id,client_id)
);
-- Org* tables mirror the corresponding non-org tables with an extra
-- org_id dimension.
CREATE TABLE OrgGroupRepo (
  org_id int,
  repo_id char(36),
  group_id int,
  owner varchar2(255),
  permission varchar2(15),
  PRIMARY KEY (org_id,group_id,repo_id)
);
CREATE INDEX OrgGroupRepoIdIndex ON OrgGroupRepo (repo_id);
CREATE INDEX OrgGroupRepoOwnerIndex ON OrgGroupRepo (owner);
CREATE TABLE OrgInnerPubRepo (
  org_id int,
  repo_id char(36),
  permission varchar2(15),
  PRIMARY KEY (org_id,repo_id)
);
CREATE TABLE OrgQuota (
  org_id int,
  quota number,
  PRIMARY KEY (org_id)
);
CREATE TABLE OrgRepo (
  org_id int,
  repo_id char(36),
  "user" varchar2(255),
  PRIMARY KEY (org_id,repo_id),
  UNIQUE (repo_id)
);
CREATE INDEX OrgRepoOrgIdIndex ON OrgRepo (org_id, "user");
-- id values come from OrgSharedRepoSeq.
CREATE TABLE OrgSharedRepo (
  id int,
  org_id int,
  repo_id char(36),
  from_email varchar2(255),
  to_email varchar2(255),
  permission varchar2(15),
  PRIMARY KEY (id)
);
CREATE SEQUENCE OrgSharedRepoSeq MINVALUE 1 START WITH 1 INCREMENT BY 1;
CREATE INDEX OrgSharedRepoIdIndex ON OrgSharedRepo (org_id, repo_id);
CREATE INDEX OrgSharedRepoFromEmailIndex ON OrgSharedRepo (from_email);
CREATE INDEX OrgSharedRepoToEmailIndex ON OrgSharedRepo (to_email);
CREATE TABLE OrgUserQuota (
  org_id int,
  "user" varchar2(255),
  quota number,
  PRIMARY KEY (org_id,"user")
);
CREATE TABLE Repo (
  repo_id char(36),
  PRIMARY KEY (repo_id)
);
CREATE TABLE RepoFileCount (
  repo_id char(36),
  file_count number,
  PRIMARY KEY (repo_id)
);
CREATE TABLE RepoGroup (
  repo_id char(36),
  group_id int,
  user_name varchar2(255),
  permission varchar2(15),
  PRIMARY KEY (group_id,repo_id)
);
CREATE INDEX RepoGroupIdIndex ON RepoGroup (repo_id);
CREATE INDEX RepoGroupUsernameIndex ON RepoGroup (user_name);
CREATE TABLE RepoHead (
  repo_id char(36),
  branch_name varchar2(10),
  PRIMARY KEY (repo_id)
);
CREATE TABLE RepoHistoryLimit (
  repo_id char(36),
  days int,
  PRIMARY KEY (repo_id)
);
CREATE TABLE RepoOwner (
  repo_id char(36),
  owner_id varchar2(255),
  PRIMARY KEY (repo_id)
);
CREATE INDEX RepoOwnerNameIndex ON RepoOwner (owner_id);
CREATE TABLE RepoSize (
  repo_id char(36),
  "size" number,
  head_id char(40),
  PRIMARY KEY (repo_id)
);
CREATE TABLE RepoSyncError (
  token char(40),
  error_time number,
  error_con varchar2(50),
  PRIMARY KEY (token)
);
CREATE TABLE RepoTokenPeerInfo (
  token char(40),
  peer_id char(40),
  peer_ip varchar2(40),
  peer_name varchar2(255),
  sync_time number,
  client_ver varchar2(20),
  PRIMARY KEY (token)
);
CREATE TABLE RepoTrash (
  repo_id char(36),
  repo_name varchar2(255),
  head_id char(40),
  owner_id varchar2(255),
  "size" number,
  org_id int,
  del_time number,
  PRIMARY KEY (repo_id)
);
CREATE INDEX RepoTrashOwnerIndex ON RepoTrash (owner_id);
CREATE INDEX RepoTrashOrgIdIndex ON RepoTrash (org_id);
CREATE TABLE RepoUserToken (
  repo_id char(36),
  email varchar2(255),
  token char(40),
  PRIMARY KEY (repo_id,token)
);
CREATE INDEX RepoUserTokenEmailIndex ON RepoUserToken (email);
CREATE TABLE RepoValidSince (
  repo_id char(36),
  timestamp number,
  PRIMARY KEY (repo_id)
);
-- id values come from SharedRepoSeq.
CREATE TABLE SharedRepo (
  id int,
  repo_id char(36),
  from_email varchar2(255),
  to_email varchar2(255),
  permission varchar2(15)
);
CREATE SEQUENCE SharedRepoSeq MINVALUE 1 START WITH 1 INCREMENT BY 1;
CREATE INDEX SharedRepoIdIndex ON SharedRepo (repo_id);
CREATE INDEX SharedRepoFromEmailIndex ON SharedRepo (from_email);
CREATE INDEX SharedRepoToEmailIndex ON SharedRepo (to_email);
CREATE TABLE SystemInfo (
  info_key varchar2(256) PRIMARY KEY,
  info_value varchar2(1024)
);
CREATE TABLE UserQuota (
  "user" varchar2(255),
  quota number,
  PRIMARY KEY ("user")
);
CREATE TABLE UserShareQuota (
  "user" varchar2(255),
  quota number,
  PRIMARY KEY ("user")
);
CREATE TABLE RoleQuota (
  role varchar2(255),
  quota number,
  PRIMARY KEY (role)
);
CREATE TABLE VirtualRepo (
  repo_id char(36),
  origin_repo char(36),
  path varchar2(1024 char),
  base_commit char(40),
  PRIMARY KEY (repo_id)
);
CREATE INDEX VirtualRepoOriginIndex ON VirtualRepo (origin_repo);
CREATE TABLE WebUploadTempFiles (
  repo_id char(36),
  file_path varchar2(1024 char),
  tmp_file_path varchar2(1024 char)
);
diff --git a/scripts/index_op.py b/scripts/index_op.py
new file mode 100755
index 0000000000..99e851cf1c
--- /dev/null
+++ b/scripts/index_op.py
@@ -0,0 +1,49 @@
+import logging
+import argparse
+
+from seafes.config import seafes_config
+from seafes.repo_data import repo_data
+from seafes.mq import get_mq
+
# Import-time setup: load the index-master configuration and connect to the
# message queue (redis, judging by the lpush/llen calls below).
seafes_config.load_index_master_conf()
mq = get_mq(seafes_config.subscribe_mq,
            seafes_config.subscribe_server,
            seafes_config.subscribe_port,
            seafes_config.subscribe_password)
+
def put_to_redis(repo_id, cmt_id):
    '''Queue an index_recover task for the given repo/commit pair.'''
    task_msg = "index_recover\t%s\t%s" % (repo_id, cmt_id)
    mq.lpush('index_task', task_msg)
+
def show_all_task():
    '''Log how many entries are pending on the index_task queue.'''
    pending = mq.llen('index_task')
    logging.info("index task count: %s" % pending)
+
def restore_all_repo():
    '''Page through every repo (1000 at a time) and queue an index task for
    each (repo_id, head commit id) pair.
    '''
    start, count = 0, 1000
    while True:
        try:
            repo_commits = repo_data.get_repo_id_commit_id(start, count)
        except Exception as e:
            logging.error("Error: %s" % e)
            return
        else:
            if len(repo_commits) == 0:
                break
            for repo_id, commit_id in repo_commits:
                put_to_redis(repo_id, commit_id)
            # Bug fix: advance by `count` instead of a second hard-coded
            # 1000, so changing the page size cannot skip or repeat repos.
            start += count
+
def main():
    '''CLI entry point: --mode selects queuing index tasks for all repos or
    showing the pending task count.
    '''
    parser = argparse.ArgumentParser(description='main program')
    parser.add_argument('--mode')
    parser_args = parser.parse_args()

    # Bug fix: the mode string was misspelled ('resotre_all_repo'); accept
    # the correct spelling while keeping the old one for compatibility with
    # existing invocations.
    if parser_args.mode in ('restore_all_repo', 'resotre_all_repo'):
        restore_all_repo()
    elif parser_args.mode == 'show_all_task':
        show_all_task()


if __name__ == '__main__':
    main()
+
diff --git a/scripts/migrate-repo.py b/scripts/migrate-repo.py
new file mode 100644
index 0000000000..a4fd7328df
--- /dev/null
+++ b/scripts/migrate-repo.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python3
+
+import os
+import sys
+import logging
+import configparser 
+from sqlalchemy import create_engine, text
+from sqlalchemy.orm import sessionmaker
+from migrate import ObjMigrateWorker
+from seafobj.objstore_factory import objstore_factory
+from seaserv import seafile_api as api
+from seaserv import REPO_STATUS_READ_ONLY, REPO_STATUS_NORMAL
+
+logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
+
def main(argv):
    '''Dispatch on argument count: 3 args migrate a single repo, 2 args
    migrate every repo.

    usage: migrate-repo.py [repo_id] <orig_storage_id> <dest_storage_id>
    '''
    if len(argv) == 4:
        all_migrate = False
        repo_id = argv[1]
        orig_storage_id = argv[2]
        dest_storage_id = argv[3]
    elif len(argv) == 3:
        all_migrate = True
        orig_storage_id = argv[1]
        dest_storage_id = argv[2]
    else:
        # Bug fix: any other argument count used to fall through and crash
        # with a NameError on all_migrate; print usage and exit instead.
        print('usage: migrate-repo.py [repo_id] <orig_storage_id> <dest_storage_id>',
              file=sys.stderr)
        sys.exit(1)

    if all_migrate:
        migrate_repos(orig_storage_id, dest_storage_id)
    else:
        migrate_repo(repo_id, orig_storage_id, dest_storage_id)
+
def parse_seafile_config():
    '''Read the [database] section of seafile.conf (located via the
    SEAFILE_CENTRAL_CONF_DIR environment variable) and return the
    (host, port, user, password, db_name) tuple.
    '''
    conf_path = os.path.join(os.environ['SEAFILE_CENTRAL_CONF_DIR'],
                             'seafile.conf')
    parser = configparser.ConfigParser()
    parser.read(conf_path)
    options = ('host', 'port', 'user', 'password', 'db_name')
    return tuple(parser.get('database', opt) for opt in options)
+
def get_repo_ids():
    '''Return all rows of repo ids from the Repo table, or None if the
    database cannot be queried.
    '''
    host, port, user, passwd, db_name = parse_seafile_config()
    # Security/bug fix: the old code printed the full connection URL
    # (including the database password) to stdout; never log credentials.
    url = 'mysql+pymysql://' + user + ':' + passwd + '@' + host + ':' + port + '/' + db_name
    sql = 'SELECT repo_id FROM Repo'
    try:
        engine = create_engine(url, echo=False)
        session = sessionmaker(engine)()
        result_proxy = session.execute(text(sql))
    except Exception as e:
        # Bug fix: the bare `except` silently swallowed every error; log a
        # sanitized message (no password) before giving up.
        logging.warning('Failed to query repo ids from %s:%s/%s: %s',
                        host, port, db_name, e)
        return None
    else:
        result = result_proxy.fetchall()
    return result
+
def migrate_repo(repo_id, orig_storage_id, dest_storage_id):
    '''Migrate one repo's commits/fs/blocks objects between storage backends.

    The repo is set read-only for the duration and restored to normal on
    every exit path; fatal errors terminate the process via sys.exit.
    '''
    api.set_repo_status (repo_id, REPO_STATUS_READ_ONLY)
    dtypes = ['commits', 'fs', 'blocks']
    workers = []
    for dtype in dtypes:
        obj_stores = objstore_factory.get_obj_stores(dtype)
        #If these storage ids passed in do not exist in conf, stop migrate this repo.
        if orig_storage_id not in obj_stores or dest_storage_id not in obj_stores:
            logging.warning('Storage id passed in does not exist in configuration.\n')
            api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
            sys.exit()

        orig_store = obj_stores[orig_storage_id]
        dest_store = obj_stores[dest_storage_id]

        # One migration worker thread per data type.
        try:
            worker = ObjMigrateWorker (orig_store, dest_store, dtype, repo_id)
            worker.start()
            workers.append(worker)
        except:
            logging.warning('Failed to migrate repo %s.', repo_id)

    # Wait for all workers; on interruption restore the repo status before
    # exiting.
    try:
        for w in workers:
            w.join()
    except:
        api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
        sys.exit(1)

    # Workers record failures in exit_code/exception instead of raising.
    for w in workers:
        if w.exit_code == 1:
            logging.warning(w.exception)
            api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
            sys.exit(1)

    if api.update_repo_storage_id(repo_id, dest_storage_id) < 0:
        logging.warning('Failed to update repo [%s] storage_id.\n', repo_id)
        api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
        return

    api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
    logging.info('The process of migrating repo [%s] is over.\n', repo_id)
+
def migrate_repos(orig_storage_id, dest_storage_id):
    '''Migrate every repo listed in the database between storage backends,
    one repo at a time (same per-repo procedure as migrate_repo above).
    '''
    repo_ids = get_repo_ids()

    for repo_id in repo_ids:
        # Rows come back as 1-tuples; skip anything malformed.
        try:
            repo_id = repo_id[0]
        except:
            continue
        api.set_repo_status (repo_id, REPO_STATUS_READ_ONLY)
        dtypes = ['commits', 'fs', 'blocks']
        workers = []
        for dtype in dtypes:
            obj_stores = objstore_factory.get_obj_stores(dtype)
            #If these storage ids passed in do not exist in conf, stop migrate this repo.
            if orig_storage_id not in obj_stores or dest_storage_id not in obj_stores:
                logging.warning('Storage id passed in does not exist in configuration.\n')
                api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
                sys.exit()

            orig_store = obj_stores[orig_storage_id]
            dest_store = obj_stores[dest_storage_id]

            # One migration worker thread per data type for this repo.
            try:
                worker = ObjMigrateWorker (orig_store, dest_store, dtype, repo_id)
                worker.start()
                workers.append(worker)
            except:
                logging.warning('Failed to migrate repo %s.', repo_id)

        # Wait for all workers; on interruption restore the repo status
        # before exiting.
        try:
            for w in workers:
                w.join()
        except:
            api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
            sys.exit(1)

        # Workers record failures in exit_code/exception instead of raising.
        for w in workers:
            if w.exit_code == 1:
                logging.warning(w.exception)
                api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
                sys.exit(1)

        if api.update_repo_storage_id(repo_id, dest_storage_id) < 0:
            logging.warning('Failed to update repo [%s] storage_id.\n', repo_id)
            api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
            return

        api.set_repo_status (repo_id, REPO_STATUS_NORMAL)
        logging.info('The process of migrating repo [%s] is over.\n', repo_id)

if __name__ == '__main__':
    main(sys.argv)
diff --git a/scripts/migrate-repo.sh b/scripts/migrate-repo.sh
new file mode 100755
index 0000000000..b4afa22ffc
--- /dev/null
+++ b/scripts/migrate-repo.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+
echo ""

# Resolve the installation layout relative to this script's location.
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
# Default seafile server directories exported to the python migrator.
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
default_conf_dir=${TOPDIR}/conf
seafile_rpc_pipe_path=${INSTALLPATH}/runtime
migrate=${INSTALLPATH}/migrate-repo.py

script_name=$0
function usage () {
    # Print CLI usage; the repo id is optional (omit it to migrate all repos).
    echo "usage : "
    echo "    ./$(basename ${script_name})" \
         "[repo id to migrate]" \
         "<origin storage id>" \
         "<destination storage id>"
    echo""
}
+
function check_python_executable() {
    # Pick a python3 interpreter, honoring a preset $PYTHON override.
    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
        return 0
    fi

    if which python3 2>/dev/null 1>&2; then
        PYTHON=python3
    elif ! (python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
        echo
        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
        echo
        exit 1
    else
        PYTHON="python"$(python --version | cut -b 8-10)
        # Bug fix: '!which' (no space) is parsed as a single command name
        # and always fails, so the missing-interpreter error below was
        # never reported; '!' must be a separate word to negate a command.
        if ! which $PYTHON 2>/dev/null 1>&2; then
            echo
            echo "Can't find a python executable of $PYTHON in PATH"
            echo "Install $PYTHON before continue."
            echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
            echo
            exit 1
        fi
    fi
}
+
function do_migrate () {
    # Export the seafile environment expected by migrate-repo.py, then run it.
    export CCNET_CONF_DIR=${default_ccnet_conf_dir}
    export SEAFILE_CONF_DIR=${default_seafile_data_dir}
    export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir}
    export SEAFILE_RPC_PIPE_PATH=${seafile_rpc_pipe_path}
    export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
    # Bug fix: quote "$@" so arguments containing spaces are passed through
    # to the python script unchanged.
    $PYTHON ${migrate} "$@"
}
+
check_python_executable;

if [ $# -gt 0 ];
then
    # Bug fix: iterate over "$@" (quoted) and quote ${param} so arguments
    # containing spaces do not break the -h/--help scan.
    for param in "$@";
    do
        if [ "${param}" = "-h" -o "${param}" = "--help" ];
        then
            usage;
            exit 1;
        fi
    done
fi

# Accept 2 args (migrate every repo) or 3 args (single repo).
if [ $# -ne 3 ] && [ $# -ne 2 ];
then
    usage;
    exit 1;
fi

do_migrate "$@";

echo "Done."
diff --git a/scripts/migrate.py b/scripts/migrate.py
new file mode 100755
index 0000000000..f0f8211328
--- /dev/null
+++ b/scripts/migrate.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python3
+#coding: utf-8
+
+import os
+import re
+import sys
+import logging
+import queue
+import threading
+from threading import Thread
+from uuid import UUID
+from seafobj.objstore_factory import SeafObjStoreFactory
+
+logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
+
class Worker(Thread):
    '''Thread that consumes tasks from a queue and applies `do_work` to
    each; a None task is the shutdown sentinel.'''

    def __init__(self, do_work, task_queue):
        Thread.__init__(self)
        self.do_work = do_work
        self.task_queue = task_queue

    def run(self):
        consume = self.do_work
        tasks = self.task_queue
        while True:
            try:
                item = tasks.get()
                if item is None:
                    return
                consume(item)
            except Exception as exc:
                logging.warning('Failed to execute task: %s' % exc)
            finally:
                # Mark the item processed even on failure (and for the
                # sentinel) so Queue.join() can complete.
                tasks.task_done()
+
class ThreadPool(object):
    '''Fixed-size pool of Worker threads fed from one bounded task queue.'''

    def __init__(self, do_work, nworker=20):
        self.do_work = do_work
        self.nworker = nworker
        # Bounded queue: producers block instead of queueing unbounded work.
        self.task_queue = queue.Queue(maxsize = 2000)

    def start(self):
        '''Spawn the worker threads.'''
        for _ in range(self.nworker):
            Worker(self.do_work, self.task_queue).start()

    def put_task(self, task):
        '''Enqueue one task (blocks while the queue is full).'''
        self.task_queue.put(task)

    def join(self):
        '''Block until every queued task is done, then send one None
        sentinel per worker to shut the pool down.'''
        self.task_queue.join()
        # notify all thread to stop
        for _ in range(self.nworker):
            self.task_queue.put(None)
+
class Task(object):
    '''One object to migrate: a (repo_id, repo_version, obj_id) triple.'''

    def __init__(self, repo_id, repo_version, obj_id):
        self.repo_id, self.repo_version, self.obj_id = (
            repo_id, repo_version, obj_id)
+
class ObjMigrateWorker(Thread):
    '''Migrate all objects of one data type (commits/fs/blocks) from
    orig_store to dest_store using a pool of worker threads.

    Failures are recorded in self.exit_code / self.exception instead of
    being raised, so callers can join() and then inspect the result.  When
    OBJECT_LIST_FILE_PATH is set in the environment, the ids of objects
    already present in the destination are cached in a file so interrupted
    migrations can resume without re-listing the destination store.
    '''

    def __init__(self, orig_store, dest_store, dtype, repo_id = None):
        Thread.__init__(self)
        self.lock = threading.Lock()  # guards writes to the object-list file
        self.dtype = dtype
        self.orig_store = orig_store
        self.dest_store = dest_store
        self.repo_id = repo_id        # None means migrate every repo
        self.thread_pool = ThreadPool(self.do_work)
        self.write_count = 0
        self.fetch_count = 0
        self.dest_objs = {}           # repo_id -> set of obj ids already in dest
        self.object_list_file_path = ''
        self.fd = None
        self.exit_code = 0
        self.exception = None

    def run(self):
        try:
            self._run()
        except Exception as e:
            # Record the failure for the joining caller instead of letting
            # the thread die with an unhandled exception.
            self.exit_code = 1
            self.exception = e

    def _run(self):
        # Cache-file name: base path, optional repo id, then the data type.
        if 'OBJECT_LIST_FILE_PATH' in os.environ:
            if self.repo_id:
                self.object_list_file_path = '.'.join(['_'.join([os.environ['OBJECT_LIST_FILE_PATH'], self.repo_id]), self.dtype])
            else:
                self.object_list_file_path = '.'.join([os.environ['OBJECT_LIST_FILE_PATH'], self.dtype])

        if self.object_list_file_path and \
        os.path.exists(self.object_list_file_path) and \
        os.path.getsize(self.object_list_file_path) > 0:
            # Warm start: a previous run cached the destination object list.
            logging.info('Start to load [%s] destination object from file' % self.dtype)
            with open(self.object_list_file_path, 'r') as f:
                for line in f:
                    obj = line.rstrip('\n').split('/', 1)
                    if self.invalid_obj(obj):
                        continue
                    self.fetch_count += 1
                    self.dest_objs.setdefault(obj[0], set()).add(obj[1])

        else:
            # Cold start: list the destination store, optionally writing the
            # cache file as we go.
            logging.info('Start to fetch [%s] object from destination' % self.dtype)
            if self.object_list_file_path:
                f = open(self.object_list_file_path, 'a')
            for obj in self.dest_store.list_objs(self.repo_id):
                if self.invalid_obj(obj):
                    continue
                self.fetch_count += 1
                self.dest_objs.setdefault(obj[0], set()).add(obj[1])
                if self.object_list_file_path:
                    f.write('/'.join(obj[:2]) + '\n')
                    if self.fetch_count % 100 == 0:
                        f.flush()
            if self.object_list_file_path:
                f.close()
        logging.info('[%s] [%d] objects exist in destination' % (self.dtype, self.fetch_count))

        if self.object_list_file_path:
            self.fd = open(self.object_list_file_path, 'a')
        logging.info('Start to migrate [%s] object' % self.dtype)
        self.thread_pool.start()
        self.migrate()
        self.thread_pool.join()
        if self.object_list_file_path:
            self.fd.close()
        logging.info('Complete migrate [%s] object' % self.dtype)

    def do_work(self, task):
        '''Copy one object to the destination unless it is already there.
        Runs on pool worker threads.'''
        try:
            exists = False
            if task.repo_id in self.dest_objs:
                if task.obj_id in self.dest_objs[task.repo_id]:
                    exists = True

        except Exception as e:
            logging.warning('[%s] Failed to check object %s existence from repo %s: %s' % (self.dtype, task.obj_id, task.repo_id, e))
            raise

        if not exists:
            try:
                data = self.orig_store.read_obj_raw(task.repo_id, task.repo_version, task.obj_id)
            except Exception as e:
                logging.warning('[%s] Failed to read object %s from repo %s: %s' % (self.dtype, task.obj_id, task.repo_id, e))
                raise

            try:
                self.dest_store.write_obj(data, task.repo_id, task.obj_id)
                # NOTE(review): write_count is bumped from several worker
                # threads without holding self.lock, so the periodic
                # logging/flush below is approximate.
                self.write_count += 1
                if self.write_count % 100 == 0:
                    logging.info('[%s] task: %s objects written to destination.', self.dtype, self.write_count)

                if self.object_list_file_path:
                    with self.lock:
                        self.fd.write('/'.join([task.repo_id, task.obj_id]) + '\n')
                        if self.write_count % 100 == 0:
                            self.fd.flush()
            except Exception as e:
                logging.warning('[%s] Failed to write object %s from repo %s: %s' % (self.dtype, task.obj_id, task.repo_id, e))
                raise

    def migrate(self):
        '''List the source objects and feed one Task per object to the pool.'''
        try:
            obj_list = self.orig_store.list_objs(self.repo_id)
        except Exception as e:
            logging.warning('[%s] Failed to list all objects: %s' % (self.dtype, e))
            raise

        for obj in obj_list:
            if self.invalid_obj(obj):
                continue
            repo_id = obj[0]
            obj_id = obj[1]
            task = Task(repo_id, 1, obj_id)
            self.thread_pool.put_task(task)

    def invalid_obj(self, obj):
        '''Return True unless obj looks like (uuid4 repo id, 40-hex obj id).'''
        if len(obj) < 2:
            return True
        try:
            UUID(obj[0], version = 4)
        except ValueError:
            return True
        # Bug fix: use a raw string for the regex; '\A' in a plain string is
        # an invalid escape sequence (SyntaxWarning on modern interpreters).
        if len(obj[1]) != 40 or not re.match(r'\A[0-9a-f]+\Z', obj[1]):
            return True
        return False
+
def main():
    '''Create source/destination object store factories and start one
    migration worker per data type (commits, fs, blocks).
    '''
    try:
        # The first factory reads the current SEAFILE_CENTRAL_CONF_DIR;
        # afterwards the env var is repointed at the destination config so
        # the second factory picks up the destination stores.
        orig_obj_factory = SeafObjStoreFactory()
        os.environ['SEAFILE_CENTRAL_CONF_DIR'] = os.environ['DEST_SEAFILE_CENTRAL_CONF_DIR']
    except KeyError:
        logging.warning('DEST_SEAFILE_CENTRAL_CONF_DIR environment variable is not set.\n')
        sys.exit()

    dest_obj_factory = SeafObjStoreFactory()

    for dtype in ('commits', 'fs', 'blocks'):
        worker = ObjMigrateWorker(orig_obj_factory.get_obj_store(dtype),
                                  dest_obj_factory.get_obj_store(dtype),
                                  dtype)
        worker.start()

if __name__ == '__main__':
    main()
diff --git a/scripts/migrate.sh b/scripts/migrate.sh
new file mode 100755
index 0000000000..deabe98aad
--- /dev/null
+++ b/scripts/migrate.sh
@@ -0,0 +1,80 @@
#!/bin/bash

echo ""

# Resolve this script's real location (following symlinks) and derive the
# standard Seafile layout relative to it.
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
default_conf_dir=${TOPDIR}/conf

# Python worker that performs the actual object migration.
migrate=${INSTALLPATH}/migrate.py

script_name=$0
function usage () {
    # Print a short usage banner for this script.
    local name
    name=$(basename "${script_name}")
    echo "usage : "
    echo "    ./${name} destination_config_file_path"
    echo ""
}
+
function check_python_executable() {
    # Locate a Python 3 interpreter and leave its name in $PYTHON.
    # A pre-set, executable $PYTHON is honored as-is.
    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
        return 0
    fi

    if which python3 2>/dev/null 1>&2; then
        PYTHON=python3
    elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
        echo
        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
        echo
        exit 1
    else
        # Derive e.g. "python3.6" from "Python 3.6.x".
        PYTHON="python"$(python --version | cut -b 8-10)
        # Fix: '!which' was parsed as a single (nonexistent) command name;
        # the negation operator must be a separate word.
        if ! which $PYTHON 2>/dev/null 1>&2; then
            echo
            echo "Can't find a python executable of $PYTHON in PATH"
            echo "Install $PYTHON before continue."
            echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
            echo
            exit 1
        fi
    fi
}
+
function do_migrate () {
    # Export the source server's conf locations, plus the destination conf dir
    # consumed by migrate.py via DEST_SEAFILE_CENTRAL_CONF_DIR, then run it.
    export CCNET_CONF_DIR=${default_ccnet_conf_dir}
    export SEAFILE_CONF_DIR=${default_seafile_data_dir}
    export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir}
    export DEST_SEAFILE_CENTRAL_CONF_DIR=${dest_seafile_central_conf_dir}

    # Make the bundled seafile/seahub python packages importable.
    export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH

    $PYTHON ${migrate}
}
+
check_python_executable;

# Show help and bail out if any argument is -h/--help.
if [ $# -gt 0 ];
then
    # Fix: quote "$@" and "${param}" so arguments containing whitespace
    # survive word splitting and the test does not break.
    for param in "$@";
    do
        if [ "${param}" = "-h" -o "${param}" = "--help" ];
        then
            usage;
            exit 1;
        fi
    done
fi

# Exactly one argument (the destination conf dir) is required.
if [ $# -ne 1 ];
then
    usage;
    exit 1;
fi

dest_seafile_central_conf_dir="$1"
do_migrate;

echo "Done."
diff --git a/scripts/pro.py b/scripts/pro.py
new file mode 100755
index 0000000000..69cb402eb9
--- /dev/null
+++ b/scripts/pro.py
@@ -0,0 +1,817 @@
#!/usr/bin/env python3

'''
Setup/Start/Stop the extra components of Seafile Professional

The directory layout:
- haiwen
  - seafile-server-1.8.0
    - seafile.sh
    - seahub.sh
    - seafile/
    - seahub/
    - pro
      - pro.py
      - python
        - sqlalchemy/
        - pyes/
        - thrift/
        - libevent
        - python-daemon/
        - lockfile/
        - seafes/
        - seafevents/
        - seaf-dav/
      - elasticsearch/
      - misc
        - seahub_extra.sql

  - seafile-license.txt
  - seahub.db
  - seahub_settings.py
  - ccnet/
  - seafile-data/
  - seahub-data/
  - pro-data
    - search/
      - data/
      - logs/
    - seafevents.conf
    - seafdav.conf
    - seafevents.db
    - index.log
    - seafevents.log
'''
+
+import os
+import sys
+import glob
+import subprocess
+import io
+import getpass
+
try:
    import pymysql
except ImportError:
    # pymysql is only needed for the mysql setup path; sqlite deployments may
    # not have it installed.  Catch ImportError specifically — the original
    # bare 'except:' would also swallow unrelated errors such as
    # KeyboardInterrupt or SystemExit.
    pass
+
+import configparser
+
+########################
+## Helper functions
+########################
+
class InvalidAnswer(Exception):
    '''Raised by answer validators when user input cannot be accepted.'''

    def __init__(self, msg):
        super().__init__()
        self.msg = msg

    def __str__(self):
        return self.msg
+
class Utils(object):
    '''Groups all helper functions here'''
    @staticmethod
    def highlight(content):
        '''Add ANSI color to content to get it highlighted on terminal'''
        return '\x1b[33m%s\x1b[m' % content

    @staticmethod
    def info(msg, newline=True):
        '''Write msg to stdout, appending a newline unless newline=False.'''
        sys.stdout.write(msg)
        if newline:
            sys.stdout.write('\n')

    @staticmethod
    def error(msg):
        '''Print error and exit'''
        print()
        print('Error: ' + msg)
        sys.exit(1)

    @staticmethod
    def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
        '''Run a program and wait it to finish, and return its exit code. The
        standard output of this program is suppressed.

        '''
        with open(os.devnull, 'w') as devnull:
            if suppress_stdout:
                stdout = devnull
            else:
                stdout = sys.stdout

            if suppress_stderr:
                stderr = devnull
            else:
                stderr = sys.stderr

            proc = subprocess.Popen(argv,
                                    cwd=cwd,
                                    stdout=stdout,
                                    stderr=stderr,
                                    env=env)
            return proc.wait()

    @staticmethod
    def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
        '''Like run_argv but specify a command line string instead of argv'''
        with open(os.devnull, 'w') as devnull:
            if suppress_stdout:
                stdout = devnull
            else:
                stdout = sys.stdout

            if suppress_stderr:
                stderr = devnull
            else:
                stderr = sys.stderr

            # shell=True: cmdline is interpreted by the shell, so callers must
            # only pass trusted strings.
            proc = subprocess.Popen(cmdline,
                                    cwd=cwd,
                                    stdout=stdout,
                                    stderr=stderr,
                                    env=env,
                                    shell=True)
            return proc.wait()

    @staticmethod
    def prepend_env_value(name, value, env=None, seperator=':'):
        '''Prepend "value" to the (path-like) environment variable "name",
        modifying os.environ unless an explicit env mapping is given.'''
        if env is None:
            env = os.environ

        try:
            current_value = env[name]
        except KeyError:
            current_value = ''

        new_value = value
        if current_value:
            new_value += seperator + current_value

        env[name] = new_value

    @staticmethod
    def must_mkdir(path):
        '''Create a directory, exit on failure'''
        try:
            os.mkdir(path)
        except OSError as e:
            Utils.error('failed to create directory %s:%s' % (path, e))

    @staticmethod
    def find_in_path(prog):
        '''Return the full path of "prog" if found on PATH, else None.'''
        if 'win32' in sys.platform:
            sep = ';'
        else:
            sep = ':'

        dirs = os.environ['PATH'].split(sep)
        for d in dirs:
            d = d.strip()
            if d == '':
                continue
            path = os.path.join(d, prog)
            if os.path.exists(path):
                return path

        return None

    @staticmethod
    def read_config(fn=None):
        '''Return a case sensitive ConfigParser by reading the file "fn"'''
        cp = configparser.ConfigParser()
        # Identity optionxform keeps option names case sensitive.
        cp.optionxform = str
        if fn:
            cp.read(fn)

        return cp

    @staticmethod
    def write_config(cp, fn):
        '''Write the ConfigParser "cp" to the file "fn".'''
        with open(fn, 'w') as fp:
            cp.write(fp)

    @staticmethod
    def ask_question(desc,
                     key=None,
                     note=None,
                     default=None,
                     validate=None,
                     yes_or_no=False,
                     password=False):
        '''Ask a question, return the answer.
        @desc description, e.g. "What is the port of ccnet?"

        @key a name to represent the target of the question, e.g. "port for
        ccnet server"

        @note additional information for the question, e.g. "Must be a valid
        port number"

        @default the default value of the question. If the default value is
        not None, when the user enter nothing and press [ENTER], the default
        value would be returned

        @validate a function that takes the user input as the only parameter
        and validate it. It should return a validated value, or throws an
        "InvalidAnswer" exception if the input is not valid.

        @yes_or_no If true, the user must answer "yes" or "no", and a boolean
        value would be returned

        @password If true, the user input would not be echoed to the
        console

        '''
        assert key or yes_or_no
        # Format description
        print()
        if note:
            desc += '\n' + note

        desc += '\n'
        if yes_or_no:
            desc += '[ yes or no ]'
        else:
            if default:
                desc += '[ default "%s" ]' % default
            else:
                desc += '[ %s ]' % key

        desc += ' '
        # Loop until an acceptable answer is produced.
        while True:
            # prompt for user input
            if password:
                answer = getpass.getpass(desc).strip()
            else:
                answer = input(desc).strip()

            # No user input: use default
            if not answer:
                if default:
                    answer = default
                else:
                    continue

            # Have user input: validate answer
            if yes_or_no:
                if answer not in ['yes', 'no']:
                    print(Utils.highlight('\nPlease answer yes or no\n'))
                    continue
                else:
                    return answer == 'yes'
            else:
                if validate:
                    try:
                        return validate(answer)
                    except InvalidAnswer as e:
                        print(Utils.highlight('\n%s\n' % e))
                        continue
                else:
                    return answer

    @staticmethod
    def validate_port(port):
        '''Return "port" as an int in [1, 65535]; raise InvalidAnswer otherwise
        (caught by ask_question's retry loop).'''
        try:
            port = int(port)
        except ValueError:
            raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port))

        if port <= 0 or port > 65535:
            raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port))

        return port

    @staticmethod
    def get_python_executable():
        '''Find a suitable python executable'''
        try_list = [
            'python3',
        ]

        for prog in try_list:
            path = Utils.find_in_path(prog)
            if path is not None:
                return path

        # NOTE(review): the default 'python' makes this always truthy, so the
        # error branch below is effectively unreachable.
        path = os.environ.get('PYTHON', 'python')

        if not path:
            Utils.error('Can not find python executable')

        return path

    @staticmethod
    def pkill(process):
        '''Kill the program with the given name'''
        argv = [
            'pkill', '-f', process
        ]

        Utils.run_argv(argv)
+
class EnvManager(object):
    '''System environment and directory layout.

    All well-known paths are computed relative to the installation directory,
    i.e. the parent of the directory containing this script
    (<top_dir>/seafile-server-x.y.z/pro/pro.py).
    '''
    def __init__(self):
        self.install_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        self.top_dir = os.path.dirname(self.install_path)
        self.bin_dir = os.path.join(self.install_path, 'seafile', 'bin')
        # Fix: this was assigned twice in the original; keep one assignment.
        self.central_config_dir = os.path.join(self.top_dir, 'conf')

        self.pro_data_dir = os.path.join(self.top_dir, 'pro-data')
        self.pro_program_dir = os.path.join(self.install_path, 'pro')
        self.pro_pylibs_dir = os.path.join(self.pro_program_dir, 'python')
        self.pro_misc_dir = os.path.join(self.pro_program_dir, 'misc')

        self.seafes_dir = os.path.join(self.pro_pylibs_dir, 'seafes')
        self.seahub_dir = os.path.join(self.install_path, 'seahub')

        self.ccnet_dir = os.path.join(self.top_dir, 'ccnet')
        self.seafile_dir = os.path.join(self.top_dir, 'seafile-data')
        self.seafile_rpc_pipe_path = os.path.join(self.install_path, 'runtime')

    def get_seahub_env(self):
        '''Return a copy of os.environ extended with the conf-dir and
        PYTHONPATH entries needed by seahub/seafes commands.'''
        env = dict(os.environ)
        env['CCNET_CONF_DIR'] = self.ccnet_dir
        env['SEAFILE_CONF_DIR'] = self.seafile_dir
        env['SEAFILE_CENTRAL_CONF_DIR'] = self.central_config_dir
        env['SEAFILE_RPC_PIPE_PATH'] = self.seafile_rpc_pipe_path
        env['SEAFES_DIR'] = self.seafes_dir
        env['SEAHUB_DIR'] = self.seahub_dir
        self.setup_python_path(env)
        return env

    def setup_python_path(self, env):
        '''Prepend the pro/seahub library directories to PYTHONPATH in "env".'''
        extra_python_path = [
            self.pro_pylibs_dir,

            os.path.join(self.top_dir, 'conf'), # LDAP sync has to access seahub_settings.py
            os.path.join(self.install_path, 'seahub', 'thirdpart'),
            os.path.join(self.install_path, 'seahub-extra'),
            os.path.join(self.install_path, 'seahub-extra', 'thirdparts'),

            os.path.join(self.install_path, 'seafile/lib/python3/site-packages'),
            os.path.join(self.install_path, 'seafile/lib64/python3/site-packages'),
        ]

        for path in extra_python_path:
            Utils.prepend_env_value('PYTHONPATH', path, env=env)
+
+########################
+## END helper functions
+########################
+
class Elasticsearch(object):
    '''Start/stop the bundled elasticsearch daemon.'''

    def __init__(self):
        search_dir = os.path.join(env_mgr.pro_data_dir, 'search')
        self.es_executable = os.path.join(env_mgr.pro_program_dir,
                                          'elasticsearch', 'bin', 'elasticsearch')
        self.es_logs_dir = os.path.join(search_dir, 'logs')
        self.es_data_dir = os.path.join(search_dir, 'data')

    def start(self):
        '''Launch elasticsearch, redirecting its log/data directories into
        pro-data via -D system properties.'''
        command = [
            self.es_executable,
            '-Des.path.logs=%s' % self.es_logs_dir,
            '-Des.path.data=%s' % self.es_data_dir,
        ]
        Utils.run_argv(command, suppress_stdout=True, suppress_stderr=True)

    def stop(self):
        '''Kill the running elasticsearch JVM by its bootstrap class name.'''
        Utils.pkill('org.elasticsearch.bootstrap.ElasticSearch')
+
+
class DBConf(object):
    '''Base class for the seafevents database configuration strategies.'''
    TYPE_SQLITE = 'sqlite'
    TYPE_MYSQL = 'mysql'

    DB_SECTION = 'DATABASE'

    def __init__(self, db_type):
        self.db_type = db_type

    def generate_conf(self, config):
        # Subclasses fill in the [DATABASE] section of "config".
        raise NotImplementedError

    def create_extra_tables(self):
        # Subclasses create the seahub-extra database tables.
        raise NotImplementedError

    def generate_config_text(self):
        '''Render the [DATABASE] section to a string.'''
        parser = Utils.read_config()
        self.generate_conf(parser)

        sink = io.StringIO()
        parser.write(sink)
        sink.flush()
        return sink.getvalue()
+
class MySQLDBConf(DBConf):
    '''Database config strategy for mysql deployments (requires pymysql).'''
    def __init__(self):
        DBConf.__init__(self, self.TYPE_MYSQL)

        # Connection parameters; filled in by the caller before use.
        self.mysql_host = ''
        self.mysql_port = ''
        self.mysql_user = ''
        self.mysql_password = ''
        self.mysql_db = ''

        # pymysql connection; created lazily by get_conn().
        self.conn = None

    def generate_conf(self, config):
        '''Fill in the [DATABASE] section, e.g.:'''
        # [DATABASE]
        # type=mysql
        # path=x.db
        # username=seafevents
        # password=seafevents
        # name=seafevents
        # host=localhost
        config.add_section(self.DB_SECTION)
        config.set(self.DB_SECTION, 'type', 'mysql')

        # host/port are optional; omit them when unset.
        if self.mysql_host:
            config.set(self.DB_SECTION, 'host', self.mysql_host)

        if self.mysql_port:
            config.set(self.DB_SECTION, 'port', str(self.mysql_port))

        config.set(self.DB_SECTION, 'username', self.mysql_user)
        config.set(self.DB_SECTION, 'password', self.mysql_password)
        config.set(self.DB_SECTION, 'name', self.mysql_db)

    def create_extra_tables(self):
        '''Run each statement of the seahub-extra schema against the db.'''
        self.get_conn()
        sql_file = os.path.join(env_mgr.pro_misc_dir, 'seahub_extra.mysql.sql')
        with open(sql_file, 'r') as fp:
            content = fp.read()

        # Naive split on ';' — assumes the schema file contains no literal
        # semicolons inside strings.
        sqls = content.split(';')

        for sql in sqls:
            sql = sql.strip()
            if not sql:
                continue

            # Diagnostic output kept from the original.
            print('>>> sql is', sql, len(sql))
            self.exec_sql(sql)

    def exec_sql(self, sql):
        '''Execute one statement; exit the process on failure.'''
        # NOTE(review): no explicit commit/close — presumably fine because the
        # schema is DDL (implicit commit in MySQL); confirm.
        cursor = self.conn.cursor()
        try:
            cursor.execute(sql)
        except Exception as e:
            if isinstance(e, pymysql.err.OperationalError):
                Utils.error('Failed to create extra tables: %s' % e.args[1])
            else:
                Utils.error('Failed to create extra tables: %s' % e)

    def get_conn(self):
        '''Open the pymysql connection using the stored parameters.'''
        print('host is', self.mysql_host)
        print('port is', self.mysql_port)
        kwargs = dict(user=self.mysql_user,
                      passwd=self.mysql_password,
                      db=self.mysql_db)
        # host/port default to pymysql's own defaults when unset.
        if self.mysql_port:
            kwargs['port'] = self.mysql_port
        if self.mysql_host:
            kwargs['host'] = self.mysql_host

        try:
            self.conn = pymysql.connect(**kwargs)
        except Exception as e:
            if isinstance(e, pymysql.err.OperationalError):
                Utils.error('Failed to connect to mysql database %s: %s' % (self.mysql_db, e.args[1]))
            else:
                Utils.error('Failed to connect to mysql database %s: %s' % (self.mysql_db, e))
+
class SQLiteDBConf(DBConf):
    '''Database config strategy for sqlite3 deployments.'''

    def __init__(self):
        DBConf.__init__(self, self.TYPE_SQLITE)
        # seafevents keeps its own sqlite database under pro-data.
        self.db_path = os.path.join(env_mgr.pro_data_dir, 'seafevents.db')

    def generate_conf(self, config):
        '''Fill in a minimal [DATABASE] section pointing at seafevents.db.'''
        config.add_section(self.DB_SECTION)
        config.set(self.DB_SECTION, 'type', 'sqlite3')
        config.set(self.DB_SECTION, 'path', self.db_path)

    def create_extra_tables(self):
        '''Import the seahub-extra schema into seahub.db via the sqlite3 CLI.'''
        seahub_db = os.path.join(env_mgr.top_dir, 'seahub.db')
        sql_file = os.path.join(env_mgr.pro_misc_dir, 'seahub_extra.sqlite3.sql')

        Utils.info('Create extra database tables ... ', newline=False)
        command = 'sqlite3 %s < %s' % (seahub_db, sql_file)
        if os.system(command) != 0:
            Utils.error('\nfailed to create seahub extra database tables')
        Utils.info('Done')
+
+
class ProfessionalConfigurator(object):
    '''Main abstract class for the config process '''
    def __init__(self, args, migrate=False):
        # args: parsed argparse namespace from the "setup" subcommand.
        # migrate: True when upgrading an existing community-edition server.
        self.args = args
        self.migrate = migrate
        self.db_type = ''
        self.db_config = None   # database config strategy
        self.seafevents_conf = os.path.join(env_mgr.central_config_dir, 'seafevents.conf')

    def check_pre_condition(self):
        # Subclasses verify prerequisites (e.g. java present) before config().
        raise NotImplementedError

    def config(self):
        # Subclasses choose and populate self.db_config.
        raise NotImplementedError

    def generate(self):
        '''Write out the generated configuration files.'''
        self.generate_seafevents_conf()

    def generate_seafevents_conf(self):
        '''Render seafevents.conf from the template below, with the
        [DATABASE] section supplied by the selected db_config strategy.'''
        template = '''\
%(db_config_text)s

[AUDIT]
enabled = true

[INDEX FILES]
enabled = true
interval = 10m

highlight = fvh

## If true, indexes the contents of office/pdf files while updating search index
## Note: If you change this option from "false" to "true", then you need to clear the search index and update the index again. See the FAQ for details.
index_office_pdf = true

[SEAHUB EMAIL]
enabled = true

## interval of sending Seahub email. Can be s(seconds), m(minutes), h(hours), d(days)
interval = 30m

# Enable statistics
[STATISTICS]
enabled=true
'''
        db_config_text = self.db_config.generate_config_text()
        # pro-data may hold credentials; keep it private to the server user.
        if not os.path.exists(env_mgr.pro_data_dir):
            os.makedirs(env_mgr.pro_data_dir)
        os.chmod(env_mgr.pro_data_dir, 0o700)

        with open(self.seafevents_conf, 'w') as fp:
            fp.write(template % dict(db_config_text=db_config_text))
+
class MigratingProfessionalConfigurator(ProfessionalConfigurator):
    '''This scripts is used standalone to migrate from community version to
    professional version

    '''
    def __init__(self, args):
        ProfessionalConfigurator.__init__(self, args, migrate=True)

    def check_pre_condition(self):
        # Elasticsearch needs a JVM, so java must be installed up front.
        self.check_java()

    def config(self):
        self.detect_db_type()
        # NOTE(review): extra-table creation is deliberately left disabled
        # here — presumably handled elsewhere in the upgrade flow; confirm.
        # self.create_extra_tables()
        self.update_avatars_link()

    def detect_db_type(self):
        '''Read database info from seahub_settings.py'''
        # seahub_settings.py lives in the central conf dir; make it importable.
        sys.path.insert(0, env_mgr.central_config_dir)
        try:
            from seahub_settings import DATABASES # pylint: disable=F0401
        except ImportError:
            print('Failed to import "DATABASES" from seahub_settings.py, assuming sqlite3')
            self.db_config = SQLiteDBConf()
            return

        try:
            default_config = DATABASES['default']
            if default_config['ENGINE'] == 'django.db.backends.mysql':
                # Copy the Django connection settings into our own strategy.
                db_config = MySQLDBConf()
                db_config.mysql_host = default_config.get('HOST', '')
                db_config.mysql_port = default_config.get('PORT', '')
                db_config.mysql_user = default_config.get('USER', '')
                db_config.mysql_password = default_config.get('PASSWORD', '')
                db_config.mysql_db = default_config['NAME']

                # Django stores PORT as a string; pymysql wants an int.
                if db_config.mysql_port:
                    db_config.mysql_port = int(db_config.mysql_port)

                print('Your seafile server is using mysql')

                self.db_config = db_config
            else:
                print('Your seafile server is using sqlite3')
                self.db_config = SQLiteDBConf()

        except KeyError:
            Utils.error('Error in your config %s' % \
                        os.path.join(env_mgr.top_dir, 'seahub_settings.py'))

    def update_avatars_link(self):
        '''Run the bundled minor-upgrade script to fix the avatars symlink.'''
        minor_upgrade_script = os.path.join(env_mgr.install_path, 'upgrade', 'minor-upgrade.sh')
        argv = [
            minor_upgrade_script
        ]

        if Utils.run_argv(argv) != 0:
            Utils.error('failed to update avatars folder')

    def check_java(self):
        '''Exit with an install hint if no java executable is on PATH.'''
        Utils.info('\nChecking java ... ', newline=False)
        if not Utils.find_in_path('java'):
            msg = '''\nJava is not found. instal it first.\n
    On Debian/Ubuntu:     apt-get install default-jre
    On CentOS/RHEL:       yum install jre
    '''
            Utils.error(msg)

        Utils.info('Done')

    def create_extra_tables(self):
        '''Create seahub-extra database tables'''
        self.db_config.create_extra_tables()
+
class SetupProfessionalConfigurator(ProfessionalConfigurator):
    '''This script is invoked by setup-seafile.sh/setup-seafile-mysql.sh to
    generate seafile pro related conf

    To setup sqlite3:
    ./pro.py setup

    To setup mysql:
    ./pro.py setup --mysql
                   --mysql_host=
                   --mysql_port=
                   --mysql_user=
                   --mysql_password=
                   --mysql_db=

    '''
    def __init__(self, args):
        ProfessionalConfigurator.__init__(self, args, migrate=False)

    def config(self):
        # Pick the db strategy straight from the command-line flags; the
        # mysql_* values are passed through from the setup script.
        if self.args.mysql:
            db_config = MySQLDBConf()
            db_config.mysql_host = self.args.mysql_host
            db_config.mysql_port = self.args.mysql_port
            db_config.mysql_user = self.args.mysql_user
            db_config.mysql_password = self.args.mysql_password
            db_config.mysql_db = self.args.mysql_db
        else:
            db_config = SQLiteDBConf()

        self.db_config = db_config

    def check_pre_condition(self):
        # The setup scripts have already verified the environment.
        pass
+
def do_setup(args):
    '''Entry point for the "setup" subcommand: pick a configurator and run it.'''
    global pro_config

    if args.migrate:
        chosen = MigratingProfessionalConfigurator(args)
    else:
        chosen = SetupProfessionalConfigurator(args)
    pro_config = chosen

    # check -> config -> write out the conf files.
    pro_config.check_pre_condition()
    pro_config.config()
    pro_config.generate()
+
def handle_search_commands(args):
    '''Dispatch the "search" subcommand flags (--update wins over --clear).'''
    if args.update:
        update_search_index()
        return
    if args.clear:
        delete_search_index()
+
def get_seafes_env():
    '''Seahub environment plus the seafevents config file location for seafes.'''
    env = env_mgr.get_seahub_env()
    env['EVENTS_CONFIG_FILE'] = os.path.join(
        env_mgr.central_config_dir, 'seafevents.conf')
    return env
+
def update_search_index():
    '''Run the seafes indexer to (re)build the full-text search index.'''
    cmd = [
        Utils.get_python_executable(),
        '-m', 'seafes.index_local',
        '--loglevel', 'debug',
        'update',
    ]

    Utils.info('\nUpdating search index, this may take a while...\n')

    Utils.run_argv(cmd, env=get_seafes_env())
+
def delete_search_index():
    '''Interactively confirm, then wipe the search index via seafes.'''
    # Empty input accepts the default answer ("y").
    answer = None
    while answer not in ('y', 'n', ''):
        prompt = 'Delete seafile search index ([y]/n)? '
        answer = input(prompt).strip()

    if answer == 'n':
        return

    cmd = [
        Utils.get_python_executable(),
        '-m', 'seafes.index_local',
        '--loglevel', 'debug',
        'clear',
    ]

    Utils.info('\nDelete search index, this may take a while...\n')

    Utils.run_argv(cmd, env=get_seafes_env())
+
def handle_ldap_sync_commands(args):
    '''Run one LDAP sync pass; -t/--test performs a dry run.'''
    argv = [
        Utils.get_python_executable(),
        '-m', 'seafevents.ldap_syncer.run_ldap_sync',
    ]
    if args.test:
        argv.append('-t')

    Utils.run_argv(argv, env=env_mgr.get_seahub_env())
+
def handle_virus_scan_commands(args):
    '''Run the virus scanner against the configured seafevents.conf.'''
    conf_file = os.path.join(env_mgr.central_config_dir, 'seafevents.conf')
    Utils.run_argv([
        Utils.get_python_executable(),
        '-m', 'seafevents.virus_scanner.run_virus_scan',
        '-c', conf_file,
    ], env=env_mgr.get_seahub_env())
+
# Selected ProfessionalConfigurator instance; assigned by do_setup().
pro_config = None
# Process-wide directory-layout helper, shared by all commands.
env_mgr = EnvManager()
+
def main():
    # Very old interpreters lack argparse; fall back to the bundled egg.
    try:
        import argparse
    except ImportError:
        sys.path.insert(0, glob.glob(os.path.join(env_mgr.pro_pylibs_dir, 'argparse*.egg'))[0])
        import argparse

    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(title='subcommands', description='')

    # setup
    parser_setup = subparsers.add_parser('setup', help='Setup extra components of seafile pro')
    parser_setup.set_defaults(func=do_setup)
    parser_setup.add_argument('--migrate', help='migrate from community version', action='store_true')

    # for non-migrate setup
    parser_setup.add_argument('--mysql', help='use mysql', action='store_true')
    parser_setup.add_argument('--mysql_host')
    parser_setup.add_argument('--mysql_port')
    parser_setup.add_argument('--mysql_user')
    parser_setup.add_argument('--mysql_password')
    parser_setup.add_argument('--mysql_db')

    # search
    parser_search = subparsers.add_parser('search', help='search related utility commands')
    parser_search.add_argument('--update', help='update seafile search index', action='store_true')
    parser_search.add_argument('--clear', help='delete seafile search index', action='store_true')
    parser_search.set_defaults(func=handle_search_commands)

    # ldapsync
    parser_ldap_sync = subparsers.add_parser('ldapsync', help='ldap sync commands')
    parser_ldap_sync.add_argument('-t', '--test', help='test ldap sync', action='store_true')
    parser_ldap_sync.set_defaults(func=handle_ldap_sync_commands)

    # virus scan
    parser_virus_scan = subparsers.add_parser('virus_scan', help='virus scan commands')
    parser_virus_scan.set_defaults(func=handle_virus_scan_commands)

    # No subcommand given: show help instead of erroring out.
    if len(sys.argv) == 1:
        print(parser.format_help())
        return

    # Dispatch to the handler registered via set_defaults(func=...).
    args = parser.parse_args()
    args.func(args)

if __name__ == '__main__':
    main()
diff --git a/scripts/remove-objs.py b/scripts/remove-objs.py
new file mode 100644
index 0000000000..445e415390
--- /dev/null
+++ b/scripts/remove-objs.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python3
+
+import sys
+import os
+import logging
+from seafobj.objstore_factory import objstore_factory
+
+logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
+
def main(argv):
    '''Remove the objects listed in per-repo object-list files from a storage backend.

    argv: [script_name, repo_id, orig_storage_id].  For each object class
    (commits/fs/blocks) the file
    "<OBJECT_LIST_FILE_PATH>_<repo_id>.<dtype>" must contain one
    "repo_id/obj_id" entry per line.
    '''
    # Fix: validate argv up front instead of crashing with IndexError.
    if len(argv) < 3:
        logging.warning('Usage: remove-objs.py <repo_id> <orig_storage_id>')
        sys.exit(1)

    repo_id = argv[1]
    orig_storage_id = argv[2]

    dtypes = ['commits', 'fs', 'blocks']
    for dtype in dtypes:
        if 'OBJECT_LIST_FILE_PATH' in os.environ:
            object_list_file_path = '.'.join(['_'.join([os.environ['OBJECT_LIST_FILE_PATH'], repo_id]), dtype])
        else:
            logging.warning('OBJECT_LIST_FILE_PATH environment does not exist.')
            # Fix: exit non-zero so the calling shell script can detect failure
            # (sys.exit() with no argument exits with status 0).
            sys.exit(1)

        obj_stores = objstore_factory.get_obj_stores(dtype)
        # If the storage id passed in does not exist in conf, stop removing.
        if orig_storage_id not in obj_stores:
            logging.warning('Storage id passed in does not exist in configuration.\n')
            sys.exit(1)

        orig_store = obj_stores[orig_storage_id]

        with open(object_list_file_path, 'r') as f:
            for line in f:
                # Each line is "repo_id/obj_id"; split only on the first '/'.
                obj = line.rstrip('\n').split('/', 1)
                try:
                    orig_store.remove_obj(obj[0], obj[1])
                except Exception as e:
                    # Best effort: log and continue with the remaining objects.
                    logging.warning('Failed to remove object %s from repo %s:%s' % (obj[1], obj[0], e))

    logging.info('The process of remove repo [%s] is over.\n', repo_id)

if __name__ == '__main__':
    main(sys.argv)
diff --git a/scripts/remove-objs.sh b/scripts/remove-objs.sh
new file mode 100755
index 0000000000..41d8043e61
--- /dev/null
+++ b/scripts/remove-objs.sh
@@ -0,0 +1,79 @@
#!/bin/bash

echo ""

# Resolve this script's real location (following symlinks) and derive the
# standard Seafile layout relative to it.
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
default_conf_dir=${TOPDIR}/conf
seafile_rpc_pipe_path=${INSTALLPATH}/runtime
# Python worker that actually removes the listed objects.
remove=${INSTALLPATH}/remove-objs.py

script_name=$0
function usage () {
    # Print a short usage banner.
    # Fix: the original ended the "<storage id>" argument line with a
    # trailing backslash, which joined the following 'echo""' onto the same
    # command — printing a literal "echo" and never emitting the blank line.
    echo "usage : "
    echo "    ./$(basename ${script_name})" \
         "<repo id to migrate>" \
         "<storage id>"
    echo ""
}
+
function check_python_executable() {
    # Locate a Python 3 interpreter and leave its name in $PYTHON.
    # A pre-set, executable $PYTHON is honored as-is.
    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
        return 0
    fi

    if which python3 2>/dev/null 1>&2; then
        PYTHON=python3
    elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
        echo
        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
        echo
        exit 1
    else
        # Derive e.g. "python3.6" from "Python 3.6.x".
        PYTHON="python"$(python --version | cut -b 8-10)
        # Fix: '!which' was parsed as a single (nonexistent) command name;
        # the negation operator must be a separate word.
        if ! which $PYTHON 2>/dev/null 1>&2; then
            echo
            echo "Can't find a python executable of $PYTHON in PATH"
            echo "Install $PYTHON before continue."
            echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
            echo
            exit 1
        fi
    fi
}
+
function do_remove () {
    # Point the worker at this server's conf dirs and RPC pipe, then forward
    # all script arguments (repo id, storage id) to remove-objs.py.
    export CCNET_CONF_DIR=${default_ccnet_conf_dir}
    export SEAFILE_CONF_DIR=${default_seafile_data_dir}
    export SEAFILE_CENTRAL_CONF_DIR=${default_conf_dir}
    export SEAFILE_RPC_PIPE_PATH=${seafile_rpc_pipe_path}
    export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
    # Fix: quote "$@" so arguments containing whitespace survive word splitting.
    $PYTHON ${remove} "$@"
}
+
check_python_executable;

# Show help and bail out if any argument is -h/--help.
if [ $# -gt 0 ];
then
    # Fix: quote "$@" and "${param}" so whitespace in arguments cannot break
    # the loop or the test expression.
    for param in "$@";
    do
        if [ "${param}" = "-h" -o "${param}" = "--help" ];
        then
            usage;
            exit 1;
        fi
    done
fi

# Exactly two arguments are required: <repo id> <storage id>.
if [ $# -ne 2 ];
then
    usage;
    exit 1;
fi

do_remove "$@";

echo "Done."
diff --git a/scripts/run_index_master.sh b/scripts/run_index_master.sh
new file mode 100755
index 0000000000..12ec2d43e3
--- /dev/null
+++ b/scripts/run_index_master.sh
@@ -0,0 +1,138 @@
+#!/bin/bash
+
+SCRIPT=$(readlink -f "$0")
+INSTALLPATH=$(dirname "${SCRIPT}")
+TOPDIR=$(dirname "${INSTALLPATH}")
+default_ccnet_conf_dir=${TOPDIR}/ccnet
+default_seafile_data_dir=${TOPDIR}/seafile-data
+central_config_dir=${TOPDIR}/conf
+pro_pylibs_dir=${INSTALLPATH}/pro/python
+pidfile=${INSTALLPATH}/runtime/index_master.pid
+
+
+script_name=$0
+function usage () {
+    echo "Usage: "
+    echo
+    echo "  $(basename ${script_name}) { start | stop | restart | python-env }"
+}
+
+if [[ $1 != "start" && $1 != "stop" && $1 != "restart" && $1 != "python-env" ]]; then
+    usage;
+    exit 1;
+fi
+
+# Locate a Python 3 interpreter and export its name in $PYTHON.
+function check_python_executable() {
+    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
+        return 0
+    fi
+
+    if which python3 2>/dev/null 1>&2; then
+        PYTHON=python3
+    elif ! (python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
+        echo
+        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
+        echo
+        exit 1
+    else
+        PYTHON="python"$(python --version | cut -b 8-10)
+        # '!which' (no space) is parsed by bash as a command literally
+        # named '!which', not as a negated test; the space is required.
+        if ! which $PYTHON 2>/dev/null 1>&2; then
+            echo
+            echo "Can't find a python executable of $PYTHON in PATH"
+            echo "Install $PYTHON before continue."
+            echo "Or if you installed it in a non-standard PATH, set the PYTHON environment variable to it"
+            echo
+            exit 1
+        fi
+    fi
+}
+
+function validate_seafile_data_dir () {
+    if [[ ! -d ${default_seafile_data_dir} ]]; then
+        echo "Error: there is no seafile server data directory."
+        echo "Have you run setup-seafile.sh before this?"
+        echo ""
+        exit 1;
+    fi
+}
+
+# Ensure ${TOPDIR}/logs exists and export it as LOG_DIR.
+function prepare_log_dir() {
+    logdir=${TOPDIR}/logs
+    # Fix typo: the guard previously tested the undefined variable
+    # 'logsdir', so the existence check never matched.
+    if ! [[ -d ${logdir} ]]; then
+        if ! mkdir -p "${logdir}"; then
+            echo "ERROR: failed to create logs dir \"${logdir}\""
+            exit 1
+        fi
+    fi
+    export LOG_DIR=${logdir}
+}
+
+function before_start() {
+    check_python_executable;
+    prepare_log_dir;
+    validate_seafile_data_dir;
+
+    export SEAFILE_CONF_DIR=${default_seafile_data_dir}
+    export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir}
+    export SEAFES_DIR=$pro_pylibs_dir/seafes
+    export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
+    export PYTHONPATH=$PYTHONPATH:$pro_pylibs_dir
+    export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/
+    export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/thirdparts
+    export EVENTS_CONFIG_FILE=${SEAFILE_CENTRAL_CONF_DIR}/seafevents.conf
+    export INDEX_MASTER_CONFIG_FILE=${SEAFILE_CENTRAL_CONF_DIR}/index-master.conf
+}
+
+# Run an arbitrary python command inside the prepared seafes environment.
+# $1 is the "python-env" subcommand itself; forward the rest, quoted so
+# arguments containing spaces are preserved.
+run_python() {
+    before_start;
+    $PYTHON "${@:2}"
+}
+
+start_index_master() {
+    before_start;
+    nohup $PYTHON -m seafes.index_master --loglevel debug --logfile ${logdir}/index_master.log start & echo $! > $pidfile
+    sleep 2
+    if ! pgrep -f "seafes.index_master" 2>/dev/null 1>&2; then
+        printf "\033[33mError:Index master failed to start.\033[m\n"
+        echo "Please try to run \"./run_index_master.sh start\" again"
+        exit 1;
+    fi
+    echo
+    echo "Index master is started"
+    echo
+}
+
+stop_index_master() {
+    if pgrep -f "seafes.index_worker" 2>/dev/null 1>&2; then
+        printf "\033[33mError:Index worker need be stopped first.\033[m\n"
+        exit 1;
+    fi
+
+    if [[ -f ${pidfile} ]]; then
+        pid=$(cat "${pidfile}")
+        echo "Stopping index master ..."
+        kill ${pid}
+        rm -f ${pidfile}
+        return 0
+    else
+        echo "Index master is not running"
+    fi
+}
+
+case $1 in 
+    "start" )
+        start_index_master;
+        ;;
+    "stop" )
+        stop_index_master;
+        ;;
+    "restart" )
+        stop_index_master
+        sleep 2
+        start_index_master
+        ;;
+    "python-env" )
+        run_python "$@"
+        ;;
+esac
+
diff --git a/scripts/run_index_worker.sh b/scripts/run_index_worker.sh
new file mode 100755
index 0000000000..e5f5a9e733
--- /dev/null
+++ b/scripts/run_index_worker.sh
@@ -0,0 +1,122 @@
+SCRIPT=$(readlink -f "$0")
+INSTALLPATH=$(dirname "${SCRIPT}")
+TOPDIR=$(dirname "${INSTALLPATH}")
+default_ccnet_conf_dir=${TOPDIR}/ccnet
+default_seafile_data_dir=${TOPDIR}/seafile-data
+central_config_dir=${TOPDIR}/conf
+pro_pylibs_dir=${INSTALLPATH}/pro/python
+pidfile=${INSTALLPATH}/runtime/index_worker.pid
+
+
+script_name=$0
+function usage () {
+    echo "Usage: "
+    echo
+    echo "  $(basename ${script_name}) { start | stop | restart }"
+}
+
+if [[ $1 != "start" && $1 != "stop" && $1 != "restart" ]]; then
+    usage;
+    exit 1;
+fi
+
+# Locate a Python 3 interpreter and export its name in $PYTHON.
+function check_python_executable() {
+    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
+        return 0
+    fi
+
+    if which python3 2>/dev/null 1>&2; then
+        PYTHON=python3
+    elif ! (python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
+        echo
+        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
+        echo
+        exit 1
+    else
+        PYTHON="python"$(python --version | cut -b 8-10)
+        # '!which' (no space) is parsed by bash as a command literally
+        # named '!which', not as a negated test; the space is required.
+        if ! which $PYTHON 2>/dev/null 1>&2; then
+            echo
+            echo "Can't find a python executable of $PYTHON in PATH"
+            echo "Install $PYTHON before continue."
+            echo "Or if you installed it in a non-standard PATH, set the PYTHON environment variable to it"
+            echo
+            exit 1
+        fi
+    fi
+}
+
+function validate_seafile_data_dir () {
+    if [[ ! -d ${default_seafile_data_dir} ]]; then
+        echo "Error: there is no seafile server data directory."
+        echo "Have you run setup-seafile.sh before this?"
+        echo ""
+        exit 1;
+    fi
+}
+
+# Ensure ${TOPDIR}/logs exists and export it as LOG_DIR.
+function prepare_log_dir() {
+    logdir=${TOPDIR}/logs
+    # Fix typo: the guard previously tested the undefined variable
+    # 'logsdir', so the existence check never matched.
+    if ! [[ -d ${logdir} ]]; then
+        if ! mkdir -p "${logdir}"; then
+            echo "ERROR: failed to create logs dir \"${logdir}\""
+            exit 1
+        fi
+    fi
+    export LOG_DIR=${logdir}
+}
+
+function before_start() {
+    check_python_executable;
+    prepare_log_dir;
+    validate_seafile_data_dir;
+
+    export SEAFILE_CONF_DIR=${default_seafile_data_dir}
+    export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir}
+    export SEAFES_DIR=$pro_pylibs_dir/seafes
+    export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
+    export PYTHONPATH=$PYTHONPATH:$pro_pylibs_dir
+    export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/
+    export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/thirdparts
+    export EVENTS_CONFIG_FILE=${SEAFILE_CENTRAL_CONF_DIR}/seafevents.conf
+    export INDEX_SLAVE_CONFIG_FILE=${SEAFILE_CENTRAL_CONF_DIR}/index-slave.conf
+}
+
+start_index_worker() {
+    before_start;
+    nohup $PYTHON -m seafes.index_worker --loglevel debug --logfile ${logdir}/index_worker.log start & echo $! > $pidfile
+    sleep 2
+    if ! pgrep -f "seafes.index_worker" 2>/dev/null 1>&2; then
+        printf "\033[33mError:Index worker failed to start.\033[m\n"
+        echo "Please try to run \"./run_index_worker.sh start\" again"
+        exit 1;
+    fi
+    echo
+    echo "Index worker is started"
+    echo
+}
+
+stop_index_worker() {
+    if [[ -f ${pidfile} ]]; then
+        pid=$(cat "${pidfile}")
+        echo "Stopping index worker ..."
+        kill ${pid}
+        rm -f ${pidfile}
+        return 0
+    else
+        echo "Index worker is not running"
+    fi
+}
+
+case $1 in 
+    "start" )
+        start_index_worker;
+        ;;
+    "stop" )
+        stop_index_worker;
+        ;;
+    "restart" )
+        stop_index_worker
+        sleep 2
+        start_index_worker
+        ;;
+esac
diff --git a/scripts/seaf-backup-cmd.py b/scripts/seaf-backup-cmd.py
new file mode 100755
index 0000000000..bda1b88f2e
--- /dev/null
+++ b/scripts/seaf-backup-cmd.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+#coding: utf-8
+
+import json
+import argparse
+
+from seaserv import seafile_api
+from pysearpc import SearpcError
+
+def show_backup_status(args):
+    '''Print a human-readable summary of the server backup sync status.
+
+    Fetches a JSON status blob from seafile_api.get_backup_status() and
+    prints the per-state library counts, then the lists of libraries that
+    are currently syncing and those that failed to sync.
+    The 'args' namespace is unused; it is accepted for the argparse
+    set_defaults(func=...) dispatch convention.
+    '''
+    ret_str = seafile_api.get_backup_status()
+    ret_dict = json.loads(ret_str)
+    print('Total number of libraries: %s' % ret_dict['n_total'])
+    print('Number of synchronized libraries: %s' % ret_dict['n_synced'])
+    print('Number of libraries waiting for sync: %s' % ret_dict['n_waiting'])
+    print('Number of libraries syncing: %s' % ret_dict['n_syncing'])
+    print('Number of libraries failed to sync: %s\n' % ret_dict['n_error'])
+    print('List of syncing libraries:')
+    for repo in ret_dict['syncing_repos']:
+        print(repo)
+    print('')
+    print('List of libraries failed to sync:')
+    for repo in ret_dict['error_repos']:
+        print(repo)
+
+def sync_repo(args):
+    '''Manually trigger a backup sync of one library.
+
+    args.repo_id must be a 36-character repo UUID; args.force maps to the
+    RPC's force flag (1/0). Errors from the RPC are reported on stdout
+    rather than raised.
+    '''
+    if len(args.repo_id) != 36:
+        print('Invalid repo id %s.' % args.repo_id)
+        return
+
+    try:
+        seafile_api.sync_repo_manually(args.repo_id, 1 if args.force else 0)
+    except SearpcError as e:
+        print('Failed to sync repo %s: %s.' % (args.repo_id, e))
+    else:
+        print('Sync repo %s successfully.' % args.repo_id)
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    subparser = parser.add_subparsers(title='subcommands', description='')
+    status_parser = subparser.add_parser('status', help='get backup status')
+    status_parser.set_defaults(func=show_backup_status)
+
+    sync_parser = subparser.add_parser('sync', help='sync repo')
+    sync_parser.add_argument('-f', '--force', help='force sync repo', action='store_true')
+    sync_parser.add_argument('repo_id', help='repo id to sync')
+    sync_parser.set_defaults(func=sync_repo)
+
+    args = parser.parse_args()
+    args.func(args)
diff --git a/scripts/seaf-backup-cmd.sh b/scripts/seaf-backup-cmd.sh
new file mode 100755
index 0000000000..ebf7a579e0
--- /dev/null
+++ b/scripts/seaf-backup-cmd.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+# This is a wrapper shell script for the real seaf-backup command.
+# It prepares necessary environment variables and exec the real script.
+
+# Locate a Python 3 interpreter and export its name in $PYTHON.
+function check_python_executable() {
+    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
+        return 0
+    fi
+
+    if which python3 2>/dev/null 1>&2; then
+        PYTHON=python3
+    elif ! (python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
+        echo
+        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
+        echo
+        exit 1
+    else
+        PYTHON="python"$(python --version | cut -b 8-10)
+        # '!which' (no space) is parsed by bash as a command literally
+        # named '!which', not as a negated test; the space is required.
+        if ! which $PYTHON 2>/dev/null 1>&2; then
+            echo
+            echo "Can't find a python executable of $PYTHON in PATH"
+            echo "Install $PYTHON before continue."
+            echo "Or if you installed it in a non-standard PATH, set the PYTHON environment variable to it"
+            echo
+            exit 1
+        fi
+    fi
+}
+
+check_python_executable
+
+# seafile cli client requires the argparse module
+if ! $PYTHON -c 'import argparse' 2>/dev/null 1>&2; then
+    echo
+    echo "Python argparse module is required"
+    echo "see [https://pypi.python.org/pypi/argparse]"
+    echo
+    exit 1
+fi
+
+SCRIPT=$(readlink -f "$0")
+INSTALLPATH=$(dirname "${SCRIPT}")
+TOPDIR=$(dirname "${INSTALLPATH}")
+central_config_dir=${TOPDIR}/conf
+default_ccnet_conf_dir=${TOPDIR}/ccnet
+default_seafile_data_dir=${TOPDIR}/seafile-data
+
+
+function validate_seafile_data_dir () {
+    if [[ ! -d ${default_seafile_data_dir} ]]; then
+        echo "Error: there is no seafile server data directory."
+        echo "Have you run setup-seafile.sh before this?"
+        echo ""
+        exit 1;
+    fi
+}
+
+validate_seafile_data_dir
+
+SEAFILE_PYTHON_PATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart
+
+SEAF_BACKUP_CMD=${INSTALLPATH}/seaf-backup-cmd.py
+
+export SEAFILE_RPC_PIPE_PATH=${INSTALLPATH}/runtime
+export PYTHONPATH=${SEAFILE_PYTHON_PATH}:${PYTHONPATH}
+export CCNET_CONF_DIR=${default_ccnet_conf_dir}
+export SEAFILE_CONF_DIR=${default_seafile_data_dir}
+export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir}
+$PYTHON ${SEAF_BACKUP_CMD} "$@"
diff --git a/scripts/seaf-encrypt.sh b/scripts/seaf-encrypt.sh
new file mode 100755
index 0000000000..0c97c1fe77
--- /dev/null
+++ b/scripts/seaf-encrypt.sh
@@ -0,0 +1,91 @@
+#!/bin/bash
+
+echo ""
+
+SCRIPT=$(readlink -f "$0")
+INSTALLPATH=$(dirname "${SCRIPT}")
+TOPDIR=$(dirname "${INSTALLPATH}")
+default_ccnet_conf_dir=${TOPDIR}/ccnet
+default_seafile_data_dir=${TOPDIR}/seafile-data
+default_conf_dir=${TOPDIR}/conf
+seaf_encrypt=${INSTALLPATH}/seafile/bin/seaf-encrypt
+seaf_encrypt_opts=""
+
+export PATH=${INSTALLPATH}/seafile/bin:$PATH
+export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}
+
+script_name=$0
+function usage () {
+    echo "usage : "
+    echo -e "$(basename ${script_name}) \n" \
+        "-f <seafile enc central config dir, must set>\n" \
+        "-e <seafile enc data dir, must set>"
+    echo ""
+}
+
+function validate_seafile_data_dir () {
+    if [[ ! -d ${default_seafile_data_dir} ]]; then
+        echo "Error: there is no seafile server data directory."
+        echo "Have you run setup-seafile.sh before this?"
+        echo ""
+        exit 1;
+    fi
+}
+
+# Abort (non-zero) if a process matching $2 is running, telling the user
+# how to stop component $1 first.
+function check_component_running() {
+    name=$1
+    cmd=$2
+    if pid=$(pgrep -f "$cmd" 2>/dev/null); then
+        echo "[$name] is running, pid $pid. You can stop it by: "
+        echo
+        echo "        kill $pid"
+        echo
+        echo "Stop it and try again."
+        echo
+        # A bare 'exit' reported success (status 0) even though the run
+        # was refused; exit 1 like validate_already_running does.
+        exit 1
+    fi
+}
+
+function validate_already_running () {
+    if pid=$(pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null); then
+        echo "seafile server is still running, stop it by \"seafile.sh stop\""
+        echo
+        exit 1;
+    fi
+
+    check_component_running "seaf-server" "seaf-server -c ${default_ccnet_conf_dir}"
+    check_component_running "seafdav" "wsgidav.server.server_cli"
+}
+
+function run_seaf_encrypt () {
+    validate_seafile_data_dir;
+
+	validate_already_running;
+
+    echo "Starting seaf-encrypt, please wait ..."
+
+    LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_encrypt} \
+        -c "${default_conf_dir}" \
+        -d "${default_seafile_data_dir}" \
+        ${seaf_encrypt_opts}
+
+    echo "seaf-encrypt run done"
+    echo
+}
+
+if [ $# -gt 0 ];
+then
+    for param in $@;
+    do
+        if [ ${param} = "-h" -o ${param} = "--help" ];
+        then
+            usage;
+            exit 1;
+        fi
+    done
+fi
+
+seaf_encrypt_opts=$@
+run_seaf_encrypt;
+
+echo "Done."
diff --git a/scripts/seaf-gen-key.sh b/scripts/seaf-gen-key.sh
new file mode 100755
index 0000000000..a154ffcec0
--- /dev/null
+++ b/scripts/seaf-gen-key.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+echo ""
+
+SCRIPT=$(readlink -f "$0")
+INSTALLPATH=$(dirname "${SCRIPT}")
+seaf_genkey=${INSTALLPATH}/seafile/bin/seaf-gen-key
+seaf_genkey_opts=""
+
+export PATH=${INSTALLPATH}/seafile/bin:$PATH
+export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}
+
+script_name=$0
+function usage () {
+    echo "usage : "
+    echo -e "$(basename ${script_name})\n" \
+        "-p <file path to write key iv, default ./seaf-key.txt>"
+    echo ""
+}
+
+function run_seaf_genkey () {
+    echo "Starting seaf-gen-key, please wait ..."
+
+    LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_genkey} \
+        ${seaf_genkey_opts}
+
+    echo "seaf-gen-key run done"
+    echo
+}
+
+if [ $# -gt 0 ];
+then
+    for param in $@;
+    do
+        if [ ${param} = "-h" -o ${param} = "--help" ];
+        then
+            usage;
+            exit 1;
+        fi
+    done
+fi
+
+seaf_genkey_opts=$@
+run_seaf_genkey;
+
+echo "Done."
diff --git a/scripts/seaf-import.sh b/scripts/seaf-import.sh
new file mode 100755
index 0000000000..4610937ca1
--- /dev/null
+++ b/scripts/seaf-import.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+echo ""
+
+SCRIPT=$(readlink -f "$0")
+INSTALLPATH=$(dirname "${SCRIPT}")
+TOPDIR=$(dirname "${INSTALLPATH}")
+default_ccnet_conf_dir=${TOPDIR}/ccnet
+default_seafile_data_dir=${TOPDIR}/seafile-data
+default_conf_dir=${TOPDIR}/conf
+seaf_import=${INSTALLPATH}/seafile/bin/seaf-import
+seaf_import_opts=""
+
+export PATH=${INSTALLPATH}/seafile/bin:$PATH
+export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}
+
+script_name=$0
+function usage () {
+    echo "usage : "
+    echo -e "$(basename ${script_name}) \n" \
+        "-p <import dir path, must set>\n" \
+        "-n <repo name, must set>\n" \
+        "-u <repo owner, must set>"
+    echo ""
+}
+
+
+function validate_seafile_data_dir () {
+    if [[ ! -d ${default_seafile_data_dir} ]]; then
+        echo "Error: there is no seafile server data directory."
+        echo "Have you run setup-seafile.sh before this?"
+        echo ""
+        exit 1;
+    fi
+}
+
+function check_component_running() {
+    name=$1
+    cmd=$2
+    if pid=$(pgrep -f "$cmd" 2>/dev/null); then
+        echo "[$name] is running, pid $pid. You can stop it by: "
+        echo
+        echo "        kill $pid"
+        echo
+        echo "Stop it and try again."
+        echo
+        exit
+    fi
+}
+
+<<'COMMENT'
+function validate_already_running () {
+    if pid=$(pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null); then
+        echo "seafile server is still running, stop it by \"seafile.sh stop\""
+        echo
+        exit 1;
+    fi
+
+    check_component_running "seaf-server" "seaf-server -c ${default_ccnet_conf_dir}"
+    check_component_running "seafdav" "wsgidav.server.server_cli"
+}
+COMMENT
+
+function run_seaf_import () {
+    validate_seafile_data_dir;
+
+#	validate_already_running;
+
+    echo "Starting seaf-import, please wait ..."
+
+    LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_import} \
+        -c "${default_conf_dir}" \
+        -d "${default_seafile_data_dir}" \
+        ${seaf_import_opts}
+
+    echo " run done"
+    echo
+}
+
+if [ $# -gt 0 ];
+then
+    for param in $@;
+    do
+        if [ ${param} = "-h" -o ${param} = "--help" ];
+        then
+            usage;
+            exit 1;
+        fi
+    done
+fi
+
+seaf_import_opts=$@
+run_seaf_import;
+
+echo "Done."
diff --git a/scripts/seafile-background-tasks.sh b/scripts/seafile-background-tasks.sh
new file mode 100755
index 0000000000..56bddae76a
--- /dev/null
+++ b/scripts/seafile-background-tasks.sh
@@ -0,0 +1,167 @@
+#!/bin/bash
+
+echo ""
+
+SCRIPT=$(readlink -f "$0")
+INSTALLPATH=$(dirname "${SCRIPT}")
+TOPDIR=$(dirname "${INSTALLPATH}")
+default_ccnet_conf_dir=${TOPDIR}/ccnet
+default_seafile_data_dir=${TOPDIR}/seafile-data
+
+logdir=${TOPDIR}/logs
+pro_pylibs_dir=${INSTALLPATH}/pro/python
+
+seafevents_conf=${TOPDIR}/conf/seafevents.conf
+seafile_background_tasks_log=${logdir}/seafile-background-tasks.log
+
+seahub_dir=${INSTALLPATH}/seahub
+central_config_dir=${TOPDIR}/conf
+
+export SEAHUB_DIR=${seahub_dir}
+export PATH=${INSTALLPATH}/seafile/bin:$PATH
+export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}
+
+script_name=$0
+function usage () {
+    echo "Usage: "
+    echo
+    echo "  $(basename "${script_name}") { start <port> | stop | restart <port> }"
+    echo
+    echo ""
+}
+
+# Check args
+if [[ $1 != "start" && $1 != "stop" && $1 != "restart" ]]; then
+    usage;
+    exit 1;
+fi
+
+# Locate a Python 3 interpreter and export its name in $PYTHON.
+function check_python_executable() {
+    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
+        return 0
+    fi
+
+    if which python3 2>/dev/null 1>&2; then
+        PYTHON=python3
+    elif ! (python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
+        echo
+        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
+        echo
+        exit 1
+    else
+        PYTHON="python"$(python --version | cut -b 8-10)
+        # '!which' (no space) is parsed by bash as a command literally
+        # named '!which', not as a negated test; the space is required.
+        if ! which $PYTHON 2>/dev/null 1>&2; then
+            echo
+            echo "Can't find a python executable of $PYTHON in PATH"
+            echo "Install $PYTHON before continue."
+            echo "Or if you installed it in a non-standard PATH, set the PYTHON environment variable to it"
+            echo
+            exit 1
+        fi
+    fi
+}
+
+function validate_seafile_data_dir () {
+    if [[ ! -d ${default_seafile_data_dir} ]]; then
+        echo "Error: there is no seafile server data directory."
+        echo "Have you run setup-seafile.sh before this?"
+        echo ""
+        exit 1;
+    fi
+
+    pidfile=${TOPDIR}/pids/seafile-background-tasks.pid
+}
+
+function ensure_single_instance () {
+    if pgrep -f "seafevents.background_tasks" 2>/dev/null 1>&2; then
+        echo "seafile background tasks is already running."
+        exit 1;
+    fi
+}
+
+function warning_if_seafile_not_running () {
+    if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then
+        echo
+        echo "Warning: seafile-controller not running. Have you run \"./seafile.sh start\" ?"
+        echo
+    fi
+}
+
+# Ensure the logs directory (set as ${logdir} near the top of this
+# script) exists.
+function prepare_log_dir() {
+    # Fix typo: the guard previously tested the undefined variable
+    # 'logsdir', so the existence check never matched.
+    if ! [[ -d ${logdir} ]]; then
+        if ! mkdir -p "${logdir}"; then
+            echo "ERROR: failed to create logs dir \"${logdir}\""
+            exit 1
+        fi
+    fi
+}
+
+function before_start() {
+    warning_if_seafile_not_running;
+    ensure_single_instance;
+    prepare_log_dir;
+
+    export CCNET_CONF_DIR=${default_ccnet_conf_dir}
+    export SEAFILE_CONF_DIR=${default_seafile_data_dir}
+    export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir}
+    export SEAFILE_RPC_PIPE_PATH=${INSTALLPATH}/runtime
+    export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
+    export PYTHONPATH=$PYTHONPATH:$pro_pylibs_dir
+    export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/
+    export PYTHONPATH=$PYTHONPATH:${INSTALLPATH}/seahub-extra/thirdparts
+    # Allow LDAP user sync to import seahub_settings.py
+    export PYTHONPATH=$PYTHONPATH:${central_config_dir}
+    export SEAFES_DIR=$pro_pylibs_dir/seafes
+}
+
+function start_seafile_background_tasks () {
+    before_start;
+    echo "Starting seafile background tasks ..."
+    $PYTHON -m seafevents.background_tasks --config-file "${seafevents_conf}" \
+        --loglevel debug --logfile "${seafile_background_tasks_log}" -P "${pidfile}" 2>/dev/null 1>&2 &
+
+    # Ensure started successfully
+    sleep 5
+    if ! pgrep -f "seafevents.background_tasks" >/dev/null; then
+        printf "\033[33mError: failed to start seafile background tasks.\033[m\n"
+        echo "Please try to run \"./seafile-background-tasks.sh start\" again"
+        exit 1;
+    fi
+}
+
+function stop_seafile_background_tasks () {
+    if [[ -f ${pidfile} ]]; then
+        pid=$(cat "${pidfile}")
+        echo "Stopping seafile background tasks ..."
+        kill "${pid}"
+        sleep 1
+        if ps "${pid}" 2>/dev/null 1>&2 ; then
+            kill -KILL "${pid}"
+        fi
+        pkill -f "soffice.*--invisible --nocrashreport"
+        rm -f "${pidfile}"
+        return 0
+    else
+        echo "seafile background tasks is not running"
+    fi
+}
+
+check_python_executable;
+validate_seafile_data_dir;
+
+case $1 in
+    "start" )
+        start_seafile_background_tasks;
+        ;;
+    "stop" )
+        stop_seafile_background_tasks;
+        ;;
+    "restart" )
+        stop_seafile_background_tasks
+        sleep 2
+        start_seafile_background_tasks
+        ;;
+esac
+
+echo "Done."
+echo ""
diff --git a/scripts/seahub.conf b/scripts/seahub.conf
index 2183e82619..221c77160b 100644
--- a/scripts/seahub.conf
+++ b/scripts/seahub.conf
@@ -1,7 +1,8 @@
 import os
 
 daemon = True
-workers = 5
+workers = 3
+threads = 5
 
 # Logging
 runtime_dir = os.path.dirname(__file__)
diff --git a/scripts/seahub_extra.mysql.sql b/scripts/seahub_extra.mysql.sql
new file mode 100644
index 0000000000..c00a3dfe12
--- /dev/null
+++ b/scripts/seahub_extra.mysql.sql
@@ -0,0 +1,22 @@
+CREATE TABLE IF NOT EXISTS `pubfile_grouppublicfile` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `group_id` int(11) NOT NULL,
+  `repo_id` varchar(36) NOT NULL,
+  `path` varchar(4096) NOT NULL,
+  `is_dir` tinyint(1) NOT NULL,
+  `added_by` varchar(256) NOT NULL,
+  `description` varchar(1024) NOT NULL,
+  `download_count` int(11) NOT NULL,
+  PRIMARY KEY (`id`),
+  KEY `pubfile_grouppublicfile_dc00373b` (`group_id`)
+) ENGINE=InnoDB;
+
+CREATE TABLE IF NOT EXISTS `sysadmin_extra_userloginlog` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `username` varchar(255) NOT NULL,
+  `login_date` datetime NOT NULL,
+  `login_ip` varchar(20) NOT NULL,
+  PRIMARY KEY (`id`),
+  KEY `sysadmin_extra_userloginlog_ee0cafa2` (`username`),
+  KEY `sysadmin_extra_userloginlog_c8db99ec` (`login_date`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
diff --git a/scripts/seahub_extra.sqlite3.sql b/scripts/seahub_extra.sqlite3.sql
new file mode 100644
index 0000000000..855cd597ad
--- /dev/null
+++ b/scripts/seahub_extra.sqlite3.sql
@@ -0,0 +1,21 @@
+CREATE TABLE IF NOT EXISTS "pubfile_grouppublicfile" (
+    "id" integer NOT NULL PRIMARY KEY,
+    "group_id" integer NOT NULL,
+    "repo_id" varchar(36) NOT NULL,
+    "path" varchar(4096) NOT NULL,
+    "is_dir" bool NOT NULL,
+    "added_by" varchar(256) NOT NULL,
+    "description" varchar(1024) NOT NULL,
+    "download_count" integer NOT NULL
+)
+;
+CREATE INDEX IF NOT EXISTS "pubfile_grouppublicfile_dc00373b" ON "pubfile_grouppublicfile" ("group_id");
+
+CREATE TABLE IF NOT EXISTS "sysadmin_extra_userloginlog" (
+    "id" integer NOT NULL PRIMARY KEY,
+    "username" varchar(255) NOT NULL,
+    "login_date" datetime NOT NULL,
+    "login_ip" varchar(20) NOT NULL
+);
+CREATE INDEX IF NOT EXISTS "sysadmin_extra_userloginlog_c8db99ec" ON "sysadmin_extra_userloginlog" ("login_date");
+CREATE INDEX IF NOT EXISTS "sysadmin_extra_userloginlog_ee0cafa2" ON "sysadmin_extra_userloginlog" ("username");
diff --git a/scripts/sqlite2mysql.py b/scripts/sqlite2mysql.py
index 8b30985241..dfd9a395fb 100644
--- a/scripts/sqlite2mysql.py
+++ b/scripts/sqlite2mysql.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 """Lifted from:
 http://stackoverflow.com/questions/18671/quick-easy-way-to-migrate-sqlite3-to-mysql
diff --git a/scripts/upgrade/check_backend.py b/scripts/upgrade/check_backend.py
new file mode 100644
index 0000000000..38e7e14739
--- /dev/null
+++ b/scripts/upgrade/check_backend.py
@@ -0,0 +1,41 @@
+#coding: UTF-8
+
+import os
+import sys
+import configparser
+
+
+def main():
+    cfg = configparser.ConfigParser()
+    seafile_conf_dir = os.environ['SEAFILE_CONF_DIR']
+    seafile_conf = os.path.join(seafile_conf_dir, 'seafile.conf')
+    cfg.read(seafile_conf)
+
+    sections_map =  {
+        'blocks': 'block_backend',
+        'fs': 'fs_object_backend',
+        'commits': 'commit_object_backend',
+    }
+
+    backends = {}
+    for name, section in sections_map.items():
+        if cfg.has_option(section, 'name'):
+            backend_name = cfg.get(section, 'name')
+        else:
+            backend_name = 'fs'
+        backends[name] = backend_name
+
+    if any([ bend == 's3' for bend in list(backends.values()) ]):
+        print('s3')
+        return
+
+    if any([ bend == 'ceph' for bend in list(backends.values()) ]):
+        print('ceph')
+        return
+
+if __name__ == '__main__':
+    try:
+        main()
+    except Exception as e:
+        sys.stderr.write(str(e))
+        sys.stderr.flush()
diff --git a/scripts/upgrade/db_update_1.3_1.4.py b/scripts/upgrade/db_update_1.3_1.4.py
index d0224aa707..864efa74e0 100644
--- a/scripts/upgrade/db_update_1.3_1.4.py
+++ b/scripts/upgrade/db_update_1.3_1.4.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 import sqlite3
 import os
diff --git a/scripts/upgrade/db_update_helper.py b/scripts/upgrade/db_update_helper.py
index 9eab54cc08..007f84d528 100644
--- a/scripts/upgrade/db_update_helper.py
+++ b/scripts/upgrade/db_update_helper.py
@@ -25,6 +25,7 @@ class EnvManager(object):
         self.ccnet_dir = os.environ['CCNET_CONF_DIR']
         self.seafile_dir = os.environ['SEAFILE_CONF_DIR']
         self.central_config_dir = os.environ.get('SEAFILE_CENTRAL_CONF_DIR')
+        self.seafevents_db_dir = os.path.join(os.path.dirname(self.install_path), 'pro-data')
 
 
 env_mgr = EnvManager()
@@ -77,6 +78,7 @@ class DBUpdater(object):
         self.sql_dir = os.path.join(env_mgr.upgrade_dir, 'sql', version, name)
         pro_path = os.path.join(env_mgr.install_path, 'pro')
         self.is_pro = os.path.exists(pro_path)
+        self.version = version
 
     @staticmethod
     def get_instance(version):
@@ -269,7 +271,7 @@ class SQLiteDBUpdater(DBUpdater):
         self.ccnet_db = CcnetSQLiteDB(env_mgr.ccnet_dir)
         self.seafile_db = os.path.join(env_mgr.seafile_dir, 'seafile.db')
         self.seahub_db = os.path.join(env_mgr.top_dir, 'seahub.db')
-        self.seafevents_db = os.path.join(env_mgr.top_dir, 'seafevents.db')
+        self.seafevents_db = os.path.join(env_mgr.seafevents_db_dir, 'seafevents.db')
 
     def update_db(self):
         super(SQLiteDBUpdater, self).update_db()
@@ -338,7 +340,7 @@ class MySQLDBUpdater(DBUpdater):
         try:
             conn = pymysql.connect(**kw)
         except Exception as e:
-            if isinstance(e, pymysql.err.OperationalError):
+            if isinstance(e, pymysql.OperationalError):
                 msg = str(e.args[1])
             else:
                 msg = str(e)
diff --git a/scripts/upgrade/fix_mysql_user.py b/scripts/upgrade/fix_mysql_user.py
index 4e5d9ec4cc..d7fd824b08 100644
--- a/scripts/upgrade/fix_mysql_user.py
+++ b/scripts/upgrade/fix_mysql_user.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 import os
 import sys
diff --git a/scripts/upgrade/minor-upgrade.sh b/scripts/upgrade/minor-upgrade.sh
index 62bc371f14..1b514a6821 100755
--- a/scripts/upgrade/minor-upgrade.sh
+++ b/scripts/upgrade/minor-upgrade.sh
@@ -19,6 +19,7 @@ dest_avatar_dir=${TOPDIR}/seahub-data/avatars
 seafile_server_symlink=${TOPDIR}/seafile-server-latest
 default_conf_dir=${TOPDIR}/conf
 default_ccnet_conf_dir=${TOPDIR}/ccnet
+default_seafile_data_dir=${TOPDIR}/seafile-data
 seahub_data_dir=${TOPDIR}/seahub-data
 elasticsearch_config_file=${seafile_server_symlink}/pro/elasticsearch/config/jvm.options
 
@@ -115,23 +116,12 @@ function move_old_elasticsearch_config_to_latest() {
     fi
 }
 
-function read_seafile_data_dir() {
-    seafile_ini=${default_ccnet_conf_dir}/seafile.ini
-    if [[ -f ${seafile_ini} ]]; then
-        seafile_data_dir=$(cat "${seafile_ini}")
-        if [[ ! -d ${seafile_data_dir} ]]; then
-            echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits."
-            echo "Please check it first, or create this directory yourself."
-            echo ""
-            exit 1;
-        else
-            if [[ ${seafile_data_dir} != ${TOPDIR}/seafile-data ]]; then
-                if [[ ! -L ${TOPDIR}/seafile-data ]]; then
-                    ln -s ${seafile_data_dir} ${TOPDIR}/seafile-data
-                    echo "Created the symlink ${TOPDIR}/seafile-data for ${seafile_data_dir}."
-                fi  
-            fi  
-        fi
+function validate_seafile_data_dir() {
+    if [[ ! -d ${default_seafile_data_dir} ]]; then
+        echo "Error: there is no seafile server data directory."
+        echo "Have you run setup-seafile.sh before this?"
+        echo ""
+        exit 1;
     fi
 }
 
@@ -151,7 +141,7 @@ function rename_gunicorn_config() {
     fi
 }
  
-read_seafile_data_dir;
+validate_seafile_data_dir;
 rename_gunicorn_config;
 migrate_avatars;
 
diff --git a/scripts/upgrade/sql/3.0.0/mysql/seahub.sql b/scripts/upgrade/sql/3.0.0/mysql/seahub.sql
index 7e656ba0e4..6ce79cde8b 100644
--- a/scripts/upgrade/sql/3.0.0/mysql/seahub.sql
+++ b/scripts/upgrade/sql/3.0.0/mysql/seahub.sql
@@ -11,3 +11,13 @@ CREATE TABLE IF NOT EXISTS `api2_tokenv2` (
   PRIMARY KEY (`key`),
   UNIQUE KEY `user` (`user`,`platform`,`device_id`)
 ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `sysadmin_extra_userloginlog` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `username` varchar(255) NOT NULL,
+  `login_date` datetime NOT NULL,
+  `login_ip` varchar(20) NOT NULL,
+  PRIMARY KEY (`id`),
+  KEY `sysadmin_extra_userloginlog_ee0cafa2` (`username`),
+  KEY `sysadmin_extra_userloginlog_c8db99ec` (`login_date`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
diff --git a/scripts/upgrade/sql/3.0.0/sqlite3/seahub.sql b/scripts/upgrade/sql/3.0.0/sqlite3/seahub.sql
index c05453ac37..161c9259da 100644
--- a/scripts/upgrade/sql/3.0.0/sqlite3/seahub.sql
+++ b/scripts/upgrade/sql/3.0.0/sqlite3/seahub.sql
@@ -10,3 +10,12 @@ CREATE TABLE IF NOT EXISTS "api2_tokenv2" (
     "last_login_ip" char(39),
     UNIQUE ("user", "platform", "device_id")
 );
+
+CREATE TABLE IF NOT EXISTS "sysadmin_extra_userloginlog" (
+    "id" integer NOT NULL PRIMARY KEY,
+    "username" varchar(255) NOT NULL,
+    "login_date" datetime NOT NULL,
+    "login_ip" varchar(20) NOT NULL
+);
+CREATE INDEX IF NOT EXISTS "sysadmin_extra_userloginlog_c8db99ec" ON "sysadmin_extra_userloginlog" ("login_date");
+CREATE INDEX IF NOT EXISTS "sysadmin_extra_userloginlog_ee0cafa2" ON "sysadmin_extra_userloginlog" ("username");
diff --git a/scripts/upgrade/sql/6.2.0/mysql/seahub.sql b/scripts/upgrade/sql/6.2.0/mysql/seahub.sql
index 81817f8a40..9b0b070766 100644
--- a/scripts/upgrade/sql/6.2.0/mysql/seahub.sql
+++ b/scripts/upgrade/sql/6.2.0/mysql/seahub.sql
@@ -81,4 +81,8 @@ CREATE TABLE IF NOT EXISTS `role_permissions_adminrole` (
 
 ALTER TABLE `sysadmin_extra_userloginlog` ADD COLUMN `login_success` tinyint(1) NOT NULL default 1;
 ALTER TABLE `profile_profile` ADD COLUMN `list_in_address_book` tinyint(1) NOT NULL default 0;
-ALTER TABLE `profile_profile` ADD INDEX `profile_profile_3d5d3631` (`list_in_address_book`);
\ No newline at end of file
+ALTER TABLE `profile_profile` ADD INDEX `profile_profile_3d5d3631` (`list_in_address_book`);
+ALTER TABLE `FileAudit` ADD INDEX `fileaudit_timestamp` (`timestamp`);
+ALTER TABLE `Event` ADD INDEX `event_timestamp` (`timestamp`);
+ALTER TABLE `UserTrafficStat` ADD INDEX `usertrafficstat_timestamp` (`month`);
+ALTER TABLE `FileUpdate` ADD INDEX `fileupdate_timestamp` (`timestamp`);
diff --git a/scripts/upgrade/sql/6.3.0/mysql/seafile.sql b/scripts/upgrade/sql/6.3.0/mysql/seafile.sql
index 7096b6689d..4405c44033 100644
--- a/scripts/upgrade/sql/6.3.0/mysql/seafile.sql
+++ b/scripts/upgrade/sql/6.3.0/mysql/seafile.sql
@@ -10,13 +10,17 @@ ALTER TABLE RepoOwner DROP primary key;
 ALTER TABLE RepoOwner ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
 ALTER TABLE RepoOwner ADD UNIQUE (repo_id);
 
+ALTER TABLE RepoGroup DROP primary key;
 ALTER TABLE RepoGroup ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
+ALTER TABLE RepoGroup ADD UNIQUE (group_id, repo_id);
 
 ALTER TABLE InnerPubRepo DROP primary key;
 ALTER TABLE InnerPubRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
 ALTER TABLE InnerPubRepo ADD UNIQUE (repo_id);
 
+ALTER TABLE RepoUserToken DROP primary key;
 ALTER TABLE RepoUserToken ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
+ALTER TABLE RepoUserToken ADD UNIQUE (repo_id, token);
 
 ALTER TABLE RepoTokenPeerInfo DROP primary key;
 ALTER TABLE RepoTokenPeerInfo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
@@ -78,10 +82,55 @@ ALTER TABLE OrgUserQuota DROP primary key;
 ALTER TABLE OrgUserQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
 ALTER TABLE OrgUserQuota ADD UNIQUE (org_id, user);
 
-ALTER TABLE SystemInfo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
-
 ALTER TABLE Branch DROP primary key;
 ALTER TABLE Branch ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
 ALTER TABLE Branch ADD UNIQUE (repo_id, name);
 
 ALTER TABLE SeafileConf ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
+
+ALTER TABLE FileLocks ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
+
+ALTER TABLE OrgRepo DROP primary key;
+ALTER TABLE OrgRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
+ALTER TABLE OrgRepo ADD UNIQUE (org_id, repo_id);
+
+ALTER TABLE OrgGroupRepo DROP primary key;
+ALTER TABLE OrgGroupRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
+ALTER TABLE OrgGroupRepo ADD UNIQUE (org_id, group_id, repo_id);
+
+ALTER TABLE OrgInnerPubRepo DROP primary key;
+ALTER TABLE OrgInnerPubRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
+ALTER TABLE OrgInnerPubRepo ADD UNIQUE (org_id, repo_id);
+
+ALTER TABLE RepoSyncError DROP primary key;
+ALTER TABLE RepoSyncError ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
+ALTER TABLE RepoSyncError ADD UNIQUE (token);
+
+ALTER TABLE GCID DROP primary key;
+ALTER TABLE GCID ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
+ALTER TABLE GCID ADD UNIQUE (repo_id);
+
+ALTER TABLE LastGCID DROP primary key;
+ALTER TABLE LastGCID ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
+ALTER TABLE LastGCID ADD UNIQUE (repo_id, client_id);
+
+ALTER TABLE FolderUserPerm ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
+
+ALTER TABLE FolderGroupPerm ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
+
+ALTER TABLE FolderPermTimestamp DROP primary key;
+ALTER TABLE FolderPermTimestamp ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
+ALTER TABLE FolderPermTimestamp ADD UNIQUE (repo_id);
+
+ALTER TABLE WebUploadTempFiles ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
+
+ALTER TABLE RepoStorageId ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
+
+ALTER TABLE RoleQuota DROP primary key;
+ALTER TABLE RoleQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
+ALTER TABLE RoleQuota ADD UNIQUE (role);
+
+CREATE TABLE IF NOT EXISTS OrgSharedRepo (id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,org_id INT, repo_id CHAR(37) , from_email VARCHAR(255), to_email VARCHAR(255), permission CHAR(15), INDEX (org_id, repo_id), INDEX(from_email), INDEX(to_email)) ENGINE=INNODB;
+ALTER TABLE OrgSharedRepo ADD INDEX(repo_id);
+
+ALTER TABLE OrgRepo ADD INDEX(user);
diff --git a/scripts/upgrade/sql/6.3.0/mysql/seahub.sql b/scripts/upgrade/sql/6.3.0/mysql/seahub.sql
index 692d675039..27d4b1fc49 100644
--- a/scripts/upgrade/sql/6.3.0/mysql/seahub.sql
+++ b/scripts/upgrade/sql/6.3.0/mysql/seahub.sql
@@ -168,3 +168,8 @@ ALTER TABLE notifications_notification ADD INDEX `notifications_notification_386
 ALTER TABLE institutions_institutionadmin ADD INDEX `institutions_institutionadmin_user_7560167c8413ff0e_uniq` (`user`);
 
 ALTER TABLE `post_office_attachment` add column `mimetype` varchar(255) NOT NULL;
+
+ALTER TABLE Event ADD INDEX `ix_event_timestamp` (`timestamp`);
+ALTER TABLE FileAudit ADD INDEX `ix_FileAudit_timestamp` (`timestamp`);
+ALTER TABLE FileUpdate ADD INDEX `ix_FileUpdate_timestamp` (`timestamp`);
+ALTER TABLE UserTrafficStat ADD INDEX `ix_UserTrafficStat_month` (`month`);
diff --git a/scripts/upgrade/upgrade_2.2_3.0.sh b/scripts/upgrade/upgrade_2.2_3.0.sh
index d0643d3a65..7cf6ab2872 100755
--- a/scripts/upgrade/upgrade_2.2_3.0.sh
+++ b/scripts/upgrade/upgrade_2.2_3.0.sh
@@ -132,7 +132,7 @@ function upgrade_seafile_server_latest_symlink() {
     seafile_server_symlink=${TOPDIR}/seafile-server-latest
     if [[ -L "${seafile_server_symlink}" || ! -e "${seafile_server_symlink}" ]]; then
         echo
-        printf "updating \033[33m${seafile_server_symlink}\033[m symbolic link to \033[33m${INSTALLPATH}\033[m ...\n\n"
+        printf "updating \033[32m${seafile_server_symlink}\033[m symbolic link to \033[32m${INSTALLPATH}\033[m ...\n\n"
         echo
         if ! rm -f "${seafile_server_symlink}"; then
             echo "Failed to remove ${seafile_server_symlink}"
@@ -148,10 +148,27 @@ function upgrade_seafile_server_latest_symlink() {
     fi
 }
 
+function show_notice_for_s3_ceph_user() {
+    echo "-----------------------------------------------------------------"
+    echo "Important: You are using ${backend} storage, please follow the "
+    echo "upgrade notice to migrate your data to 3.0 format"
+    echo
+    echo "  http://seacloud.cc/group/180/wiki/seafile-pro-3.0-upgrade-notice/"
+    echo "-----------------------------------------------------------------"
+    echo
+    echo
+}
+
+check_backend_py=${UPGRADE_DIR}/check_backend.py
+backend=
 function migrate_seafile_data_format() {
+    backend=$($PYTHON ${check_backend_py})
+    if [[ "${backend}" == "s3" || "${backend}" == "ceph" ]]; then
+        return
+    fi
     seaf_migrate=${INSTALLPATH}/seafile/bin/seaf-migrate
     echo
-    echo "now migrating seafile data to 3.0 format"
+    echo "Now migrating your seafile data to 3.0 format. It may take a while."
     echo
     if ! LD_LIBRARY_PATH=${SEAFILE_LD_LIBRARY_PATH} ${seaf_migrate} \
             -c "${default_ccnet_conf_dir}" -d "${seafile_data_dir}"; then
@@ -183,9 +200,12 @@ update_database;
 
 upgrade_seafile_server_latest_symlink;
 
-
-echo
-echo "-----------------------------------------------------------------"
-echo "Upgraded your seafile server successfully."
-echo "-----------------------------------------------------------------"
-echo
+if [[ "${backend}" == "s3" || "${backend}" == "ceph" ]]; then
+    show_notice_for_s3_ceph_user;
+else
+    echo
+    echo "-----------------------------------------------------------------"
+    echo "Upgraded your seafile server successfully."
+    echo "-----------------------------------------------------------------"
+    echo
+fi
diff --git a/scripts/upgrade/upgrade_4.0_4.1.sh b/scripts/upgrade/upgrade_4.0_4.1.sh
index e3eb848d00..e4ee7e04f0 100755
--- a/scripts/upgrade/upgrade_4.0_4.1.sh
+++ b/scripts/upgrade/upgrade_4.0_4.1.sh
@@ -226,6 +226,7 @@ chmod 0600 "$seahub_settings_py"
 chmod 0700 "$seafile_data_dir"
 chmod 0700 "$default_ccnet_conf_dir"
 chmod 0700 "$default_conf_dir"
+chmod 0700 "$TOPDIR"/pro-data
 
 echo
 echo "-----------------------------------------------------------------"
diff --git a/scripts/upgrade/upgrade_4.1_4.2.sh b/scripts/upgrade/upgrade_4.1_4.2.sh
index f1754387b4..d0ee40514d 100755
--- a/scripts/upgrade/upgrade_4.1_4.2.sh
+++ b/scripts/upgrade/upgrade_4.1_4.2.sh
@@ -192,6 +192,13 @@ function move_old_customdir_outside() {
     cp -rf "${old_customdir}" "${seahub_data_dir}/"
 }
 
+function remove_es_index() {
+    local es_data_dir=$TOPDIR/pro-data/search/data
+    echo -n "Removing old search index ... "
+    rm -rf $es_data_dir && mkdir -p $es_data_dir
+    echo "Done"
+}
+
 #################
 # The main execution flow of the script
 ################
@@ -202,6 +209,9 @@ ensure_server_not_running;
 
 update_database;
 
+# We changed elasticsearch index settings in 4.2.0, need to recreate the index.
+remove_es_index;
+
 migrate_avatars;
 
 
diff --git a/scripts/upgrade/upgrade_4.2_4.3.sh b/scripts/upgrade/upgrade_4.2_4.3.sh
index b79035d2e9..51b5213534 100755
--- a/scripts/upgrade/upgrade_4.2_4.3.sh
+++ b/scripts/upgrade/upgrade_4.2_4.3.sh
@@ -200,6 +200,17 @@ function regenerate_secret_key() {
     fi
 }
 
+function remove_es_index() {
+    local es_data_dir=$TOPDIR/pro-data/search/data
+    echo -n "Removing old search index ... "
+    rm -rf $es_data_dir && mkdir -p $es_data_dir
+    echo "Done"
+}
+
+function remove_office_files() {
+    rm -rf /tmp/seafile-office-output/html/*
+}
+
 #################
 # The main execution flow of the script
 ################
@@ -212,6 +223,10 @@ regenerate_secret_key;
 
 update_database;
 
+# We changed elasticsearch index settings in 4.3.0, need to recreate the index.
+remove_es_index;
+remove_office_files;
+
 migrate_avatars;
 
 
diff --git a/scripts/upgrade/upgrade_4.4_5.0.sh b/scripts/upgrade/upgrade_4.4_5.0.sh
index dbec11d02d..cad8c7b662 100755
--- a/scripts/upgrade/upgrade_4.4_5.0.sh
+++ b/scripts/upgrade/upgrade_4.4_5.0.sh
@@ -9,6 +9,7 @@ default_conf_dir=${TOPDIR}/conf
 seafile_server_symlink=${TOPDIR}/seafile-server-latest
 seahub_data_dir=${TOPDIR}/seahub-data
 seahub_settings_py=${TOPDIR}/seahub_settings.py
+pro_data_dir=${TOPDIR}/pro-data
 
 manage_py=${INSTALLPATH}/seahub/manage.py
 
@@ -205,6 +206,7 @@ function copy_confs_to_central_conf_dir() {
         $default_ccnet_conf_dir/ccnet.conf
         $seafile_data_dir/seafile.conf
         $seahub_settings_py
+        $pro_data_dir/seafevents.conf
     )
     for conffile in ${confs[*]}; do
         if grep -q "This file has been moved" $conffile; then