# Mirror of https://github.com/haiwen/seafile-server.git
# synced 2025-04-28 03:20:10 +00:00, commit c48416adfd
#!/usr/bin/env python
|
||||
# coding: UTF-8
|
||||
|
||||
'''This script builds the seafile server tarball.
|
||||
|
||||
Some notes:
|
||||
|
||||
1. The working directory is always the 'builddir'. 'os.chdir' is only called
|
||||
to change to the 'builddir'. We make use of the 'cwd' argument in
|
||||
'subprocess.Popen' to run a command in a specific directory.
|
||||
|
||||
2. django/djangorestframework/djblets/gunicorn/flup must be easy_install-ed to
|
||||
a directory before running this script. That directory is passed in as the
|
||||
'--thirdpartdir' argument.
|
||||
|
||||
'''
|
||||
import sys
|
||||
import os
|
||||
import glob
|
||||
import subprocess
|
||||
import tempfile
|
||||
import shutil
|
||||
import re
|
||||
import subprocess
|
||||
import optparse
|
||||
import atexit
|
||||
import platform
|
||||
|
||||
####################
|
||||
### Global variables
|
||||
####################
|
||||
|
||||
# command line configuration; populated by validate_args() after option parsing
conf = {}

# key names in the conf dictionary.
CONF_VERSION = 'version'                      # version of the generated server tarball
CONF_SEAFILE_VERSION = 'seafile_version'      # seafile version (as in its configure.ac)
CONF_LIBSEARPC_VERSION = 'libsearpc_version'  # libsearpc version (as in its configure.ac)
CONF_SRCDIR = 'srcdir'                        # directory containing the source tarballs
CONF_KEEP = 'keep'                            # keep the builddir after the script exits
CONF_BUILDDIR = 'builddir'                    # scratch directory used for the build
CONF_OUTPUTDIR = 'outputdir'                  # where the final tarball is placed
CONF_THIRDPARTDIR = 'thirdpartdir'            # pre-installed python deps for seahub
CONF_NO_STRIP = 'nostrip'                     # do not strip debug symbols
CONF_ENABLE_S3 = 's3'                         # enable amazon s3 support
CONF_YES = 'yes'                              # skip the interactive confirmation
CONF_JOBS = 'jobs'                            # parallelism passed to make -j
CONF_MYSQL_CONFIG = 'mysql_config'            # path to mysql_config/mariadb_config
|
||||
|
||||
####################
|
||||
### Common helper functions
|
||||
####################
|
||||
def highlight(content, is_error=False):
    '''Wrap *content* in ANSI escape codes: red for errors, green otherwise.'''
    color = '1;31' if is_error else '1;32'
    return '\x1b[%sm%s\x1b[m' % (color, content)
|
||||
|
||||
def info(msg):
    '''Print *msg* prefixed with a green-highlighted [INFO] tag.'''
    print('%s%s' % (highlight('[INFO] '), msg))
|
||||
|
||||
def find_in_path(prog):
    '''Search every directory in $PATH for *prog*; return its full path, or None.'''
    for directory in os.environ['PATH'].split(':'):
        if not directory:
            continue
        candidate = os.path.join(directory, prog)
        if os.path.exists(candidate):
            return candidate
    return None
|
||||
|
||||
def error(msg=None, usage=None):
    '''Print an optional error message and usage text, then exit with status 1.'''
    if msg:
        print('%s%s' % (highlight('[ERROR] ', is_error=True), msg))
    if usage:
        print(usage)
    sys.exit(1)
|
||||
|
||||
def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
    '''Execute *argv* (a list) and block until it exits; return the exit code.

    Stdout and stderr are inherited from this process unless the
    corresponding suppress_* flag redirects them to /dev/null.
    '''
    with open(os.devnull, 'w') as devnull:
        out = devnull if suppress_stdout else sys.stdout
        err = devnull if suppress_stderr else sys.stderr
        child = subprocess.Popen(argv, cwd=cwd, stdout=out, stderr=err, env=env)
        return child.wait()
|
||||
|
||||
def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
    '''Like run_argv, but *cmdline* is a shell command string (shell=True).'''
    with open(os.devnull, 'w') as devnull:
        out = devnull if suppress_stdout else sys.stdout
        err = devnull if suppress_stderr else sys.stderr
        child = subprocess.Popen(cmdline,
                                 cwd=cwd,
                                 stdout=out,
                                 stderr=err,
                                 env=env,
                                 shell=True)
        return child.wait()
|
||||
|
||||
def must_mkdir(path):
    '''Create directory *path*; abort the whole script if creation fails.'''
    try:
        os.mkdir(path)
    except OSError as e:
        msg = 'failed to create directory %s:%s' % (path, e)
        error(msg)
|
||||
|
||||
def must_copy(src, dst):
    '''Copy *src* to *dst*; abort the whole script if the copy fails.'''
    try:
        shutil.copy(src, dst)
    except Exception as e:
        msg = 'failed to copy %s to %s: %s' % (src, dst, e)
        error(msg)
|
||||
|
||||
class Project(object):
    '''Base class for a buildable project (libsearpc/seafile/seahub).

    Subclasses must set `name`, implement get_version(), and normally
    fill in `build_commands` in their __init__.
    '''
    # Project name, i.e. libsearpc/seafile/seahub
    name = ''

    # A list of shell commands to configure/build the project
    build_commands = []

    def __init__(self):
        # the path to pass to --prefix=/<prefix>
        self.prefix = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seafile')
        self.version = self.get_version()
        self.src_tarball = os.path.join(conf[CONF_SRCDIR],
                                        '%s-%s.tar.gz' % (self.name, self.version))
        # project dir, like <builddir>/seafile-1.2.2/
        self.projdir = os.path.join(conf[CONF_BUILDDIR], '%s-%s' % (self.name, self.version))

    def get_version(self):
        '''Return this project's version string.

        libsearpc can have different versions from seafile, so each
        subclass supplies its own implementation.
        '''
        raise NotImplementedError

    def uncompress(self):
        '''Uncompress the source from the tarball'''
        info('Uncompressing %s' % self.name)

        # BUGFIX: Popen.wait() returns tar's (positive) exit status on
        # failure and is negative only when the child dies from a signal,
        # so the original `< 0` comparison silently ignored tar errors.
        if run('tar xf %s' % self.src_tarball) != 0:
            error('failed to uncompress source of %s' % self.name)

    def build(self):
        '''Run each command in build_commands inside the project directory; abort on failure.'''
        info('Building %s' % self.name)
        for cmd in self.build_commands:
            if run(cmd, cwd=self.projdir) != 0:
                error('error when running command:\n\t%s\n' % cmd)
|
||||
|
||||
class Libsearpc(Project):
    '''The libsearpc RPC library: classic configure/make/make-install build.'''
    name = 'libsearpc'

    def __init__(self):
        Project.__init__(self)
        self.build_commands = [
            './configure --prefix=%s' % self.prefix,
            'make -j%s' % conf[CONF_JOBS],
            'make install',
        ]

    def get_version(self):
        return conf[CONF_LIBSEARPC_VERSION]
|
||||
|
||||
class Seafile(Project):
    '''The seafile core server: configure (with optional s3/mysql) + make.'''
    name = 'seafile'

    def __init__(self):
        Project.__init__(self)
        s3_support = '--enable-s3' if conf[CONF_ENABLE_S3] else ''

        configure_command = './configure --prefix=%s %s --enable-ldap' % (self.prefix, s3_support)
        if conf[CONF_MYSQL_CONFIG]:
            configure_command = '%s --with-mysql=%s' % (configure_command,
                                                        conf[CONF_MYSQL_CONFIG])

        self.build_commands = [
            configure_command,
            'make -j%s' % conf[CONF_JOBS],
            'make install',
        ]

    def get_version(self):
        return conf[CONF_SEAFILE_VERSION]
|
||||
|
||||
class Seahub(Project):
    '''The seahub web frontend: shipped as-is, so there is nothing to compile.'''
    name = 'seahub'

    def __init__(self):
        Project.__init__(self)
        # nothing to do for seahub
        self.build_commands = []

    def get_version(self):
        return conf[CONF_SEAFILE_VERSION]

    def build(self):
        # record the server version inside seahub before the (empty) build
        self.write_version_to_settings_py()
        Project.build(self)

    def write_version_to_settings_py(self):
        '''Append the current seafile server version to seahub's settings.py.'''
        settings_py = os.path.join(self.projdir, 'seahub', 'settings.py')
        with open(settings_py, 'a+') as fp:
            fp.write('\nSEAFILE_VERSION = "%s"\n' % conf[CONF_VERSION])
|
||||
|
||||
|
||||
def check_seahub_thirdpart(thirdpartdir):
    '''Verify the required python libs are pre-installed in *thirdpartdir*.

    The directory is later copied to seahub/thirdpart; abort if any
    required library is missing.
    '''
    thirdpart_libs = [
        'Django',
        # 'Djblets',
        'gunicorn',
        #'flup',
        'chardet',
        'python_dateutil',
        #'django_picklefield',
        #'django_constance',
        # 'SQLAlchemy',
        # 'python_daemon',
        # 'lockfile',
        # 'six',
    ]

    for lib in thirdpart_libs:
        pattern = lib + '*'
        if not glob.glob(os.path.join(thirdpartdir, pattern)):
            error('%s not found in %s' % (pattern, thirdpartdir))
|
||||
|
||||
def check_targz_src(proj, version, srcdir):
    '''Abort unless <proj>-<version>.tar.gz exists in *srcdir*.'''
    tarball = os.path.join(srcdir, '%s-%s.tar.gz' % (proj, version))
    if not os.path.exists(tarball):
        error('%s not exists' % tarball)
|
||||
|
||||
def check_targz_src_no_version(proj, srcdir):
    '''Abort unless the unversioned tarball <proj>.tar.gz exists in *srcdir*.'''
    tarball = os.path.join(srcdir, '%s.tar.gz' % proj)
    if not os.path.exists(tarball):
        error('%s not exists' % tarball)
|
||||
|
||||
def check_pdf2htmlEX():
    '''Abort unless the pdf2htmlEX executable can be found in $PATH.'''
    if find_in_path('pdf2htmlEX') is None:
        error('pdf2htmlEX not found')
|
||||
|
||||
def validate_args(usage, options):
    '''Validate parsed command line options and populate the global conf dict.

    Exits (via error()) with the usage message on any missing or invalid
    argument; on success prepares the builddir and shows the build summary.
    '''
    required_args = [
        CONF_VERSION,
        CONF_LIBSEARPC_VERSION,
        CONF_SEAFILE_VERSION,
        CONF_SRCDIR,
        CONF_THIRDPARTDIR,
    ]

    # first check required args
    for optname in required_args:
        # FIX: compare with `is None` rather than `== None` (PEP 8 idiom)
        if getattr(options, optname, None) is None:
            error('%s must be specified' % optname, usage=usage)

    def get_option(optname):
        return getattr(options, optname)

    # [ version ]
    def check_project_version(version):
        '''A valid version must be like 1.2.2, 1.3'''
        # FIX: raw string avoids the invalid escape sequence warning for `\.`
        if not re.match(r'^[0-9]+(\.([0-9])+)+$', version):
            error('%s is not a valid version' % version, usage=usage)

    version = get_option(CONF_VERSION)
    seafile_version = get_option(CONF_SEAFILE_VERSION)
    libsearpc_version = get_option(CONF_LIBSEARPC_VERSION)

    check_project_version(version)
    check_project_version(libsearpc_version)
    check_project_version(seafile_version)

    # [ srcdir ] every source tarball must already be present
    srcdir = get_option(CONF_SRCDIR)
    check_targz_src('libsearpc', libsearpc_version, srcdir)
    check_targz_src('seafile', seafile_version, srcdir)
    check_targz_src('seahub', seafile_version, srcdir)
    check_targz_src_no_version('seafdav', srcdir)
    check_targz_src_no_version('seafobj', srcdir)

    # check_pdf2htmlEX()

    # [ builddir ]
    builddir = get_option(CONF_BUILDDIR)
    if not os.path.exists(builddir):
        error('%s does not exist' % builddir, usage=usage)

    builddir = os.path.join(builddir, 'seafile-server-build')

    # [ thirdpartdir ]
    thirdpartdir = get_option(CONF_THIRDPARTDIR)
    check_seahub_thirdpart(thirdpartdir)

    # [ outputdir ]
    outputdir = get_option(CONF_OUTPUTDIR)
    if outputdir:
        if not os.path.exists(outputdir):
            error('outputdir %s does not exist' % outputdir, usage=usage)
    else:
        outputdir = os.getcwd()

    # remaining simple options
    yes = get_option(CONF_YES)
    jobs = get_option(CONF_JOBS)
    keep = get_option(CONF_KEEP)
    nostrip = get_option(CONF_NO_STRIP)
    s3 = get_option(CONF_ENABLE_S3)
    mysql_config_path = get_option(CONF_MYSQL_CONFIG)

    conf[CONF_VERSION] = version
    conf[CONF_LIBSEARPC_VERSION] = libsearpc_version
    conf[CONF_SEAFILE_VERSION] = seafile_version

    conf[CONF_BUILDDIR] = builddir
    conf[CONF_SRCDIR] = srcdir
    conf[CONF_OUTPUTDIR] = outputdir
    conf[CONF_KEEP] = keep
    conf[CONF_THIRDPARTDIR] = thirdpartdir
    conf[CONF_NO_STRIP] = nostrip
    conf[CONF_ENABLE_S3] = s3
    conf[CONF_YES] = yes
    conf[CONF_JOBS] = jobs
    conf[CONF_MYSQL_CONFIG] = mysql_config_path

    prepare_builddir(builddir)
    show_build_info()
|
||||
|
||||
def show_build_info():
    '''Print a summary of the build configuration; pause for ENTER unless --yes.'''
    divider = '------------------------------------------'
    info(divider)
    info('Seafile server %s: BUILD INFO' % conf[CONF_VERSION])
    info(divider)
    for line in ('seafile: %s' % conf[CONF_SEAFILE_VERSION],
                 'libsearpc: %s' % conf[CONF_LIBSEARPC_VERSION],
                 'builddir: %s' % conf[CONF_BUILDDIR],
                 'outputdir: %s' % conf[CONF_OUTPUTDIR],
                 'source dir: %s' % conf[CONF_SRCDIR],
                 'strip symbols: %s' % (not conf[CONF_NO_STRIP]),
                 's3 support: %s' % (conf[CONF_ENABLE_S3]),
                 'clean on exit: %s' % (not conf[CONF_KEEP])):
        info(line)
    if conf[CONF_YES]:
        return
    info(divider)
    info('press any key to continue ')
    info(divider)
    input()
|
||||
|
||||
def prepare_builddir(builddir):
    '''Create the build directory skeleton, register cleanup, and chdir into it.'''
    must_mkdir(builddir)

    if not conf[CONF_KEEP]:
        # unless --keep was given, wipe the builddir when the script exits
        def remove_builddir():
            '''Remove the builddir when exit'''
            info('remove builddir before exit')
            shutil.rmtree(builddir, ignore_errors=True)
        atexit.register(remove_builddir)

    os.chdir(builddir)

    serverdir = os.path.join(builddir, 'seafile-server')
    must_mkdir(serverdir)
    must_mkdir(os.path.join(serverdir, 'seafile'))
|
||||
|
||||
def parse_args():
    '''Define and parse command line options, then hand them to validate_args().'''
    parser = optparse.OptionParser()

    def add(name, **kwargs):
        # Each conf key doubles as both the long option name and the dest.
        parser.add_option('--' + name, dest=name, **kwargs)

    add(CONF_YES, action='store_true')
    add(CONF_JOBS, default=2, type=int)
    add(CONF_THIRDPARTDIR, nargs=1,
        help='where to find the thirdpart libs for seahub')
    add(CONF_VERSION, nargs=1,
        help='the version to build. Must be digits delimited by dots, like 1.3.0')
    add(CONF_SEAFILE_VERSION, nargs=1,
        help='the version of seafile as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')
    add(CONF_LIBSEARPC_VERSION, nargs=1,
        help='the version of libsearpc as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')
    add(CONF_BUILDDIR, nargs=1, default=tempfile.gettempdir(),
        help='the directory to build the source. Defaults to /tmp')
    add(CONF_OUTPUTDIR, nargs=1, default=os.getcwd(),
        help='the output directory to put the generated server tarball. Defaults to the current directory.')
    add(CONF_SRCDIR, nargs=1,
        help='''Source tarballs must be placed in this directory.''')
    add(CONF_KEEP, action='store_true',
        help='''keep the build directory after the script exits. By default, the script would delete the build directory at exit.''')
    add(CONF_NO_STRIP, action='store_true',
        help='''do not strip debug symbols''')
    add(CONF_ENABLE_S3, action='store_true',
        help='''enable amazon s3 support''')
    add(CONF_MYSQL_CONFIG, nargs=1,
        help='''Absolute path to mysql_config or mariadb_config program.''')

    usage = parser.format_help()
    options, remain = parser.parse_args()
    if remain:
        error(usage=usage)

    validate_args(usage, options)
|
||||
|
||||
def setup_build_env():
    '''Export CPPFLAGS/CFLAGS/LDFLAGS/PATH/PKG_CONFIG_PATH pointing at the install prefix.'''
    prefix = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seafile')

    def prepend_env_value(name, value, seperator=':'):
        '''Prepend *value* to the ${name} environment variable.'''
        current = os.environ.get(name, '')
        os.environ[name] = (value + seperator + current) if current else value

    prepend_env_value('CPPFLAGS',
                      '-I%s' % os.path.join(prefix, 'include'),
                      seperator=' ')
    prepend_env_value('CPPFLAGS', '-DLIBICONV_PLUG', seperator=' ')

    if conf[CONF_NO_STRIP]:
        # keep debug info and disable optimization for debuggable binaries
        prepend_env_value('CPPFLAGS', '-g -O0', seperator=' ')
        prepend_env_value('CFLAGS', '-g -O0', seperator=' ')

    for libdir in ('lib', 'lib64'):
        prepend_env_value('LDFLAGS',
                          '-L%s' % os.path.join(prefix, libdir),
                          seperator=' ')
        prepend_env_value('PKG_CONFIG_PATH',
                          os.path.join(prefix, libdir, 'pkgconfig'))

    prepend_env_value('PATH', os.path.join(prefix, 'bin'))
|
||||
|
||||
def copy_user_manuals():
    '''Copy the seafile tutorial doc into the packaged server's docs directory.'''
    builddir = conf[CONF_BUILDDIR]
    # src_pattern = os.path.join(builddir, Seafile().projdir, 'doc', '*.doc')
    src_pattern = os.path.join(builddir, Seafile().projdir, 'doc', 'seafile-tutorial.doc')
    docs_dir = os.path.join(builddir, 'seafile-server', 'seafile', 'docs')

    must_mkdir(docs_dir)

    for manual in glob.glob(src_pattern):
        must_copy(manual, docs_dir)
|
||||
|
||||
def copy_seafdav():
    '''Extract the seafdav and seafobj tarballs into seahub/thirdpart.'''
    dst_dir = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seahub', 'thirdpart')

    # Both tarballs are extracted identically; the original duplicated this
    # code verbatim for each of them.
    for name in ('seafdav.tar.gz', 'seafobj.tar.gz'):
        tarball = os.path.join(conf[CONF_SRCDIR], name)
        if run('tar xf %s -C %s' % (tarball, dst_dir)) != 0:
            error('failed to uncompress %s' % tarball)
|
||||
|
||||
def copy_scripts_and_libs():
    '''Copy server release scripts and shared libs, as well as seahub thirdpart libs.'''
    builddir = conf[CONF_BUILDDIR]
    scripts_srcdir = os.path.join(builddir, Seafile().projdir, 'scripts')
    serverdir = os.path.join(builddir, 'seafile-server')

    # top-level setup/management scripts shipped with the server
    for script in ('setup-seafile.sh',
                   'setup-seafile-mysql.sh',
                   'setup-seafile-mysql.py',
                   'seafile.sh',
                   'seahub.sh',
                   'reset-admin.sh',
                   'seaf-fuse.sh',
                   'check_init_admin.py',
                   'seaf-gc.sh',
                   'seaf-fsck.sh'):
        must_copy(os.path.join(scripts_srcdir, script), serverdir)

    # copy the upgrade and sql script trees
    for subdir in ('upgrade', 'sql'):
        try:
            shutil.copytree(os.path.join(scripts_srcdir, subdir),
                            os.path.join(serverdir, subdir))
        except Exception as e:
            error('failed to copy %s scripts: %s' % (subdir, e))

    # copy runtime/seahub.conf
    runtimedir = os.path.join(serverdir, 'runtime')
    must_mkdir(runtimedir)
    must_copy(os.path.join(scripts_srcdir, 'seahub.conf'), runtimedir)

    # move seahub to seafile-server/seahub
    src_seahubdir = Seahub().projdir
    dst_seahubdir = os.path.join(serverdir, 'seahub')
    try:
        shutil.move(src_seahubdir, dst_seahubdir)
    except Exception as e:
        error('failed to move seahub to seafile-server/seahub: %s' % e)

    # copy seahub thirdpart libs and the webdav/seafobj tarballs
    copy_seahub_thirdpart_libs(os.path.join(dst_seahubdir, 'thirdpart'))
    copy_seafdav()

    # copy_pdf2htmlex()

    # copy shared c libs and the user manuals
    copy_shared_libs()
    copy_user_manuals()
|
||||
|
||||
def copy_pdf2htmlex():
    '''Copy the pdf2htmlEX executable and its dependent shared libs into the package.'''
    pdf2htmlEX_executable = find_in_path('pdf2htmlEX')
    libs = get_dependent_libs(pdf2htmlEX_executable)

    seafile_dir = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seafile')
    dst_lib_dir = os.path.join(seafile_dir, 'lib')
    dst_bin_dir = os.path.join(seafile_dir, 'bin')

    for lib in libs:
        target = os.path.join(dst_lib_dir, os.path.basename(lib))
        if os.path.exists(target):
            # already present (e.g. copied by copy_shared_libs)
            continue
        info('Copying %s' % lib)
        must_copy(lib, dst_lib_dir)

    must_copy(pdf2htmlEX_executable, dst_bin_dir)
|
||||
|
||||
def get_dependent_libs(executable):
    '''Return the set of non-system shared lib paths *executable* links against.

    Parses the output of `ldd`; libraries matching the syslibs list (plus
    our own libsearpc/libseafile) are skipped.
    '''
    syslibs = ['libsearpc', 'libseafile', 'libpthread.so', 'libc.so', 'libm.so', 'librt.so', 'libdl.so', 'libselinux.so', 'libresolv.so' ]

    def is_syslib(lib):
        return any(syslib in lib for syslib in syslibs)

    deps = set()
    for line in subprocess.getoutput('ldd %s' % executable).splitlines():
        # expected ldd line shape: "<soname> => <path> (<load address>)"
        fields = line.split()
        if len(fields) != 4:
            continue
        if not is_syslib(fields[0]):
            deps.add(fields[2])
    return deps
|
||||
|
||||
def copy_shared_libs():
    '''copy shared c libs, such as libevent, glib, libmysqlclient'''
    builddir = conf[CONF_BUILDDIR]
    seafile_dir = os.path.join(builddir, 'seafile-server', 'seafile')
    dst_dir = os.path.join(seafile_dir, 'lib')

    # gather the union of dependencies of both server binaries
    seafile_path = os.path.join(seafile_dir, 'bin', 'seaf-server')
    seaf_fuse_path = os.path.join(seafile_dir, 'bin', 'seaf-fuse')

    libs = set()
    libs.update(get_dependent_libs(seafile_path))
    libs.update(get_dependent_libs(seaf_fuse_path))

    for lib in libs:
        dst_file = os.path.join(dst_dir, os.path.basename(lib))
        if os.path.exists(dst_file):
            continue
        info('Copying %s' % lib)
        # CONSISTENCY FIX: use must_copy (as copy_pdf2htmlex does) so a
        # failed copy aborts with a clear error message instead of
        # propagating an unhandled shutil exception.
        must_copy(lib, dst_dir)
|
||||
|
||||
def copy_seahub_thirdpart_libs(seahub_thirdpart):
    '''copy python third-party libraries from ${thirdpartdir} to seahub/thirdpart'''
    src = conf[CONF_THIRDPARTDIR]
    dst = seahub_thirdpart

    try:
        for entry in os.listdir(src):
            src_path = os.path.join(src, entry)
            dst_path = os.path.join(dst, entry)
            # directories are copied recursively, plain files directly
            copier = shutil.copytree if os.path.isdir(src_path) else shutil.copy
            copier(src_path, dst_path)
    except Exception as e:
        error('failed to copy seahub thirdpart libs: %s' % e)
|
||||
|
||||
def strip_symbols():
    '''Walk seafile-server/seafile, stripping unstripped binaries and deleting static libs.'''
    def do_strip(fn):
        run('chmod u+w %s' % fn)
        info('stripping: %s' % fn)
        run('strip "%s"' % fn)

    def remove_static_lib(fn):
        info('removing: %s' % fn)
        os.remove(fn)

    for parent, _dnames, fnames in os.walk('seafile-server/seafile'):
        for fname in fnames:
            fn = os.path.join(parent, fname)
            if os.path.isdir(fn):
                continue

            # static archives and libtool files are dead weight in the package
            if fn.endswith(('.a', '.la')):
                remove_static_lib(fn)
                continue

            if os.path.islink(fn):
                continue

            # `file` reports "not stripped" for binaries with symbols left
            if 'not stripped' in subprocess.getoutput('file "%s"' % fn):
                do_strip(fn)
|
||||
|
||||
def create_tarball(tarball_name):
    '''Rename seafile-server to its versioned name and pack it into *tarball_name*.'''
    version = conf[CONF_VERSION]

    serverdir = 'seafile-server'
    versioned_serverdir = 'seafile-server-' + version

    # move seafile-server to seafile-server-${version}
    try:
        shutil.move(serverdir, versioned_serverdir)
    except Exception as e:
        error('failed to move %s to %s: %s' % (serverdir, versioned_serverdir, e))

    ignored_patterns = [
        # common ignored files
        '*.pyc',
        '*~',
        '*#',

        # seahub
        os.path.join(versioned_serverdir, 'seahub', '.git*'),
        os.path.join(versioned_serverdir, 'seahub', 'media', 'flexpaper*'),
        os.path.join(versioned_serverdir, 'seahub', 'avatar', 'testdata*'),

        # seafile
        os.path.join(versioned_serverdir, 'seafile', 'share*'),
        os.path.join(versioned_serverdir, 'seafile', 'include*'),
        os.path.join(versioned_serverdir, 'seafile', 'lib', 'pkgconfig*'),
        os.path.join(versioned_serverdir, 'seafile', 'lib64', 'pkgconfig*'),
        os.path.join(versioned_serverdir, 'seafile', 'bin', 'searpc-codegen.py'),
        os.path.join(versioned_serverdir, 'seafile', 'bin', 'seafile-admin'),
        os.path.join(versioned_serverdir, 'seafile', 'bin', 'seafile'),
    ]

    excludes = ' '.join('--exclude=%s' % pattern for pattern in ignored_patterns)

    tar_cmd = 'tar czf %(tarball_name)s %(versioned_serverdir)s %(excludes)s' \
              % dict(tarball_name=tarball_name,
                     versioned_serverdir=versioned_serverdir,
                     excludes=excludes)

    # BUGFIX: Popen.wait() is negative only when the child is killed by a
    # signal; tar reports failure with a positive exit status, which the
    # original `< 0` comparison never caught.
    if run(tar_cmd) != 0:
        error('failed to generate the tarball')
|
||||
|
||||
def gen_tarball():
    '''Strip symbols (unless --nostrip), build the tarball and copy it to outputdir.'''
    # strip symbols of libraries to reduce size
    if not conf[CONF_NO_STRIP]:
        try:
            strip_symbols()
        except Exception as e:
            error('failed to strip symbols: %s' % e)

    # determine the output name:
    #   64-bit: seafile-server_1.2.2_x86-64.tar.gz
    #   32-bit: seafile-server_1.2.2_i386.tar.gz
    version = conf[CONF_VERSION]
    arch = os.uname()[-1].replace('_', '-')
    if 'arm' in platform.machine():
        arch = 'pi'
    elif arch != 'x86-64':
        arch = 'i386'

    dbg = '.dbg' if conf[CONF_NO_STRIP] else ''

    tarball_name = 'seafile-server_%(version)s_%(arch)s%(dbg)s.tar.gz' \
                   % dict(version=version, arch=arch, dbg=dbg)
    dst_tarball = os.path.join(conf[CONF_OUTPUTDIR], tarball_name)

    # generate the tarball
    try:
        create_tarball(tarball_name)
    except Exception as e:
        error('failed to generate tarball: %s' % e)

    # move tarball to outputdir
    try:
        shutil.copy(tarball_name, dst_tarball)
    except Exception as e:
        error('failed to copy %s to %s: %s' % (tarball_name, dst_tarball, e))

    banner = '---------------------------------------------'
    print(banner)
    print('The build is successful. Output is:\t%s' % dst_tarball)
    print(banner)
|
||||
|
||||
def main():
    '''Entry point: parse args, build each component in order, then package everything.'''
    parse_args()
    setup_build_env()

    # build order matters: seafile depends on libsearpc; seahub needs no compile
    for project in (Libsearpc(), Seafile(), Seahub()):
        project.uncompress()
        project.build()

    copy_scripts_and_libs()
    gen_tarball()
|
||||
|
||||
# Run the build only when invoked as a script, not when imported.
if __name__ == '__main__':
    main()
|
# ---- second file in this mirror: check_init_admin.py ----
#coding: UTF-8
|
||||
|
||||
'''This script would check if there is an admin, and prompt the user to create a new one if none exists'''
|
||||
import json
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
import re
|
||||
import shutil
|
||||
import glob
|
||||
import subprocess
|
||||
import hashlib
|
||||
import getpass
|
||||
import uuid
|
||||
import warnings
|
||||
|
||||
from configparser import ConfigParser
|
||||
|
||||
from seaserv import ccnet_api
|
||||
|
||||
try:
|
||||
import readline # pylint: disable=W0611
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
# URL of the online seafile server manual, shown in the welcome message.
SERVER_MANUAL_HTTP = 'https://download.seafile.com/published/seafile-manual/home.md'
|
||||
|
||||
class Utils(object):
|
||||
'''Groups all helper functions here'''
|
||||
@staticmethod
|
||||
def welcome():
|
||||
'''Show welcome message'''
|
||||
welcome_msg = '''\
|
||||
-----------------------------------------------------------------
|
||||
This script will guide you to setup your seafile server using MySQL.
|
||||
Make sure you have read seafile server manual at
|
||||
|
||||
%s
|
||||
|
||||
Press ENTER to continue
|
||||
-----------------------------------------------------------------''' % SERVER_MANUAL_HTTP
|
||||
print(welcome_msg)
|
||||
input()
|
||||
|
||||
@staticmethod
|
||||
def highlight(content):
|
||||
'''Add ANSI color to content to get it highlighted on terminal'''
|
||||
return '\x1b[33m%s\x1b[m' % content
|
||||
|
||||
@staticmethod
|
||||
def info(msg):
|
||||
print(msg)
|
||||
|
||||
@staticmethod
|
||||
def error(msg):
|
||||
'''Print error and exit'''
|
||||
print()
|
||||
print('Error: ' + msg)
|
||||
sys.exit(1)
|
||||
|
||||
@staticmethod
|
||||
def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
|
||||
'''Run a program and wait it to finish, and return its exit code. The
|
||||
standard output of this program is supressed.
|
||||
|
||||
'''
|
||||
with open(os.devnull, 'w') as devnull:
|
||||
if suppress_stdout:
|
||||
stdout = devnull
|
||||
else:
|
||||
stdout = sys.stdout
|
||||
|
||||
if suppress_stderr:
|
||||
stderr = devnull
|
||||
else:
|
||||
stderr = sys.stderr
|
||||
|
||||
proc = subprocess.Popen(argv,
|
||||
cwd=cwd,
|
||||
stdout=stdout,
|
||||
stderr=stderr,
|
||||
env=env)
|
||||
return proc.wait()
|
||||
|
||||
@staticmethod
|
||||
def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
|
||||
'''Like run_argv but specify a command line string instead of argv'''
|
||||
with open(os.devnull, 'w') as devnull:
|
||||
if suppress_stdout:
|
||||
stdout = devnull
|
||||
else:
|
||||
stdout = sys.stdout
|
||||
|
||||
if suppress_stderr:
|
||||
stderr = devnull
|
||||
else:
|
||||
stderr = sys.stderr
|
||||
|
||||
proc = subprocess.Popen(cmdline,
|
||||
cwd=cwd,
|
||||
stdout=stdout,
|
||||
stderr=stderr,
|
||||
env=env,
|
||||
shell=True)
|
||||
return proc.wait()
|
||||
|
||||
@staticmethod
|
||||
def prepend_env_value(name, value, env=None, seperator=':'):
|
||||
'''prepend a new value to a list'''
|
||||
if env is None:
|
||||
env = os.environ
|
||||
|
||||
try:
|
||||
current_value = env[name]
|
||||
except KeyError:
|
||||
current_value = ''
|
||||
|
||||
new_value = value
|
||||
if current_value:
|
||||
new_value += seperator + current_value
|
||||
|
||||
env[name] = new_value
|
||||
|
||||
@staticmethod
|
||||
def must_mkdir(path):
|
||||
'''Create a directory, exit on failure'''
|
||||
try:
|
||||
os.mkdir(path)
|
||||
except OSError as e:
|
||||
Utils.error('failed to create directory %s:%s' % (path, e))
|
||||
|
||||
@staticmethod
|
||||
def must_copy(src, dst):
|
||||
'''Copy src to dst, exit on failure'''
|
||||
try:
|
||||
shutil.copy(src, dst)
|
||||
except Exception as e:
|
||||
Utils.error('failed to copy %s to %s: %s' % (src, dst, e))
|
||||
|
||||
@staticmethod
|
||||
def find_in_path(prog):
|
||||
if 'win32' in sys.platform:
|
||||
sep = ';'
|
||||
else:
|
||||
sep = ':'
|
||||
|
||||
dirs = os.environ['PATH'].split(sep)
|
||||
for d in dirs:
|
||||
d = d.strip()
|
||||
if d == '':
|
||||
continue
|
||||
path = os.path.join(d, prog)
|
||||
if os.path.exists(path):
|
||||
return path
|
||||
|
||||
return None
|
||||
|
||||
    @staticmethod
    def get_python_executable():
        '''Return the python executable to use.

        Reads the PYTHON environment variable, which is set by
        setup-seafile-mysql.sh before this script runs.

        Raises KeyError if PYTHON is not set in the environment.
        '''
        return os.environ['PYTHON']
|
||||
|
||||
@staticmethod
|
||||
def read_config(fn):
|
||||
'''Return a case sensitive ConfigParser by reading the file "fn"'''
|
||||
cp = ConfigParser()
|
||||
cp.optionxform = str
|
||||
cp.read(fn)
|
||||
|
||||
return cp
|
||||
|
||||
@staticmethod
|
||||
def write_config(cp, fn):
|
||||
'''Return a case sensitive ConfigParser by reading the file "fn"'''
|
||||
with open(fn, 'w') as fp:
|
||||
cp.write(fp)
|
||||
|
||||
    @staticmethod
    def ask_question(desc,
                     key=None,
                     note=None,
                     default=None,
                     validate=None,
                     yes_or_no=False,
                     password=False):
        '''Ask a question on the console and return the (validated) answer.

        @desc description, e.g. "What is the port of ccnet?"

        @key a name to represent the target of the question, e.g. "port for
        ccnet server"

        @note additional information for the question, e.g. "Must be a valid
        port number"

        @default the default value of the question. If the default value is
        not None, when the user enter nothing and press [ENTER], the default
        value would be returned

        @validate a function that takes the user input as the only parameter
        and validate it. It should return a validated value, or throws an
        "InvalidAnswer" exception if the input is not valid.

        @yes_or_no If true, the user must answer "yes" or "no", and a boolean
        value would be returned

        @password If true, the user input would not be echoed to the
        console

        '''
        # Either a key (shown as the placeholder hint) or yes/no mode is
        # required to render a meaningful prompt.
        assert key or yes_or_no
        # Format description
        print()
        if note:
            desc += '\n' + note

        desc += '\n'
        if yes_or_no:
            desc += '[ yes or no ]'
        else:
            if default:
                desc += '[ default "%s" ]' % default
            else:
                desc += '[ %s ]' % key

        desc += ' '
        # Loop until we obtain an acceptable answer.
        while True:
            # prompt for user input; getpass suppresses echo for passwords
            if password:
                answer = getpass.getpass(desc).strip()
            else:
                answer = input(desc).strip()

            # No user input: use default
            if not answer:
                if default:
                    answer = default
                else:
                    # No default either: re-prompt.
                    continue

            # Have user input: validate answer
            if yes_or_no:
                if answer not in ['yes', 'no']:
                    print(Utils.highlight('\nPlease answer yes or no\n'))
                    continue
                else:
                    return answer == 'yes'
            else:
                if validate:
                    try:
                        return validate(answer)
                    except InvalidAnswer as e:
                        # Show the validator's complaint and re-prompt.
                        print(Utils.highlight('\n%s\n' % e))
                        continue
                else:
                    return answer
|
||||
|
||||
@staticmethod
|
||||
def validate_port(port):
|
||||
try:
|
||||
port = int(port)
|
||||
except ValueError:
|
||||
raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port))
|
||||
|
||||
if port <= 0 or port > 65535:
|
||||
raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port))
|
||||
|
||||
return port
|
||||
|
||||
|
||||
class InvalidAnswer(Exception):
    '''Raised by answer validators when user input is not acceptable.'''

    def __init__(self, msg):
        super().__init__()
        # Keep the human-readable message; __str__ renders it for printing.
        self.msg = msg

    def __str__(self):
        return self.msg
|
||||
|
||||
### END of Utils
|
||||
####################
|
||||
|
||||
def need_create_admin():
    '''Return True when the ccnet "DB" backend contains no user yet.'''
    # Fetch at most one user; an empty result means no account exists,
    # so the admin still has to be created.
    return not ccnet_api.get_emailusers('DB', 0, 1)
|
||||
|
||||
def create_admin(email, passwd):
    '''Create the seafile admin account via ccnet_api; raise on failure.'''
    # add_emailuser returns a negative value on failure.
    if ccnet_api.add_emailuser(email, passwd, 1, 1) < 0:
        raise Exception('failed to create admin')
    banner = '----------------------------------------'
    print('\n\n')
    print(banner)
    print('Successfully created seafile admin')
    print(banner)
    print('\n\n')
|
||||
|
||||
def ask_admin_email():
    '''Interactively ask for the admin account's email address.

    Returns the validated email string entered by the user.
    '''
    print()
    print('----------------------------------------')
    print('It\'s the first time you start the seafile server. Now let\'s create the admin account')
    print('----------------------------------------')
    def validate(email):
        # whitespace is not allowed anywhere in the address
        if re.match(r'[\s]', email):
            raise InvalidAnswer('%s is not a valid email address' % Utils.highlight(email))
        # must look like an email address: something@domain.tld
        if not re.match(r'^.+@.*\..+$', email):
            raise InvalidAnswer('%s is not a valid email address' % Utils.highlight(email))

        return email

    key = 'admin email'
    question = 'What is the ' + Utils.highlight('email') + ' for the admin account?'
    # ask_question re-prompts until validate() accepts the input.
    return Utils.ask_question(question,
                              key=key,
                              validate=validate)
|
||||
|
||||
def ask_admin_password():
    '''Interactively ask for the admin password (entered twice, no echo).

    Returns the confirmed password string.
    '''
    def validate(password):
        # Ask for the password a second time and require an exact match.
        key = 'admin password again'
        question = 'Enter the ' + Utils.highlight('password again:')
        password_again = Utils.ask_question(question,
                                            key=key,
                                            password=True)

        if password_again != password:
            raise InvalidAnswer('password mismatch')

        return password

    key = 'admin password'
    question = 'What is the ' + Utils.highlight('password') + ' for the admin account?'
    # password=True suppresses terminal echo for both prompts.
    return Utils.ask_question(question,
                              key=key,
                              password=True,
                              validate=validate)
|
||||
|
||||
|
||||
def main():
    '''Create the seafile admin account if none exists yet.

    Credentials come from conf/admin.txt (JSON with "email" and "password"
    keys) when present — the file is deleted after use — otherwise the user
    is prompted interactively.
    '''
    if not need_create_admin():
        return

    # SEAFILE_CENTRAL_CONF_DIR is exported by the launcher script before
    # this program runs.
    password_file = os.path.join(os.environ['SEAFILE_CENTRAL_CONF_DIR'], 'admin.txt')
    if os.path.exists(password_file):
        with open(password_file, 'r') as fp:
            pwinfo = json.load(fp)
        email = pwinfo['email']
        passwd = pwinfo['password']
        # Remove the plaintext credentials once they have been consumed.
        os.unlink(password_file)
    else:
        email = ask_admin_email()
        passwd = ask_admin_password()

    create_admin(email, passwd)
|
||||
|
||||
if __name__ == '__main__':
|
||||
try:
|
||||
main()
|
||||
except KeyboardInterrupt:
|
||||
print('\n\n\n')
|
||||
print(Utils.highlight('Aborted.'))
|
||||
print()
|
||||
sys.exit(1)
|
||||
except Exception as e:
|
||||
print()
|
||||
print(Utils.highlight('Error happened during creating seafile admin.'))
|
||||
print()
|
@ -1,4 +0,0 @@
|
||||
@echo off
REM Run the seafile GC helper on Windows.
REM Work from the directory containing this script.
cd /d %~dp0
REM Make the bundled third-party python packages importable.
set PYTHONPATH=%PYTHONPATH%;%~dp0\seahub\thirdpart
REM Launch the GC script in a separate window.
start python upgrade/py/gc.py
|
@ -1,66 +0,0 @@
|
||||
#!/bin/bash
# Run seahub's "manage.py createsuperuser" with the server environment set up.

SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
central_config_dir=${TOPDIR}/conf

# Locate a Python 3 interpreter, honouring a pre-set $PYTHON.
function check_python_executable() {
    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
        return 0
    fi

    if which python3 2>/dev/null 1>&2; then
        PYTHON=python3
    elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
        echo
        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
        echo
        exit 1
    else
        PYTHON="python"$(python --version | cut -b 8-10)
        # BUGFIX: "!which" was parsed as a single (nonexistent) command name;
        # "!" must be a separate word to negate the pipeline.
        if ! which $PYTHON 2>/dev/null 1>&2; then
            echo
            echo "Can't find a python executable of $PYTHON in PATH"
            echo "Install $PYTHON before continue."
            echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
            echo
            exit 1
        fi
    fi
}

function validate_seafile_data_dir () {
    if [[ ! -d ${default_seafile_data_dir} ]]; then
        echo "Error: there is no seafile server data directory."
        echo "Have you run setup-seafile.sh before this?"
        echo ""
        exit 1;
    fi
}

function prepare_seahub_log_dir() {
    logdir=${TOPDIR}/logs
    # BUGFIX: the test previously checked the undefined ${logsdir}.
    if ! [[ -d ${logdir} ]]; then
        if ! mkdir -p "${logdir}"; then
            echo "ERROR: failed to create logs dir \"${logdir}\""
            exit 1
        fi
    fi
    export SEAHUB_LOG_DIR=${logdir}
}

check_python_executable;
validate_seafile_data_dir;
prepare_seahub_log_dir;

# Environment expected by seahub / seafile python code.
export CCNET_CONF_DIR=${default_ccnet_conf_dir}
export SEAFILE_CONF_DIR=${default_seafile_data_dir}
export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir}
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
export SEAFILE_RPC_PIPE_PATH=${INSTALLPATH}/runtime

manage_py=${INSTALLPATH}/seahub/manage.py
exec "$PYTHON" "$manage_py" createsuperuser
|
@ -1,62 +0,0 @@
|
||||
#!/bin/bash
# Wrapper around the bundled seaf-fsck binary: sets up paths and forwards
# all command line options to it.

echo ""

SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
default_conf_dir=${TOPDIR}/conf
seaf_fsck=${INSTALLPATH}/seafile/bin/seaf-fsck

export PATH=${INSTALLPATH}/seafile/bin:$PATH
export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}

script_name=$0
function usage () {
    echo "usage : "
    echo "$(basename ${script_name}) [-h/--help] [-r/--repair] [-E/--export path_to_export] [repo_id_1 [repo_id_2 ...]]"
    echo ""
}

function validate_seafile_data_dir () {
    # Refuse to run before setup has created the data directory.
    if [[ ! -d ${default_seafile_data_dir} ]]; then
        echo "Error: there is no seafile server data directory."
        echo "Have you run setup-seafile.sh before this?"
        echo ""
        exit 1;
    fi
}

function run_seaf_fsck () {
    validate_seafile_data_dir;

    echo "Starting seaf-fsck, please wait ..."
    echo

    # Run with the bundled libraries; ${seaf_fsck_opts} carries user options.
    LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_fsck} \
        -c "${default_ccnet_conf_dir}" -d "${default_seafile_data_dir}" \
        -F "${default_conf_dir}" \
        ${seaf_fsck_opts}

    echo "seaf-fsck run done"
    echo
}

# Show usage and exit if any argument asks for help.
if [ $# -gt 0 ];
then
    for param in $@;
    do
        if [ ${param} = "-h" -o ${param} = "--help" ];
        then
            usage;
            exit 1;
        fi
    done
fi

seaf_fsck_opts=$@
run_seaf_fsck;

echo "Done."
|
@ -1,122 +0,0 @@
|
||||
#!/bin/bash
# Start/stop/restart the seaf-fuse FUSE mount of the seafile data store.

echo ""

SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
default_conf_dir=${TOPDIR}/conf
seaf_fuse=${INSTALLPATH}/seafile/bin/seaf-fuse

export PATH=${INSTALLPATH}/seafile/bin:$PATH
export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}

script_name=$0
function usage () {
    echo "usage : "
    echo "$(basename ${script_name}) { start <mount-point> | stop | restart <mount-point> } "
    echo ""
}

# check args: first word must be a known sub-command
if [[ "$1" != "start" && "$1" != "stop" && "$1" != "restart" ]]; then
    usage;
    exit 1;
fi

# start/restart need a mount point argument
if [[ ($1 == "start" || $1 == "restart" ) && $# -lt 2 ]]; then
    usage;
    exit 1
fi

# stop takes no extra arguments
if [[ $1 == "stop" && $# != 1 ]]; then
    usage;
    exit 1
fi

function validate_seafile_data_dir () {
    if [[ ! -d ${default_seafile_data_dir} ]]; then
        echo "Error: there is no seafile server data directory."
        echo "Have you run setup-seafile.sh before this?"
        echo ""
        exit 1;
    fi
}

function validate_already_running () {
    # A running seaf-fuse is detected by its command line.
    if pid=$(pgrep -f "seaf-fuse -c ${default_ccnet_conf_dir}" 2>/dev/null); then
        echo "seaf-fuse is already running, pid $pid"
        echo
        exit 1;
    fi
}

function warning_if_seafile_not_running () {
    # Only a warning: seaf-fuse can still be started without the controller.
    if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then
        echo
        echo "Warning: seafile-controller not running. Have you run \"./seafile.sh start\" ?"
        echo
    fi
}

function start_seaf_fuse () {
    validate_already_running;
    warning_if_seafile_not_running;
    validate_seafile_data_dir;

    echo "Starting seaf-fuse, please wait ..."

    logfile=${TOPDIR}/logs/seaf-fuse.log

    # Remaining arguments (mount point, fuse options) are passed through.
    LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_fuse} \
        -c "${default_ccnet_conf_dir}" \
        -d "${default_seafile_data_dir}" \
        -F "${default_conf_dir}" \
        -l "${logfile}" \
        "$@"

    sleep 2

    # check if seaf-fuse started successfully
    if ! pgrep -f "seaf-fuse -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then
        echo "Failed to start seaf-fuse"
        exit 1;
    fi

    echo "seaf-fuse started"
    echo
}

function stop_seaf_fuse() {
    if ! pgrep -f "seaf-fuse -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then
        echo "seaf-fuse not running yet"
        return 1;
    fi

    echo "Stopping seaf-fuse ..."
    pkill -SIGTERM -f "seaf-fuse -c ${default_ccnet_conf_dir}"
    return 0
}

function restart_seaf_fuse () {
    stop_seaf_fuse
    sleep 2
    start_seaf_fuse $@
}

# Dispatch on the sub-command; "shift" drops it so only the mount point
# and fuse options reach the start functions.
case $1 in
    "start" )
        shift
        start_seaf_fuse $@;
        ;;
    "stop" )
        stop_seaf_fuse;
        ;;
    "restart" )
        shift
        restart_seaf_fuse $@;
esac

echo "Done."
|
@ -1,91 +0,0 @@
|
||||
#!/bin/bash
# Run garbage collection (seafserv-gc) on the seafile data store.
# The server must be stopped first; all options are forwarded to the binary.

echo ""

SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
default_conf_dir=${TOPDIR}/conf
seaf_gc=${INSTALLPATH}/seafile/bin/seafserv-gc
seaf_gc_opts=""

export PATH=${INSTALLPATH}/seafile/bin:$PATH
export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}

script_name=$0
function usage () {
    echo "usage : "
    echo "$(basename ${script_name}) [--dry-run | -D] [--rm-deleted | -r] [repo-id1] [repo-id2]"
    echo ""
}


function validate_seafile_data_dir () {
    if [[ ! -d ${default_seafile_data_dir} ]]; then
        echo "Error: there is no seafile server data directory."
        echo "Have you run setup-seafile.sh before this?"
        echo ""
        exit 1;
    fi
}

function check_component_running() {
    # Abort if the named component ($1), matched by command line ($2),
    # is still running — GC must not run concurrently with the server.
    name=$1
    cmd=$2
    if pid=$(pgrep -f "$cmd" 2>/dev/null); then
        echo "[$name] is running, pid $pid. You can stop it by: "
        echo
        echo "        kill $pid"
        echo
        echo "Stop it and try again."
        echo
        exit
    fi
}

function validate_already_running () {
    if pid=$(pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null); then
        echo "seafile server is still running, stop it by \"seafile.sh stop\""
        echo
        exit 1;
    fi

    check_component_running "seaf-server" "seaf-server -c ${default_ccnet_conf_dir}"
    check_component_running "fileserver" "fileserver -c ${default_ccnet_conf_dir}"
    check_component_running "seafdav" "wsgidav.server.server_cli"
}

function run_seaf_gc () {
    validate_already_running;
    validate_seafile_data_dir;

    echo "Starting seafserv-gc, please wait ..."

    LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_gc} \
        -c "${default_ccnet_conf_dir}" \
        -d "${default_seafile_data_dir}" \
        -F "${default_conf_dir}" \
        ${seaf_gc_opts}

    echo "seafserv-gc run done"
    echo
}

# Show usage and exit if any argument asks for help.
if [ $# -gt 0 ];
then
    for param in $@;
    do
        if [ ${param} = "-h" -o ${param} = "--help" ];
        then
            usage;
            exit 1;
        fi
    done
fi

seaf_gc_opts=$@
run_seaf_gc;

echo "Done."
|
@ -1,153 +0,0 @@
|
||||
#!/bin/bash
# Init-style control script for the seafile server (seafile-controller).

### BEGIN INIT INFO
# Provides:          seafile
# Required-Start:    $local_fs $remote_fs $network
# Required-Stop:     $local_fs
# Default-Start:     1 2 3 4 5
# Default-Stop:
# Short-Description: Starts Seafile Server
# Description:       starts Seafile Server
### END INIT INFO

echo ""

SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
central_config_dir=${TOPDIR}/conf
seaf_controller="${INSTALLPATH}/seafile/bin/seafile-controller"

export PATH=${INSTALLPATH}/seafile/bin:$PATH
export ORIG_LD_LIBRARY_PATH=${LD_LIBRARY_PATH}
export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}

script_name=$0
function usage () {
    echo "usage : "
    echo "$(basename ${script_name}) { start | stop | restart } "
    echo ""
}

# check args: exactly one of start/stop/restart
if [[ $# != 1 || ( "$1" != "start" && "$1" != "stop" && "$1" != "restart" ) ]]; then
    usage;
    exit 1;
fi

function validate_running_user () {
    # The server must run as the owner of the data directory, otherwise
    # it would create files the owner cannot access.
    real_data_dir=`readlink -f ${default_seafile_data_dir}`
    running_user=`id -un`
    data_dir_owner=`stat -c %U ${real_data_dir}`

    if [[ "${running_user}" != "${data_dir_owner}" ]]; then
        echo "Error: the user running the script (\"${running_user}\") is not the owner of \"${real_data_dir}\" folder, you should use the user \"${data_dir_owner}\" to run the script."
        exit -1;
    fi
}

function validate_central_conf_dir () {
    if [[ ! -d ${central_config_dir} ]]; then
        echo "Error: there is no conf/ directory."
        echo "Have you run setup-seafile.sh before this?"
        echo ""
        exit -1;
    fi
}

function validate_seafile_data_dir () {
    if [[ ! -d ${default_seafile_data_dir} ]]; then
        echo "Error: there is no seafile server data directory."
        echo "Have you run setup-seafile.sh before this?"
        echo ""
        exit 1;
    fi
}

function check_component_running() {
    # Abort if the named component ($1), matched by command line ($2),
    # is already running.
    name=$1
    cmd=$2
    if pid=$(pgrep -f "$cmd" 2>/dev/null); then
        echo "[$name] is running, pid $pid. You can stop it by: "
        echo
        echo "        kill $pid"
        echo
        echo "Stop it and try again."
        echo
        exit
    fi
}

function validate_already_running () {
    if pid=$(pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null); then
        echo "Seafile controller is already running, pid $pid"
        echo
        exit 1;
    fi

    check_component_running "seaf-server" "seaf-server -c ${default_ccnet_conf_dir}"
    check_component_running "fileserver" "fileserver -c ${default_ccnet_conf_dir}"
    check_component_running "seafdav" "wsgidav.server.server_cli"
}

function start_seafile_server () {
    validate_already_running;
    validate_central_conf_dir;
    validate_seafile_data_dir;
    validate_running_user;

    echo "Starting seafile server, please wait ..."

    mkdir -p $TOPDIR/logs
    LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_controller} \
        -c "${default_ccnet_conf_dir}" \
        -d "${default_seafile_data_dir}" \
        -F "${central_config_dir}"

    sleep 3

    # check if seafile server started successfully
    if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then
        echo "Failed to start seafile server"
        exit 1;
    fi

    echo "Seafile server started"
    echo
}

function stop_seafile_server () {
    if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then
        echo "seafile server not running yet"
        return 1;
    fi

    echo "Stopping seafile server ..."
    # Stop the controller first, then any child/auxiliary processes.
    pkill -SIGTERM -f "seafile-controller -c ${default_ccnet_conf_dir}"
    pkill -f "seaf-server -c ${default_ccnet_conf_dir}"
    pkill -f "fileserver -c ${default_ccnet_conf_dir}"
    pkill -f "soffice.*--invisible --nocrashreport"
    pkill -f "wsgidav.server.server_cli"
    return 0
}

function restart_seafile_server () {
    stop_seafile_server;
    sleep 2
    start_seafile_server;
}

case $1 in
    "start" )
        start_seafile_server;
        ;;
    "stop" )
        stop_seafile_server;
        ;;
    "restart" )
        restart_seafile_server;
esac

echo "Done."
|
@ -1,121 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
#coding: utf-8
|
||||
|
||||
import os
|
||||
import sys
|
||||
import logging
|
||||
from threading import Thread
|
||||
import queue
|
||||
import rados
|
||||
|
||||
from seafobj.objstore_factory import SeafObjStoreFactory
|
||||
|
||||
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
|
||||
|
||||
class Worker(Thread):
    '''Thread that consumes tasks from a queue until a None sentinel arrives.

    Each task is handed to *do_work*; failures are logged and the worker
    keeps running. task_done() is called for every dequeued item, including
    the sentinel, so Queue.join() stays balanced.
    '''

    def __init__(self, do_work, task_queue):
        super().__init__()
        self.do_work = do_work
        self.task_queue = task_queue

    def run(self):
        running = True
        while running:
            try:
                task = self.task_queue.get()
                if task is None:
                    # Sentinel: finish after marking it done in `finally`.
                    running = False
                else:
                    self.do_work(task)
            except Exception as e:
                logging.warning('Failed to execute task: %s' % e)
            finally:
                self.task_queue.task_done()
|
||||
|
||||
class ThreadPool(object):
    '''Fixed-size pool of Worker threads fed from one shared queue.'''

    def __init__(self, do_work, nworker=20):
        self.do_work = do_work
        self.nworker = nworker
        self.task_queue = queue.Queue()

    def start(self):
        # Spawn the workers; they block on the queue until tasks arrive.
        for _ in range(self.nworker):
            Worker(self.do_work, self.task_queue).start()

    def put_task(self, task):
        self.task_queue.put(task)

    def join(self):
        # Wait until every queued task has been processed ...
        self.task_queue.join()
        # ... then push one None sentinel per worker so each thread exits.
        for _ in range(self.nworker):
            self.task_queue.put(None)
|
||||
|
||||
class Task(object):
    '''One object to migrate, identified by (repo_id, repo_version, obj_id).'''

    def __init__(self, repo_id, repo_version, obj_id):
        self.repo_id = repo_id
        self.repo_version = repo_version
        self.obj_id = obj_id
|
||||
|
||||
class ObjMigrateWorker(Thread):
    '''Migrate every object of one data type from a filesystem-backed
    object store to a Ceph-backed one, using a thread pool of copiers.

    dtype is one of 'commits', 'fs' or 'blocks' (see main()).
    '''

    def __init__(self, orig_obj_factory, dest_obj_factory, dtype):
        Thread.__init__(self)
        self.dtype = dtype
        # Source and destination stores for this data type.
        self.orig_store = orig_obj_factory.get_obj_store(dtype)
        self.dest_store = dest_obj_factory.get_obj_store(dtype)
        self.thread_pool = ThreadPool(self.do_work)

    def run(self):
        logging.info('Start to migrate [%s] object' % self.dtype)
        self.thread_pool.start()
        # Enumerate all objects and queue one copy task per object.
        self.migrate()
        # Blocks until every queued task has been processed.
        self.thread_pool.join()
        logging.info('Complete migrate [%s] object' % self.dtype)

    def do_work(self, task):
        '''Copy a single object to Ceph unless it is already there.'''
        ioctx = self.dest_store.ceph_client.ioctx_pool.get_ioctx(task.repo_id)
        try:
            # stat() probes for existence; ObjectNotFound means we must copy.
            ioctx.stat(task.obj_id)
        except rados.ObjectNotFound:
            try:
                data = self.orig_store.read_obj_raw(task.repo_id, task.repo_version, task.obj_id)
            except Exception as e:
                logging.warning('[%s] Failed to read object %s from repo %s: %s' % (self.dtype, task.obj_id, task.repo_id, e))
                raise

            try:
                ioctx.write_full(task.obj_id, data)
            except Exception as e:
                logging.warning('[%s] Failed to write object %s of repo %s to Ceph: %s' % (self.dtype, task.obj_id, task.repo_id, e))
                raise
        except Exception as e:
            # Any other stat() failure is fatal for this task.
            logging.warning('[%s] Failed to stat object %s of repo %s in Ceph: %s' % (self.dtype, task.obj_id, task.repo_id, e))
            raise
        finally:
            # Always hand the io context back to the pool.
            self.dest_store.ceph_client.ioctx_pool.return_ioctx(ioctx)

    def migrate(self):
        '''Walk the on-disk store layout (repo_id/prefix/suffix) and queue
        one Task per object; the object id is prefix + suffix.'''
        top_path = self.orig_store.obj_dir
        for repo_id in os.listdir(top_path):
            repo_path = os.path.join(top_path, repo_id)
            for spath in os.listdir(repo_path):
                obj_path = os.path.join(repo_path, spath)
                for lpath in os.listdir(obj_path):
                    obj_id = spath + lpath
                    # NOTE(review): repo_version is hard-coded to 1 here —
                    # presumably sufficient for this migration; verify.
                    task = Task(repo_id, 1, obj_id)
                    self.thread_pool.put_task(task)
|
||||
|
||||
def main():
    '''Migrate commits, fs and blocks objects from the local store to Ceph.

    The default SeafObjStoreFactory (built from the current environment)
    is the source; after switching SEAFILE_CENTRAL_CONF_DIR to the value
    of CEPH_SEAFILE_CENTRAL_CONF_DIR, a second factory is created for the
    Ceph destination.
    '''
    try:
        fs_obj_factory = SeafObjStoreFactory()
        # Point the factory machinery at the Ceph-side configuration.
        os.environ['SEAFILE_CENTRAL_CONF_DIR'] = os.environ['CEPH_SEAFILE_CENTRAL_CONF_DIR']
    except KeyError:
        logging.warning('CEPH_SEAFILE_CENTRAL_CONF_DIR environment variable is not set.\n')
        sys.exit()

    ceph_obj_factory = SeafObjStoreFactory()

    # One migration worker thread per data type, run concurrently.
    dtypes = ['commits', 'fs', 'blocks']
    for dtype in dtypes:
        ObjMigrateWorker(fs_obj_factory, ceph_obj_factory, dtype).start()
|
||||
|
||||
# Script entry point: kick off the migration workers.
if __name__ == '__main__':
    main()
|
@ -1,15 +0,0 @@
|
||||
# Gunicorn configuration for seahub.
import os

# Run detached from the terminal with a small fixed worker pool.
daemon = True
workers = 5

# Logging: keep runtime files next to this config file.
runtime_dir = os.path.dirname(__file__)
pidfile = os.path.join(runtime_dir, 'seahub.pid')
errorlog = os.path.join(runtime_dir, 'error.log')

# disable access log
#accesslog = os.path.join(runtime_dir, 'access.log')

# for file upload, we need a longer timeout value (default is only 30s, too short)
timeout = 1200
|
@ -1,294 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
### BEGIN INIT INFO
|
||||
# Provides: seahub
|
||||
# Required-Start: $local_fs $remote_fs $network
|
||||
# Required-Stop: $local_fs
|
||||
# Default-Start: 1 2 3 4 5
|
||||
# Default-Stop:
|
||||
# Short-Description: Starts Seahub
|
||||
# Description: starts Seahub
|
||||
### END INIT INFO
|
||||
|
||||
echo ""
|
||||
|
||||
SCRIPT=$(readlink -f "$0")
|
||||
INSTALLPATH=$(dirname "${SCRIPT}")
|
||||
TOPDIR=$(dirname "${INSTALLPATH}")
|
||||
default_ccnet_conf_dir=${TOPDIR}/ccnet
|
||||
default_seafile_data_dir=${TOPDIR}/seafile-data
|
||||
central_config_dir=${TOPDIR}/conf
|
||||
seafile_rpc_pipe_path=${INSTALLPATH}/runtime
|
||||
|
||||
manage_py=${INSTALLPATH}/seahub/manage.py
|
||||
gunicorn_conf=${TOPDIR}/conf/gunicorn.conf.py
|
||||
pidfile=${TOPDIR}/pids/seahub.pid
|
||||
errorlog=${TOPDIR}/logs/gunicorn_error.log
|
||||
accesslog=${TOPDIR}/logs/gunicorn_access.log
|
||||
gunicorn_exe=${INSTALLPATH}/seahub/thirdpart/bin/gunicorn
|
||||
|
||||
script_name=$0
|
||||
function usage () {
|
||||
echo "Usage: "
|
||||
echo
|
||||
echo " $(basename ${script_name}) { start <port> | stop | restart <port> }"
|
||||
echo
|
||||
echo "To run seahub in fastcgi:"
|
||||
echo
|
||||
echo " $(basename ${script_name}) { start-fastcgi <port> | stop | restart-fastcgi <port> }"
|
||||
echo
|
||||
echo "<port> is optional, and defaults to 8000"
|
||||
echo ""
|
||||
}
|
||||
|
||||
# Check args
|
||||
if [[ $1 != "start" && $1 != "stop" && $1 != "restart" \
|
||||
&& $1 != "start-fastcgi" && $1 != "restart-fastcgi" && $1 != "clearsessions" && $1 != "python-env" ]]; then
|
||||
usage;
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
function check_python_executable() {
|
||||
if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
if which python3 2>/dev/null 1>&2; then
|
||||
PYTHON=python3
|
||||
elif !(python --version 2>&1 | grep "3\.[0-9]\.[0-9]") 2>/dev/null 1>&2; then
|
||||
echo
|
||||
echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
|
||||
echo
|
||||
exit 1
|
||||
else
|
||||
PYTHON="python"$(python --version | cut -b 8-10)
|
||||
if !which $PYTHON 2>/dev/null 1>&2; then
|
||||
echo
|
||||
echo "Can't find a python executable of $PYTHON in PATH"
|
||||
echo "Install $PYTHON before continue."
|
||||
echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
|
||||
echo
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function validate_seafile_data_dir () {
|
||||
if [[ ! -d ${default_seafile_data_dir} ]]; then
|
||||
echo "Error: there is no seafile server data directory."
|
||||
echo "Have you run setup-seafile.sh before this?"
|
||||
echo ""
|
||||
exit 1;
|
||||
fi
|
||||
}
|
||||
|
||||
function validate_seahub_running () {
|
||||
if pgrep -f "${manage_py}" 2>/dev/null 1>&2; then
|
||||
echo "Seahub is already running."
|
||||
exit 1;
|
||||
elif pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then
|
||||
echo "Seahub is already running."
|
||||
exit 1;
|
||||
fi
|
||||
}
|
||||
|
||||
function validate_port () {
|
||||
if ! [[ ${port} =~ ^[1-9][0-9]{1,4}$ ]] ; then
|
||||
printf "\033[033m${port}\033[m is not a valid port number\n\n"
|
||||
usage;
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
if [[ ($1 == "start" || $1 == "restart" || $1 == "start-fastcgi" || $1 == "restart-fastcgi") \
|
||||
&& ($# == 2 || $# == 1) ]]; then
|
||||
if [[ $# == 2 ]]; then
|
||||
port=$2
|
||||
validate_port
|
||||
else
|
||||
port=8000
|
||||
fi
|
||||
elif [[ $1 == "stop" && $# == 1 ]]; then
|
||||
dummy=dummy
|
||||
elif [[ $1 == "clearsessions" && $# == 1 ]]; then
|
||||
dummy=dummy
|
||||
elif [[ $1 == "python-env" ]]; then
|
||||
dummy=dummy
|
||||
else
|
||||
usage;
|
||||
exit 1
|
||||
fi
|
||||
|
||||
function warning_if_seafile_not_running () {
|
||||
if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then
|
||||
echo
|
||||
echo "Warning: seafile-controller not running. Have you run \"./seafile.sh start\" ?"
|
||||
echo
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function prepare_seahub_log_dir() {
|
||||
logdir=${TOPDIR}/logs
|
||||
if ! [[ -d ${logsdir} ]]; then
|
||||
if ! mkdir -p "${logdir}"; then
|
||||
echo "ERROR: failed to create logs dir \"${logdir}\""
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
export SEAHUB_LOG_DIR=${logdir}
|
||||
}
|
||||
|
||||
function before_start() {
|
||||
prepare_env;
|
||||
warning_if_seafile_not_running;
|
||||
validate_seahub_running;
|
||||
prepare_seahub_log_dir;
|
||||
}
|
||||
|
||||
function start_seahub () {
|
||||
before_start;
|
||||
echo "Starting seahub at port ${port} ..."
|
||||
check_init_admin;
|
||||
$PYTHON $gunicorn_exe seahub.wsgi:application -c "${gunicorn_conf}" --preload
|
||||
|
||||
# Ensure seahub is started successfully
|
||||
sleep 5
|
||||
if ! pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then
|
||||
printf "\033[33mError:Seahub failed to start.\033[m\n"
|
||||
echo "Please try to run \"./seahub.sh start\" again"
|
||||
exit 1;
|
||||
fi
|
||||
echo
|
||||
echo "Seahub is started"
|
||||
echo
|
||||
}
|
||||
|
||||
function start_seahub_fastcgi () {
|
||||
before_start;
|
||||
|
||||
# Returns 127.0.0.1 if SEAFILE_FASTCGI_HOST is unset or hasn't got any value,
|
||||
# otherwise returns value of SEAFILE_FASTCGI_HOST environment variable
|
||||
address=`(test -z "$SEAFILE_FASTCGI_HOST" && echo "127.0.0.1") || echo $SEAFILE_FASTCGI_HOST`
|
||||
|
||||
echo "Starting seahub (fastcgi) at ${address}:${port} ..."
|
||||
check_init_admin;
|
||||
$PYTHON "${manage_py}" runfcgi maxchildren=8 host=$address port=$port pidfile=$pidfile \
|
||||
outlog=${accesslog} errlog=${errorlog}
|
||||
|
||||
# Ensure seahub is started successfully
|
||||
sleep 5
|
||||
if ! pgrep -f "${manage_py}" 1>/dev/null; then
|
||||
printf "\033[33mError:Seahub failed to start.\033[m\n"
|
||||
exit 1;
|
||||
fi
|
||||
echo
|
||||
echo "Seahub is started"
|
||||
echo
|
||||
}
|
||||
|
||||
# Validate prerequisites and export every variable seahub processes need:
# locale, conf dirs, rpc pipe path and the bundled-package PYTHONPATH.
function prepare_env() {
    check_python_executable
    validate_seafile_data_dir

    # Force a UTF-8 locale when the environment does not provide one.
    local locale_var
    for locale_var in LANG LC_ALL; do
        if [[ -z "${!locale_var}" ]]; then
            echo "${locale_var} is not set in ENV, set to en_US.UTF-8"
            export "${locale_var}=en_US.UTF-8"
        fi
    done

    export CCNET_CONF_DIR=${default_ccnet_conf_dir}
    export SEAFILE_CONF_DIR=${default_seafile_data_dir}
    export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir}
    export SEAFILE_RPC_PIPE_PATH=${seafile_rpc_pipe_path}
    export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
}
|
||||
|
||||
# Remove expired Django session rows via manage.py clearsessions.
function clear_sessions () {
    prepare_env

    echo "Start clear expired session records ..."
    $PYTHON "${manage_py}" clearsessions

    echo
    echo "Done"
    echo
}
|
||||
|
||||
# Kill the gunicorn processes recorded by the pidfile's presence; verify
# they are gone before removing the pidfile.
function stop_seahub () {
    # No pidfile means no instance we started is running.
    if [[ ! -f ${pidfile} ]]; then
        echo "Seahub is not running"
        return
    fi

    echo "Stopping seahub ..."
    pkill -9 -f "thirdpart/bin/gunicorn"
    sleep 1
    if pgrep -f "thirdpart/bin/gunicorn" >/dev/null 2>&1; then
        echo 'Failed to stop seahub.'
        exit 1
    fi
    rm -f ${pidfile}
    return 0
}
|
||||
|
||||
# Run the bundled helper that ensures an admin account exists; any
# failure there aborts the start.
function check_init_admin() {
    check_init_admin_script=${INSTALLPATH}/check_init_admin.py
    $PYTHON $check_init_admin_script || exit 1
}
|
||||
|
||||
# Run a command — or an interactive interpreter — inside the fully
# prepared seafile environment (conf dirs and PYTHONPATH exported by
# prepare_env).
function run_python_env() {
    local pyexec

    prepare_env;

    # Prefer ipython for the interactive case when it is on PATH.
    if which ipython 2>/dev/null; then
        pyexec=ipython
    else
        pyexec=$PYTHON
    fi

    # No arguments: drop into an interactive interpreter.
    # With arguments: execute them verbatim in the prepared environment
    # (the interpreter is intentionally NOT prepended, so any command —
    # not just python scripts — can be run).
    if [[ $# -eq 0 ]]; then
        $pyexec "$@"
    else
        "$@"
    fi
}
|
||||
|
||||
# Command dispatcher: $1 selects the action. An unrecognized or missing
# command falls through the case silently and only prints "Done.".
case $1 in
    "start" )
        start_seahub;
        ;;
    "start-fastcgi" )
        start_seahub_fastcgi;
        ;;
    "stop" )
        stop_seahub;
        ;;
    "restart" )
        stop_seahub
        sleep 2
        start_seahub
        ;;
    "restart-fastcgi" )
        stop_seahub
        sleep 2
        start_seahub_fastcgi
        ;;
    "python-env")
        # Drop the "python-env" word; the remainder is the command to run.
        shift
        run_python_env "$@"
        ;;
    "clearsessions" )
        clear_sessions
        ;;
esac

echo "Done."
echo ""
|
@ -1,31 +0,0 @@
|
||||
# Server Release Package
|
||||
|
||||
1. Libsearpc
|
||||
cd libsearpc;
|
||||
CFLAGS="-O2" ./configure --prefix=$dest
|
||||
make install
|
||||
2. Ccnet
|
||||
cd ccnet;
|
||||
CFLAGS="-O2" ./configure --enable-server-pkg --prefix=$dest
|
||||
make install
|
||||
3. Seafile
|
||||
cd seafile;
|
||||
CFLAGS="-O2" ./configure --enable-server-pkg --prefix=$dest
|
||||
make install
|
||||
4. copy shared libraries
|
||||
scripts/cp-shared-lib.py $dest/lib
|
||||
5. strip libs/executables
|
||||
python do-strip.py
|
||||
6. Update seahub
|
||||
cd seahub
|
||||
git fetch origin
|
||||
git checkout release
|
||||
git rebase origin/master
|
||||
|
||||
7. Pack
|
||||
./pack-server.sh 1.0.0
|
||||
|
||||
DONE!
|
||||
|
||||
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1,58 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
########
|
||||
### This script is a wrapper for setup-seafile-mysql.py
|
||||
########
|
||||
|
||||
# Abort immediately if any command fails.
set -e

# Resolve the directory this wrapper lives in so it works regardless of
# the caller's current directory.
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")

cd "$INSTALLPATH"

# The real setup logic lives in this python script.
python_script=setup-seafile-mysql.py
|
||||
|
||||
# Print a highlighted failure notice and abort the setup.
function err_and_quit () {
    printf "\n\n\033[33mError occured during setup. \nPlease fix possible problems and run the script again.\033[m\n\n"
    exit 1;
}
|
||||
|
||||
# Locate a usable Python 3 interpreter and record its command in $PYTHON.
# Honors a pre-set $PYTHON; otherwise prefers python3, then falls back to
# probing the version reported by plain `python`.
function check_python_executable() {
    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
        return 0
    fi

    if which python3 2>/dev/null 1>&2; then
        PYTHON=python3
    # Fix: the original wrote `!(python ...)` and `!which` with no space
    # after `!`, so bash searched for commands literally named that and
    # the negated checks could never behave as intended.
    elif ! python --version 2>&1 | grep "3\.[0-9]\.[0-9]" >/dev/null 2>&1; then
        echo
        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
        echo
        err_and_quit
    else
        # e.g. "Python 3.8.10" -> bytes 8-10 -> "3.8" -> python3.8
        PYTHON="python"$(python --version | cut -b 8-10)
        if ! which $PYTHON 2>/dev/null 1>&2; then
            echo
            echo "Can't find a python executable of $PYTHON in PATH"
            echo "Install $PYTHON before continue."
            echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
            echo
            err_and_quit
        fi
    fi
}
|
||||
|
||||
# Announce and run the interpreter detection (sets $PYTHON on success,
# aborts via err_and_quit otherwise).
function check_python () {
    echo "Checking python on this machine ..."
    check_python_executable
    echo
}
|
||||
|
||||
check_python;

# Make the chosen interpreter name visible to the python setup script.
export PYTHON=$PYTHON

# The bundled seafile bindings and thirdpart packages must be importable
# by the setup script.
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python3/site-packages:${INSTALLPATH}/seafile/lib64/python3/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH

# Replace this shell with the python setup script, forwarding all args.
exec $PYTHON "$python_script" "$@"
|
@ -1,740 +0,0 @@
|
||||
#!/bin/bash

# Resolve the install layout relative to this script's location: the
# versioned install dir (INSTALLPATH) sits under TOPDIR, and all data,
# conf, pid and log directories are created as siblings under TOPDIR.
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
default_seahub_db=${TOPDIR}/seahub.db
default_conf_dir=${TOPDIR}/conf
default_pids_dir=${TOPDIR}/pids
default_logs_dir=${TOPDIR}/logs

# Bundled native libraries, consumed by child processes.
export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}

server_manual_http='https://download.seafile.com/published/seafile-manual/home.md'
|
||||
|
||||
# Print the interactive-mode greeting and wait for the user to confirm.
function welcome () {
    echo "-----------------------------------------------------------------"
    echo "This script will guide you to config and setup your seafile server."
    echo -e "\nMake sure you have read seafile server manual at \n\n\t${server_manual_http}\n"
    echo -e "Note: This script will guide your to setup seafile server using sqlite3,"
    echo "which may have problems if your disk is on a NFS/CIFS/USB."
    echo "In these cases, we suggest you setup seafile server using MySQL."
    echo
    echo "Press [ENTER] to continue"
    echo "-----------------------------------------------------------------"
    read dummy
    echo
}
|
||||
|
||||
# Print a highlighted failure notice and abort the setup.
function err_and_quit () {
    printf "\n\n\033[33mError occured during setup. \nPlease fix possible issues and run the script again.\033[m\n\n"
    exit 1;
}
|
||||
|
||||
# SIGINT handler: tell the user setup was interrupted, then exit non-zero.
function on_ctrl_c_pressed () {
    printf "\n\n\033[33mYou have pressed Ctrl-C. Setup is interrupted.\033[m\n\n"
    exit 1;
}

# clean newly created ccnet/seafile configs when exit on SIGINT
trap on_ctrl_c_pressed 2
||||
|
||||
# Every release tarball ships these three directories; a missing one
# means the archive was only partially extracted.
function check_sanity () {
    local required_dir
    for required_dir in seahub seafile runtime; do
        if [[ ! -d ${INSTALLPATH}/${required_dir} ]]; then
            echo
            echo "The seafile-server diretory doesn't contain all needed files."
            echo "Please make sure you have extracted all files and folders from tarball."
            err_and_quit;
        fi
    done
}
|
||||
|
||||
# Prompt until the user types exactly "yes" or "no".
# Exit status communicates the answer: 0 = yes, 1 = no.
function read_yes_no () {
    printf "[yes|no] "
    read yesno
    until [[ "${yesno}" == "yes" || "${yesno}" == "no" ]]; do
        printf "please answer [yes|no] "
        read yesno
    done

    # The test itself yields the return status (fails only on "no").
    [[ "${yesno}" != "no" ]]
}
|
||||
|
||||
# Refuse to run when a ccnet config dir already exists (evidence of a
# previous setup), to avoid clobbering existing data.
function check_existing_ccnet () {
    if [[ -d ${default_ccnet_conf_dir} ]]; then
        # Fix: plain echo prints "\033[31m" literally; printf (used for
        # colored output everywhere else in this script) renders the
        # ANSI escapes as intended.
        printf "\033[31m Error: \033[0m Ccnet config dir \"${default_ccnet_conf_dir}\" already exists.\n"
        echo
        exit 1;
    fi
    echo
}
|
||||
|
||||
# Refuse to run when a seafile data dir already exists, to avoid
# clobbering existing data from a previous setup.
function check_existing_seafile () {
    if [[ -d ${default_seafile_data_dir} ]]; then
        # Fix: plain echo prints "\033[31m" literally; printf renders
        # the ANSI color escapes as intended.
        printf "\033[31m Error: \033[0m Seafile server data dir \"${default_seafile_data_dir}\" already exists.\n"
        echo
        exit 1;
    fi
    echo
}
|
||||
|
||||
# Locate a usable Python 3 interpreter and record its command in $PYTHON.
# Honors a pre-set $PYTHON; otherwise prefers python3, then falls back to
# probing the version reported by plain `python`.
function check_python_executable() {
    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
        return 0
    fi

    if which python3 2>/dev/null 1>&2; then
        PYTHON=python3
    # Fix: the original wrote `!(python ...)` and `!which` with no space
    # after `!`, so bash searched for commands literally named that and
    # the negated checks could never behave as intended.
    elif ! python --version 2>&1 | grep "3\.[0-9]\.[0-9]" >/dev/null 2>&1; then
        echo
        echo "The current version of python is not 3.x.x, please use Python 3.x.x ."
        echo
        err_and_quit
    else
        # e.g. "Python 3.8.10" -> bytes 8-10 -> "3.8" -> python3.8
        PYTHON="python"$(python --version | cut -b 8-10)
        if ! which $PYTHON 2>/dev/null 1>&2; then
            echo
            echo "Can't find a python executable of $PYTHON in PATH"
            echo "Install $PYTHON before continue."
            echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
            echo
            err_and_quit
        fi
    fi

    echo "Find python: $PYTHON"
    echo
}
|
||||
|
||||
# Verify that $PYTHON can import a module; abort setup with an optional
# installation hint when it cannot.
#   $1 = module name to import, $2 = display name, $3 = optional hint
function check_python_module () {
    module=$1
    name=$2
    hint=$3
    printf "  Checking python module: ${name} ... "
    if $PYTHON -c "import ${module}" 2>/dev/null 1>&2; then
        echo -e "Done."
        return 0
    fi
    echo
    printf "\033[33m ${name} \033[m is not installed, Please install it first.\n"
    if [[ -n "${hint}" ]]; then
        printf "${hint}"
        echo
    fi
    err_and_quit;
}
|
||||
|
||||
# Verify a python interpreter and the sqlite3 module are available
# (sqlite3 is the backend this setup script configures).
function check_python () {
    echo "Checking python on this machine ..."
    check_python_executable
    check_python_module sqlite3 python-sqlite3
    echo
}
|
||||
|
||||
# The sqlite3 CLI is needed later to load the bundled schema files.
function check_sqlite3 () {
    echo -n "Checking for sqlite3 ..."
    if which sqlite3 >/dev/null 2>&1; then
        printf "Done.\n\n"
        return 0
    fi
    echo -e "\nSqlite3 is not found. install it first.\n"
    echo "On Debian/Ubuntu: apt-get install sqlite3"
    echo "On CentOS/RHEL: yum install sqlite"
    err_and_quit;
}
|
||||
|
||||
# Run all host-level dependency checks (python + sqlite3) up front.
function check_system_dependency () {
    printf "Checking packages needed by seafile ...\n\n"
    check_python;
    check_sqlite3;
    printf "Checking Done.\n\n"
}
|
||||
|
||||
# Print a prompt for the interactive questions.
#   $1 = question text (printf format, may contain \n)
#   $2 = default value ("" or "nodefault" suppresses the default display)
#   $3 = short key label shown when there is no default
function ask_question () {
    question=$1
    default=$2
    key=$3
    printf "${question}\n"
    if [[ -n "${default}" && "${default}" != "nodefault" ]]; then
        printf "[default: ${default} ] "
    elif [[ -n "${key}" ]]; then
        printf "[${key}]: "
    fi
}
|
||||
|
||||
# Prompt (recursively, until valid) for the server name.
# NOTE(review): the pattern accepts 3-14 characters although the hint
# says "3 ~ 15" — confirm which bound is intended.
function get_server_name () {
    question="What would you like to use as the name of this seafile server?\nYour seafile users will be able to see the name in their seafile client."
    hint="You can use a-z, A-Z, 0-9, _ and -, and the length should be 3 ~ 15"
    ask_question "${question}\n${hint}" "nodefault" "server name"
    read server_name
    if [[ -z "${server_name}" ]]; then
        echo
        echo "server name cannot be empty"
        get_server_name
    elif [[ ! ${server_name} =~ ^[a-zA-Z0-9_-]{3,14}$ ]]; then
        printf "\n\033[33m${server_name}\033[m is not a valid name.\n"
        get_server_name
    fi
    echo
}
|
||||
|
||||
# Prompt (recursively, until non-empty) for this server's ip or domain.
function get_server_ip_or_domain () {
    question="What is the ip or domain of this server?\nFor example, www.mycompany.com, or, 192.168.1.101"
    ask_question "${question}\n" "nodefault" "This server's ip or domain"
    read ip_or_domain
    if [[ -z "${ip_or_domain}" ]]; then
        echo
        echo "ip or domain cannot be empty"
        get_server_ip_or_domain
    fi
    echo
}
|
||||
|
||||
# function get_ccnet_server_port () {
|
||||
# question="What tcp port do you want to use for ccnet server?"
|
||||
# hint="10001 is the recommended port."
|
||||
# default="10001"
|
||||
# ask_question "${question}\n${hint}" "${default}"
|
||||
# read server_port
|
||||
# if [[ "${server_port}" == "" ]]; then
|
||||
# server_port="${default}"
|
||||
# fi
|
||||
# if [[ ! ${server_port} =~ ^[0-9]+$ ]]; then
|
||||
# echo "\"${server_port}\" is not a valid port number. "
|
||||
# get_ccnet_server_port
|
||||
# fi
|
||||
# echo
|
||||
# }
|
||||
|
||||
# function get_seafile_server_port () {
|
||||
# question="What tcp port would you like to use for seafile server?"
|
||||
# hint="12001 is the recommended port."
|
||||
# default="12001"
|
||||
# ask_question "${question}\n${hint}" "${default}"
|
||||
# read seafile_server_port
|
||||
# if [[ "${seafile_server_port}" == "" ]]; then
|
||||
# seafile_server_port="${default}"
|
||||
# fi
|
||||
# if [[ ! ${seafile_server_port} =~ ^[0-9]+$ ]]; then
|
||||
# echo "\"${seafile_server_port}\" is not a valid port number. "
|
||||
# get_seafile_server_port
|
||||
# fi
|
||||
# echo
|
||||
# }
|
||||
|
||||
# Prompt for the fileserver tcp port; empty input accepts the default
# (8082), non-numeric input re-prompts recursively.
function get_fileserver_port () {
    question="What tcp port do you want to use for seafile fileserver?"
    hint="8082 is the recommended port."
    default="8082"
    ask_question "${question}\n${hint}" "${default}"
    read fileserver_port
    if [[ -z "${fileserver_port}" ]]; then
        fileserver_port="${default}"
    fi
    if [[ ! ${fileserver_port} =~ ^[0-9]+$ ]]; then
        echo "\"${fileserver_port}\" is not a valid port number. "
        get_fileserver_port
    fi
    echo
}
|
||||
|
||||
|
||||
# function get_seafile_data_dir () {
|
||||
# question="Where would you like to store your seafile data?"
|
||||
# note="Please use a volume with enough free space."
|
||||
# default=${default_seafile_data_dir}
|
||||
# ask_question "${question} \n\033[33mNote: \033[m${note}" "${default}"
|
||||
# read seafile_data_dir
|
||||
# if [[ "${seafile_data_dir}" == "" ]]; then
|
||||
# seafile_data_dir=${default}
|
||||
# fi
|
||||
#
|
||||
# if [[ -d ${seafile_data_dir} && -f ${seafile_data_dir}/seafile.conf ]]; then
|
||||
# echo
|
||||
# echo "It seems that you have already existing seafile data in ${seafile_data_dir}."
|
||||
# echo "Would you like to use the existing seafile data?"
|
||||
# if ! read_yes_no; then
|
||||
# echo "You have chosen not to use existing seafile data in ${seafile_data_dir}"
|
||||
# echo "You need to specify a different seafile data directory or remove ${seafile_data_dir} before continuing."
|
||||
# get_seafile_data_dir
|
||||
# else
|
||||
# use_existing_seafile="true"
|
||||
# fi
|
||||
# elif [[ -d ${seafile_data_dir} && $(ls -A ${seafile_data_dir}) != "" ]]; then
|
||||
# echo
|
||||
# echo "${seafile_data_dir} is an existing non-empty directory. Please specify a different directory"
|
||||
# echo
|
||||
# get_seafile_data_dir
|
||||
# elif [[ ! ${seafile_data_dir} =~ ^/ ]]; then
|
||||
# echo
|
||||
# echo "\"${seafile_data_dir}\" is not an absolute path. Please specify an absolute path."
|
||||
# echo
|
||||
# get_seafile_data_dir
|
||||
# elif [[ ! -d $(dirname ${seafile_data_dir}) ]]; then
|
||||
# echo
|
||||
# echo "The path $(dirname ${seafile_data_dir}) does not exist."
|
||||
# echo
|
||||
# get_seafile_data_dir
|
||||
# fi
|
||||
# echo
|
||||
# }
|
||||
|
||||
# Write a minimal conf/ccnet.conf and create the ccnet data directory.
function gen_ccnet_conf () {
    mkdir -p ${default_conf_dir}
    ccnet_conf=${default_conf_dir}/ccnet.conf
    if ! $(cat > ${ccnet_conf} <<EOF
[General]
EOF
); then
        echo "failed to generate ccnet.conf";
        err_and_quit
    fi

    mkdir -p ${default_ccnet_conf_dir}
}
|
||||
|
||||
# Write conf/seafile.conf (fileserver port chosen earlier) and create the
# seafile data directory.
function gen_seafile_conf () {
    mkdir -p ${default_conf_dir}
    seafile_conf=${default_conf_dir}/seafile.conf
    if ! $(cat > ${seafile_conf} <<EOF
[fileserver]
port=$fileserver_port
EOF
); then
        echo "failed to generate seafile.conf";
        err_and_quit
    fi

    mkdir -p ${default_seafile_data_dir}
}
|
||||
|
||||
# Write conf/gunicorn.conf.py, consumed by seahub.sh when starting
# gunicorn (daemonized, 5 workers, bound to localhost:8000).
function gen_gunicorn_conf () {
    mkdir -p ${default_conf_dir}
    gunicorn_conf=${default_conf_dir}/gunicorn.conf.py
    if ! $(cat > ${gunicorn_conf} <<EOF
import os

daemon = True
workers = 5

# default localhost:8000
bind = "127.0.0.1:8000"

# Pid
pids_dir = '$default_pids_dir'
pidfile = os.path.join(pids_dir, 'seahub.pid')

# for file upload, we need a longer timeout value (default is only 30s, too short)
timeout = 1200

limit_request_line = 8190
EOF
); then
        echo "failed to generate gunicorn.conf.py";
        err_and_quit
    fi
}
|
||||
|
||||
# Write conf/seafdav.conf with WebDAV disabled by default.
function gen_seafdav_conf () {
    mkdir -p ${default_conf_dir}
    seafdav_conf=${default_conf_dir}/seafdav.conf
    if ! $(cat > ${seafdav_conf} <<EOF
[WEBDAV]
enabled = false
port = 8080
share_name = /
EOF
); then
        echo "failed to generate seafdav.conf";
        err_and_quit
    fi
}
|
||||
|
||||
# Seed the library template with the bundled .doc user manuals so every
# new library starts with them.
function copy_user_manuals() {
    src_docs_dir=${INSTALLPATH}/seafile/docs/
    library_template_dir=${default_seafile_data_dir}/library-template
    mkdir -p ${library_template_dir}
    cp -f ${src_docs_dir}/*.doc ${library_template_dir}
}
|
||||
|
||||
# Parse auto-mode options into the setup variables.
# Fix: -p takes an argument (the fileserver port), so its letter needs a
# trailing colon in the getopts spec; with the original "n:i:p" OPTARG
# was never populated for -p.
function parse_params() {
    while getopts n:i:p: arg; do
        case $arg in
        n)
            server_name=${OPTARG}
            ;;
        i)
            ip_or_domain=${OPTARG}
            ;;
        p)
            fileserver_port=${OPTARG}
            ;;
        esac
    done
}
|
||||
|
||||
# Fill in defaults for any auto-mode option the user omitted, then
# sanity-check all three values, aborting on the first invalid one.
function validate_params() {
    # server name: $SERVER_NAME, then the short hostname
    if [[ -z "$server_name" ]]; then
        server_name=${SERVER_NAME:-$(hostname -s)}
    fi
    if [[ ! ${server_name} =~ ^[a-zA-Z0-9_-]{3,14}$ ]]; then
        echo "Invalid server name param"
        err_and_quit;
    fi

    # ip/domain: $SERVER_IP, then `hostname -i`
    if [[ -z "$ip_or_domain" ]]; then
        ip_or_domain=${SERVER_IP:-$(hostname -i)}
    fi
    if [[ -n "$ip_or_domain" && ! ${ip_or_domain} =~ ^[^.].+\..+[^.]$ ]]; then
        echo "Invalid ip or domain param"
        err_and_quit;
    fi

    # fileserver port: $FILESERVER_PORT, then 8082
    if [[ -z "${fileserver_port}" ]]; then
        fileserver_port=${FILESERVER_PORT:-8082}
    fi
    if [[ ! ${fileserver_port} =~ ^[0-9]+$ ]]; then
        echo "Invalid fileserver port param"
        err_and_quit;
    fi
}
|
||||
|
||||
# Print command-line help for both invocation modes.
# NOTE(review): -d is advertised here but parse_params does not handle
# it — confirm whether -d was dropped or usage is stale.
function usage() {
    echo "auto mode:"
    echo -e "$0 auto\n" \
        "-n server name\n" \
        "-i ip or domain\n" \
        "-p fileserver port\n" \
        "-d seafile dir to store seafile data"
    echo ""
    echo "interactive mode:"
    echo "$0"
}
|
||||
|
||||
# -------------------------------------------
|
||||
# Main workflow of this script
|
||||
# -------------------------------------------
|
||||
|
||||
# -h/--help anywhere on the command line prints usage and exits.
for param in $@; do
    if [[ "$param" == "-h" || "$param" == "--help" ]]; then
        usage;
        exit 0
    fi
done

# "auto" as the first argument switches to non-interactive mode: options
# provide (or env vars/defaults supply) all answers, and every
# press-ENTER pause is skipped.
need_pause=1
if [[ $# -ge 1 && "$1" == "auto" ]]; then
    # auto mode, no pause
    shift
    parse_params $@;
    validate_params;
    need_pause=0
fi

check_sanity;
if [[ "${need_pause}" == "1" ]]; then
    welcome;
fi
sleep .5
check_system_dependency;
sleep .5

# Abort early rather than overwrite data from a previous setup.
check_existing_ccnet;
check_existing_seafile;

# Interactive prompts only for values not already provided.
if [[ "${server_name}" == "" ]]; then
    get_server_name;
fi

if [[ "${ip_or_domain}" == "" ]]; then
    get_server_ip_or_domain;
fi

if [[ "$fileserver_port" == "" ]]; then
    get_fileserver_port
fi

sleep .5

# Echo the collected configuration back for confirmation.
printf "\nThis is your config information:\n\n"

printf "server name: \033[33m${server_name}\033[m\n"
printf "server ip/domain: \033[33m${ip_or_domain}\033[m\n"

printf "seafile data dir: \033[33m${default_seafile_data_dir}\033[m\n"
printf "fileserver port: \033[33m${fileserver_port}\033[m\n"

if [[ "${need_pause}" == "1" ]]; then
    echo
    echo "If you are OK with the configuration, press [ENTER] to continue."
    read dummy
fi

# -------------------------------------------
# Create ccnet conf
# -------------------------------------------

echo "Generating ccnet configuration in ${default_ccnet_conf_dir}..."
echo
gen_ccnet_conf;
echo

# -------------------------------------------
# Create seafile conf
# -------------------------------------------

echo "Generating seafile configuration in ${default_seafile_data_dir} ..."
echo
gen_seafile_conf;
echo

# -------------------------------------------
# Write seafile.ini
# -------------------------------------------

## use default seafile-data path: seafile_data_dir=${TOPDIR}/seafile-data
# echo "${seafile_data_dir}" > "${default_ccnet_conf_dir}/seafile.ini"

# -------------------------------------------
# Generate gunicorn.conf.py
# -------------------------------------------

gen_gunicorn_conf;

# -------------------------------------------
# Generate seafdav.conf
# -------------------------------------------

gen_seafdav_conf;

# -------------------------------------------
# generate seahub/settings.py
# -------------------------------------------
dest_settings_py=${TOPDIR}/conf/seahub_settings.py
seahub_secret_keygen=${INSTALLPATH}/seahub/tools/secret_key_generator.py

# Only create the settings file (with a fresh SECRET_KEY) when it does
# not already exist, so re-running setup keeps an existing key.
if [[ ! -f ${dest_settings_py} ]]; then
    key=$($PYTHON "${seahub_secret_keygen}")
    cat > ${dest_settings_py} <<EOF
# -*- coding: utf-8 -*-
SECRET_KEY = "$key"
EOF
fi

# -------------------------------------------
# Seahub related config
# -------------------------------------------
if [[ "${need_pause}" == "1" ]]; then
    echo "-----------------------------------------------------------------"
    echo "Seahub is the web interface for seafile server."
    echo "Now let's setup seahub configuration. Press [ENTER] to continue"
    echo "-----------------------------------------------------------------"
    echo
    read dummy
fi
|
||||
|
||||
# echo "Please specify the email address and password for the seahub administrator."
|
||||
# echo "You can use them to login as admin on your seahub website."
|
||||
# echo
|
||||
|
||||
# Prompt (recursively, until non-empty and superficially well-formed)
# for the seahub administrator email address.
function get_seahub_admin_email () {
    question="Please specify the email address for the seahub administrator:"
    ask_question "${question}" "nodefault" "seahub admin email"
    read seahub_admin_email
    if [[ -z "${seahub_admin_email}" ]]; then
        echo "Seahub admin user name cannot be empty."
        get_seahub_admin_email
    elif [[ ! ${seahub_admin_email} =~ ^.+@.*\..+$ ]]; then
        echo "${seahub_admin_email} is not a valid email address"
        get_seahub_admin_email
    fi
}
|
||||
|
||||
# Prompt twice (without echoing input) for the admin password; retry the
# whole exchange until both entries match and are non-empty.
function get_seahub_admin_passwd () {
    echo
    question="Please specify the password you would like to use for seahub administrator:"
    ask_question "${question}" "nodefault" "seahub admin password"
    read -s seahub_admin_passwd
    echo
    question="Please enter the password again:"
    ask_question "${question}" "nodefault" "seahub admin password again"
    read -s seahub_admin_passwd_again
    echo
    if [[ "${seahub_admin_passwd}" != "${seahub_admin_passwd_again}" ]]; then
        printf "\033[33mThe passwords didn't match.\033[m"
        get_seahub_admin_passwd
    elif [[ -z "${seahub_admin_passwd}" ]]; then
        echo "Password cannot be empty."
        get_seahub_admin_passwd
    fi
}
|
||||
|
||||
# get_seahub_admin_email;
|
||||
# sleep .5;
|
||||
# get_seahub_admin_passwd;
|
||||
# seahub_admin_passwd_enc=$(echo -n ${seahub_admin_passwd} | sha1sum | grep -o "[0-9a-f]*")
|
||||
# sleep .5;
|
||||
|
||||
# printf "\n\n"
|
||||
# echo "This is your seahub admin username/password"
|
||||
# echo
|
||||
# printf "admin username: \033[33m${seahub_admin_email}\033[m\n"
|
||||
# printf "admin password: \033[33m**************\033[m\n\n"
|
||||
|
||||
# echo
|
||||
# echo "If you are OK with the configuration, press [ENTER] to continue."
|
||||
# read dummy
|
||||
|
||||
# usermgr_db_dir=${default_ccnet_conf_dir}/PeerMgr/
|
||||
# usermgr_db=${usermgr_db_dir}/usermgr.db
|
||||
|
||||
# if [[ "${use_existing_ccnet}" != "true" ]]; then
|
||||
# # create admin user/passwd entry in ccnet db
|
||||
# if ! mkdir -p "${usermgr_db_dir}"; then
|
||||
# echo "Failed to create seahub admin."
|
||||
# err_and_quit;
|
||||
# fi
|
||||
|
||||
# sql="CREATE TABLE IF NOT EXISTS EmailUser (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, email TEXT, passwd TEXT, is_staff bool NOT NULL, is_active bool NOT NULL, ctime INTEGER)";
|
||||
|
||||
# if ! sqlite3 "${usermgr_db}" "${sql}" ; then
|
||||
# rm -f "${usermgr_db}"
|
||||
# echo "Failed to create seahub admin."
|
||||
# err_and_quit;
|
||||
# fi
|
||||
|
||||
# sql="INSERT INTO EmailUser(email, passwd, is_staff, is_active, ctime) VALUES (\"${seahub_admin_email}\", \"${seahub_admin_passwd_enc}\", 1, 1, 0);"
|
||||
|
||||
# if ! sqlite3 "${usermgr_db}" "${sql}" ; then
|
||||
# rm -f "${usermgr_db}"
|
||||
# echo "Failed to create seahub admin."
|
||||
# err_and_quit;
|
||||
# fi
|
||||
# fi
|
||||
|
||||
# Initialize every sqlite database from the bundled schema files.
echo "Creating database now, it may take one minute, please wait... "
echo

cd ${TOPDIR}/ccnet && mkdir -m 0755 GroupMgr misc OrgMgr PeerMgr && cd -

ccnet_group_db=${TOPDIR}/ccnet/GroupMgr/groupmgr.db
ccnet_group_sql=${INSTALLPATH}/sql/sqlite/groupmgr.sql
if ! sqlite3 ${ccnet_group_db} ".read ${ccnet_group_sql}" 2>/dev/null 1>&2; then
    echo "Failed to sync ccnet groupmgr database."
    err_and_quit;
fi

ccnet_config_db=${TOPDIR}/ccnet/misc/config.db
ccnet_config_sql=${INSTALLPATH}/sql/sqlite/config.sql
if ! sqlite3 ${ccnet_config_db} ".read ${ccnet_config_sql}" 2>/dev/null 1>&2; then
    echo "Failed to sync ccnet config database."
    err_and_quit;
fi

ccnet_org_db=${TOPDIR}/ccnet/OrgMgr/orgmgr.db
ccnet_org_sql=${INSTALLPATH}/sql/sqlite/org.sql
if ! sqlite3 ${ccnet_org_db} ".read ${ccnet_org_sql}" 2>/dev/null 1>&2; then
    echo "Failed to sync ccnet org database."
    err_and_quit;
fi

ccnet_user_db=${TOPDIR}/ccnet/PeerMgr/usermgr.db
ccnet_user_sql=${INSTALLPATH}/sql/sqlite/user.sql
if ! sqlite3 ${ccnet_user_db} ".read ${ccnet_user_sql}" 2>/dev/null 1>&2; then
    echo "Failed to sync ccnet user database."
    err_and_quit;
fi

seafile_db=${TOPDIR}/seafile-data/seafile.db
seafile_sql=${INSTALLPATH}/sql/sqlite/seafile.sql
if ! sqlite3 ${seafile_db} ".read ${seafile_sql}" 2>/dev/null 1>&2; then
    echo "Failed to sync seafile database."
    err_and_quit;
fi

seahub_db=${TOPDIR}/seahub.db
seahub_sqls=${INSTALLPATH}/seahub/sql/sqlite3.sql
if ! sqlite3 ${seahub_db} ".read ${seahub_sqls}" 2>/dev/null 1>&2; then
    echo "Failed to sync seahub database."
    err_and_quit;
fi
echo
echo "Done."

# prepare avatar folder: move the bundled avatars out of the versioned
# install dir into ${TOPDIR}/seahub-data (so they survive upgrades) and
# symlink them back into the media dir.

media_dir=${INSTALLPATH}/seahub/media
orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars
dest_avatar_dir=${TOPDIR}/seahub-data/avatars

if [[ ! -d ${dest_avatar_dir} ]]; then
    mkdir -p "${TOPDIR}/seahub-data"
    mv "${orig_avatar_dir}" "${dest_avatar_dir}"
    ln -s ../../../seahub-data/avatars ${media_dir}
fi

# Make a seafile-server symlink, like this:
# /data/haiwen/
# -- seafile-server-2.0.4
# -- seafile-server-latest # symlink to 2.0.4
seafile_server_symlink=${TOPDIR}/seafile-server-latest
echo
echo -n "creating seafile-server-latest symbolic link ... "
if ! ln -s $(basename ${INSTALLPATH}) ${seafile_server_symlink}; then
    echo
    echo
    echo "Failed to create symbolic link ${seafile_server_symlink}"
    err_and_quit;
fi
echo "done"
echo

# Lock secrets and data dirs down to the owner only.
chmod 0600 "$dest_settings_py"
chmod 0700 "$default_ccnet_conf_dir"
chmod 0700 "$default_seafile_data_dir"
chmod 0700 "$default_conf_dir"

# -------------------------------------------
# copy user manuals to library template
# -------------------------------------------
copy_user_manuals;

# -------------------------------------------
# final message
# -------------------------------------------

sleep 1

echo
echo "-----------------------------------------------------------------"
echo "Your seafile server configuration has been completed successfully."
echo "-----------------------------------------------------------------"
echo
echo "run seafile server: ./seafile.sh { start | stop | restart }"
echo "run seahub server: ./seahub.sh { start <port> | stop | restart <port> }"
echo
echo "-----------------------------------------------------------------"
echo "If the server is behind a firewall, remember to open these tcp ports:"
echo "-----------------------------------------------------------------"
echo
echo "port of seafile fileserver: ${fileserver_port}"
echo "port of seahub: 8000"
echo
echo -e "When problems occur, refer to\n"
echo -e " ${server_manual_http}\n"
echo "for more information."
echo
@ -1,82 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
"""Lifted from:
|
||||
http://stackoverflow.com/questions/18671/quick-easy-way-to-migrate-sqlite3-to-mysql
|
||||
|
||||
Run like so:
|
||||
|
||||
sqlite3 <your db>.db .dump | python sqlite2mysql.py > <your db>.sql
|
||||
|
||||
Then you can import the .sql file into MySql
|
||||
|
||||
Note - you need to add foreign key constrains manually since sqlite doesn't actually support them
|
||||
"""
|
||||
import re
|
||||
import fileinput
|
||||
|
||||
def this_line_is_useless(line):
    """Return a truthy value when *line* is a sqlite-dump artifact
    (transaction wrappers, sqlite-internal tables, pragmas, unique
    indexes) that has no MySQL equivalent and should be skipped.
    """
    useless_es = [
        'BEGIN TRANSACTION',
        'COMMIT',
        'sqlite_sequence',
        'CREATE UNIQUE INDEX',
        'PRAGMA',
    ]
    # Fix: the original loop returned True or fell off the end (implicit
    # None). any() performs the same short-circuit search but returns an
    # explicit bool, keeping truthiness identical for existing callers.
    return any(re.search(useless, line) for useless in useless_es)
|
||||
|
||||
def has_primary_key(line):
    """Return True when *line* contains a PRIMARY KEY declaration."""
    return re.search(r'PRIMARY KEY', line) is not None
|
||||
|
||||
# Stream the sqlite dump (stdin or argv files) and emit MySQL-compatible
# SQL on stdout, one rewritten line at a time.
for line in fileinput.input():
    # NOTE(review): this flag is re-initialized on EVERY iteration, so
    # the "inside a CREATE TABLE" state never survives past the CREATE
    # TABLE line itself — the AUTO_INCREMENT/backquote rewriting below
    # cannot apply to multi-line table bodies. Confirm whether that is
    # intended before changing it.
    searching_for_end = False
    if this_line_is_useless(line): continue

    # this line was necessary because ''); was getting
    # converted (inappropriately) to \');
    if re.match(r".*, ''\);", line):
        line = re.sub(r"''\);", r'``);', line)

    if re.match(r'^CREATE TABLE.*', line):
        searching_for_end = True

    # CREATE TABLE: re-emit as DROP + CREATE IF NOT EXISTS with
    # backquoted identifiers.
    m = re.search('CREATE TABLE [`"]?(\w*)[`"]?(.*)', line)
    if m:
        name, sub = m.groups()
        sub = sub.replace('"','`')
        line = "DROP TABLE IF EXISTS `%(name)s`;\nCREATE TABLE IF NOT EXISTS `%(name)s`%(sub)s\n"
        line = line % dict(name=name, sub=sub)
    else:
        # INSERT: backquote the table name and convert quoting style.
        m = re.search('INSERT INTO "(\w*)"(.*)', line)
        if m:
            name, sub = m.groups()
            line = 'INSERT INTO `%s`%s\n' % m.groups()
            line = line.replace('"', r'\"')
            line = line.replace('"', "'")
    # line = re.sub(r"([^'])'t'(.)", r"\1THIS_IS_TRUE\2", line)
    # line = line.replace('THIS_IS_TRUE', '1')
    # line = re.sub(r"([^'])'f'(.)", r"\1THIS_IS_FALSE\2", line)
    # line = line.replace('THIS_IS_FALSE', '0')

    # Add auto_increment if it's not there since sqlite auto_increments ALL
    # primary keys
    if searching_for_end:
        if re.search(r"integer(?:\s+\w+)*\s*PRIMARY KEY(?:\s+\w+)*\s*,", line, re.I):
            line = line.replace("PRIMARY KEY", "PRIMARY KEY AUTO_INCREMENT")
        # replace " and ' with ` because mysql doesn't like quotes in CREATE commands
        line = line.replace('"', '`').replace("'", '`')

    # And now we convert it back (see above)
    if re.match(r".*, ``\);", line):
        line = re.sub(r'``\);', r"'');", line)

    if searching_for_end and re.match(r'.*\);', line):
        searching_for_end = False

    if re.match(r"CREATE INDEX", line):
        line = re.sub('"', '`', line)

    line = line.replace('"', '`')
    line = line.replace('AUTOINCREMENT', 'AUTO_INCREMENT')
    print(line)
|
@ -1,118 +0,0 @@
|
||||
#!/bin/sh
#
# This shell script and corresponding sqlite2mysql.py are used to
# migrate Seafile data from SQLite to MySQL.
#
# Setup:
#
#  1. Move this file and sqlite2mysql.py to the top directory of your Seafile
#     installation path (e.g. /data/haiwen).
#  2. Run: ./sqlite2mysql.sh
#  3. Three files(ccnet-db.sql, seafile-db.sql, seahub-db.sql) are created.
#  4. Loads these files to MySQL
#     (mysql> source ccnet-db.sql)
#

CCNET_DB='ccnet-db.sql'
SEAFILE_DB='seafile-db.sql'
SEAHUB_DB='seahub-db.sql'

########## ccnet
seafile_path=$(pwd)
if [ -f "${seafile_path}/conf/ccnet.conf" ]; then
    USER_MGR_DB=${seafile_path}/ccnet/PeerMgr/usermgr.db
    GRP_MGR_DB=${seafile_path}/ccnet/GroupMgr/groupmgr.db
else
    echo "${seafile_path}/conf/ccnet.conf does not exists."
    read -p "Please provide your ccnet.conf path(e.g. /data/haiwen/conf/ccnet.conf): " ccnet_conf_path
    if [ -f ${ccnet_conf_path} ]; then
        USER_MGR_DB=$(dirname $(dirname "${ccnet_conf_path}"))/ccnet/PeerMgr/usermgr.db
        GRP_MGR_DB=$(dirname $(dirname "${ccnet_conf_path}"))/ccnet/GroupMgr/groupmgr.db
    else
        echo "${ccnet_conf_path} does not exists, quit."
        exit 1
    fi
fi

rm -rf ${CCNET_DB}

echo "sqlite3 ${USER_MGR_DB} .dump | python sqlite2mysql.py > ${CCNET_DB}"
sqlite3 ${USER_MGR_DB} .dump | python sqlite2mysql.py > ${CCNET_DB}
echo "sqlite3 ${GRP_MGR_DB} .dump | python sqlite2mysql.py >> ${CCNET_DB}"
sqlite3 ${GRP_MGR_DB} .dump | python sqlite2mysql.py >> ${CCNET_DB}

# change ctime from INTEGER to BIGINT in EmailUser table
sed 's/ctime INTEGER/ctime BIGINT/g' ${CCNET_DB} > ${CCNET_DB}.tmp && mv ${CCNET_DB}.tmp ${CCNET_DB}

# change email in UserRole from TEXT to VARCHAR(255)
sed 's/email TEXT, role TEXT/email VARCHAR(255), role TEXT/g' ${CCNET_DB} > ${CCNET_DB}.tmp && mv ${CCNET_DB}.tmp ${CCNET_DB}

########## seafile
rm -rf ${SEAFILE_DB}

if [ -f "${seafile_path}/seafile-data/seafile.db" ]; then
    echo "sqlite3 ${seafile_path}/seafile-data/seafile.db .dump | python sqlite2mysql.py > ${SEAFILE_DB}"
    sqlite3 ${seafile_path}/seafile-data/seafile.db .dump | python sqlite2mysql.py > ${SEAFILE_DB}
else
    echo "${seafile_path}/seafile-data/seafile.db does not exists."
    read -p "Please provide your seafile.db path(e.g. /data/haiwen/seafile-data/seafile.db): " seafile_db_path
    if [ -f ${seafile_db_path} ];then
        echo "sqlite3 ${seafile_db_path} .dump | python sqlite2mysql.py > ${SEAFILE_DB}"
        sqlite3 ${seafile_db_path} .dump | python sqlite2mysql.py > ${SEAFILE_DB}
    else
        echo "${seafile_db_path} does not exists, quit."
        exit 1
    fi
fi

# change owner_id in RepoOwner from TEXT to VARCHAR(255)
sed 's/owner_id TEXT/owner_id VARCHAR(255)/g' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB}

# change user_name in RepoGroup from TEXT to VARCHAR(255)
sed 's/user_name TEXT/user_name VARCHAR(255)/g' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB}

########## seahub
rm -rf ${SEAHUB_DB}

if [ -f "${seafile_path}/seahub.db" ]; then
    echo "sqlite3 ${seafile_path}/seahub.db .dump | tr -d '\n' | sed 's/;/;\n/g' | python sqlite2mysql.py > ${SEAHUB_DB}"
    sqlite3 ${seafile_path}/seahub.db .dump | tr -d '\n' | sed 's/;/;\n/g' | python sqlite2mysql.py > ${SEAHUB_DB}
else
    echo "${seafile_path}/seahub.db does not exists."
    # BUGFIX: prompt said "Please prove your ..."
    read -p "Please provide your seahub.db path(e.g. /data/haiwen/seahub.db): " seahub_db_path
    if [ -f ${seahub_db_path} ]; then
        echo "sqlite3 ${seahub_db_path} .dump | tr -d '\n' | sed 's/;/;\n/g' | python sqlite2mysql.py > ${SEAHUB_DB}"
        sqlite3 ${seahub_db_path} .dump | tr -d '\n' | sed 's/;/;\n/g' | python sqlite2mysql.py > ${SEAHUB_DB}
    else
        echo "${seahub_db_path} does not exists, quit."
        exit 1
    fi
fi

# change username from VARCHAR(256) to VARCHAR(255) in wiki_personalwiki
sed 's/varchar(256) NOT NULL UNIQUE/varchar(255) NOT NULL UNIQUE/g' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}

# remove unique from contacts_contact
sed 's/, UNIQUE (`user_email`, `contact_email`)//g' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}

# remove base_dirfileslastmodifiedinfo records to avoid json string parsing issue between sqlite and mysql
sed '/INSERT INTO `base_dirfileslastmodifiedinfo`/d' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}

# remove notifications_usernotification records to avoid json string parsing issue between sqlite and mysql
sed '/INSERT INTO `notifications_usernotification`/d' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}

########## common logic

# add ENGIN=INNODB to create table statment
for sql_file in $CCNET_DB $SEAFILE_DB $SEAHUB_DB
do
    sed -r 's/(CREATE TABLE.*);/\1 ENGINE=INNODB;/g' $sql_file > $sql_file.tmp && mv $sql_file.tmp $sql_file
done

# remove COLLATE NOCASE if possible
for sql_file in $CCNET_DB $SEAFILE_DB $SEAHUB_DB
do
    sed 's/COLLATE NOCASE//g' $sql_file > $sql_file.tmp && mv $sql_file.tmp $sql_file
done
|
||||
|
@ -1,75 +0,0 @@
|
||||
#!/bin/sh
#
# This shell script is used to add COLLATE NOCASE to email field to avoid case
# issue in sqlite.
#
# 1. ./add-collate.sh <ccnet_dir> <seafile_dir> <seahub_db>
#

USER_DB='/tmp/user-db.sql'
GROUP_DB='/tmp/group-db.sql'
SEAFILE_DB='/tmp/seafile-db.sql'
SEAHUB_DB='/tmp/seahub-db.sql'

ccnet_dir=$1

########## ccnet
USER_MGR_DB=${ccnet_dir}/PeerMgr/usermgr.db
GRP_MGR_DB=${ccnet_dir}/GroupMgr/groupmgr.db

rm -rf ${USER_DB}
rm -rf ${GROUP_DB}

echo "sqlite3 ${USER_MGR_DB} .dump > ${USER_DB}"
sqlite3 ${USER_MGR_DB} .dump > ${USER_DB}
echo "sqlite3 ${GRP_MGR_DB} .dump > ${GROUP_DB}"
sqlite3 ${GRP_MGR_DB} .dump > ${GROUP_DB}

sed -r 's/(CREATE TABLE EmailUser.*)email TEXT,(.*)/\1email TEXT COLLATE NOCASE,\2/I' ${USER_DB} > ${USER_DB}.tmp && mv ${USER_DB}.tmp ${USER_DB}
sed -r 's/(CREATE TABLE Binding.*)email TEXT,(.*)/\1email TEXT COLLATE NOCASE,\2/I' ${USER_DB} > ${USER_DB}.tmp && mv ${USER_DB}.tmp ${USER_DB}
sed -r 's/(CREATE TABLE `Group`.*)`creator_name` VARCHAR\(255\),(.*)/\1`creator_name` VARCHAR\(255\) COLLATE NOCASE,\2/I' ${GROUP_DB} > ${GROUP_DB}.tmp && mv ${GROUP_DB}.tmp ${GROUP_DB}
sed -r 's/(CREATE TABLE `GroupUser`.*)`user_name` VARCHAR\(255\),(.*)/\1`user_name` VARCHAR\(255\) COLLATE NOCASE,\2/I' ${GROUP_DB} > ${GROUP_DB}.tmp && mv ${GROUP_DB}.tmp ${GROUP_DB}

# backup & restore
mv ${USER_MGR_DB} ${USER_MGR_DB}.`date +"%Y%m%d%H%M%S"`
mv ${GRP_MGR_DB} ${GRP_MGR_DB}.`date +"%Y%m%d%H%M%S"`
sqlite3 ${USER_MGR_DB} < ${USER_DB}
sqlite3 ${GRP_MGR_DB} < ${GROUP_DB}

########## seafile
rm -rf ${SEAFILE_DB}

SEAFILE_DB_FILE=$2/seafile.db
echo "sqlite3 ${SEAFILE_DB_FILE} .dump > ${SEAFILE_DB}"
sqlite3 ${SEAFILE_DB_FILE} .dump > ${SEAFILE_DB}

sed -r 's/(CREATE TABLE RepoOwner.*)owner_id TEXT(.*)/\1owner_id TEXT COLLATE NOCASE\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB}
sed -r 's/(CREATE TABLE RepoGroup.*)user_name TEXT,(.*)/\1user_name TEXT COLLATE NOCASE,\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB}
sed -r 's/(CREATE TABLE RepoUserToken.*)email VARCHAR\(255\),(.*)/\1email VARCHAR\(255\) COLLATE NOCASE,\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB}
sed -r 's/(CREATE TABLE UserQuota.*)user VARCHAR\(255\),(.*)/\1user VARCHAR\(255\) COLLATE NOCASE,\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB}
sed -r 's/(CREATE TABLE SharedRepo.*)from_email VARCHAR\(512\), to_email VARCHAR\(512\),(.*)/\1from_email VARCHAR\(512\), to_email VARCHAR\(512\) COLLATE NOCASE,\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB}

# backup & restore
mv ${SEAFILE_DB_FILE} ${SEAFILE_DB_FILE}.`date +"%Y%m%d%H%M%S"`
sqlite3 ${SEAFILE_DB_FILE} < ${SEAFILE_DB}

########## seahub
rm -rf ${SEAHUB_DB}

SEAHUB_DB_FILE=$3
# BUGFIX: the echo previously said ".Dump" while the command runs ".dump"
echo "sqlite3 ${SEAHUB_DB_FILE} .dump | tr -d '\n' | sed 's/;/;\n/g' > ${SEAHUB_DB}"
sqlite3 ${SEAHUB_DB_FILE} .dump | tr -d '\n' | sed 's/;/;\n/g' > ${SEAHUB_DB}

sed -r 's/(CREATE TABLE "notifications_usernotification".*)"to_user" varchar\(255\) NOT NULL,(.*)/\1"to_user" varchar\(255\) NOT NULL COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
sed -r 's/(CREATE TABLE "profile_profile".*)"user" varchar\(75\) NOT NULL UNIQUE,(.*)/\1"user" varchar\(75\) NOT NULL UNIQUE COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
sed -r 's/(CREATE TABLE "share_fileshare".*)"username" varchar\(255\) NOT NULL,(.*)/\1"username" varchar\(255\) NOT NULL COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
sed -r 's/(CREATE TABLE "api2_token".*)"user" varchar\(255\) NOT NULL UNIQUE,(.*)/\1"user" varchar\(255\) NOT NULL UNIQUE COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
sed -r 's/(CREATE TABLE "wiki_personalwiki".*)"username" varchar\(256\) NOT NULL UNIQUE,(.*)/\1"username" varchar\(256\) NOT NULL UNIQUE COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
sed -r 's/(CREATE TABLE "message_usermessage".*)"from_email" varchar\(75\) NOT NULL,\s*"to_email" varchar\(75\) NOT NULL,(.*)/\1"from_email" varchar\(75\) NOT NULL COLLATE NOCASE, "to_email" varchar\(75\) NOT NULL COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
sed -r 's/(CREATE TABLE "avatar_avatar".*)"emailuser" varchar\(255\) NOT NULL,(.*)/\1"emailuser" varchar\(255\) NOT NULL COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}

# backup & restore
mv ${SEAHUB_DB_FILE} ${SEAHUB_DB_FILE}.`date +"%Y%m%d%H%M%S"`
sqlite3 ${SEAHUB_DB_FILE} < ${SEAHUB_DB}

rm -rf ${USER_DB} ${GROUP_DB} ${SEAFILE_DB} ${SEAHUB_DB}
|
@ -1,52 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import sqlite3
|
||||
import os
|
||||
import sys
|
||||
|
||||
def usage():
    """Print a one-line usage hint for this script."""
    prog = os.path.basename(sys.argv[0])
    print('usage: %s <seahub db>' % prog)
|
||||
|
||||
def main():
    """Apply idempotent schema fixes to the seahub sqlite db given in argv[1].

    - add the share_fileshare.s_type column (if missing) and its index
    - create base_dirfileslastmodifiedinfo and api2_token tables if absent
    """
    seahub_db = sys.argv[1]

    conn = sqlite3.connect(seahub_db)
    try:
        c = conn.cursor()

        try:
            c.execute('SELECT s_type from share_fileshare')
        except sqlite3.OperationalError:
            # only add this column if not exist yet, so this script is idempotent
            c.execute('ALTER table share_fileshare add column "s_type" varchar(2) NOT NULL DEFAULT "f"')

        c.execute('CREATE INDEX IF NOT EXISTS "share_fileshare_f775835c" ON "share_fileshare" ("s_type")')

        sql = '''CREATE TABLE IF NOT EXISTS "base_dirfileslastmodifiedinfo" (
    "id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
    "repo_id" varchar(36) NOT NULL,
    "parent_dir" text NOT NULL,
    "parent_dir_hash" varchar(12) NOT NULL,
    "dir_id" varchar(40) NOT NULL,
    "last_modified_info" text NOT NULL,
    UNIQUE ("repo_id", "parent_dir_hash"))'''
        c.execute(sql)

        sql = '''CREATE TABLE IF NOT EXISTS "api2_token" (
    "key" varchar(40) NOT NULL PRIMARY KEY,
    "user" varchar(255) NOT NULL UNIQUE,
    "created" datetime NOT NULL)'''
        c.execute(sql)

        conn.commit()
    finally:
        # BUGFIX: the connection was previously never closed
        conn.close()

if __name__ == '__main__':
    if len(sys.argv) != 2:
        usage()
        sys.exit(1)

    main()
|
||||
|
||||
|
@ -1,384 +0,0 @@
|
||||
# coding: UTF-8
|
||||
|
||||
import sys
|
||||
import os
|
||||
import configparser
|
||||
import glob
|
||||
|
||||
HAS_PYMYSQL = True
|
||||
try:
|
||||
import pymysql
|
||||
except ImportError:
|
||||
HAS_PYMYSQL = False
|
||||
|
||||
HAS_SQLITE3 = True
|
||||
try:
|
||||
import sqlite3
|
||||
except ImportError:
|
||||
HAS_SQLITE3 = False
|
||||
|
||||
class EnvManager(object):
    """Resolve the seafile installation layout from this file's location and
    from the environment variables exported by the start scripts."""

    def __init__(self):
        here = os.path.dirname(__file__)
        self.upgrade_dir = here
        self.install_path = os.path.dirname(here)
        self.top_dir = os.path.dirname(self.install_path)
        env = os.environ
        self.ccnet_dir = env['CCNET_CONF_DIR']
        self.seafile_dir = env['SEAFILE_CONF_DIR']
        # optional: only set on deployments with a central conf directory
        self.central_config_dir = env.get('SEAFILE_CENTRAL_CONF_DIR')
|
||||
|
||||
|
||||
# Module-level singleton used by all updaters for path lookups.
env_mgr = EnvManager()
|
||||
|
||||
|
||||
class Utils(object):
    """Console-output and config-file helpers shared by the updaters."""

    @staticmethod
    def highlight(content, is_error=False):
        '''Add ANSI color to content to get it highlighted on terminal'''
        color = '31' if is_error else '32'  # red for errors, green otherwise
        return '\x1b[1;%sm%s\x1b[m' % (color, content)

    @staticmethod
    def info(msg):
        """Print *msg* with a highlighted [INFO] prefix."""
        print(Utils.highlight('[INFO] ') + msg)

    @staticmethod
    def warning(msg):
        """Print *msg* with a highlighted [WARNING] prefix."""
        print(Utils.highlight('[WARNING] ') + msg)

    @staticmethod
    def error(msg):
        """Print *msg* with an [ERROR] prefix and terminate the process."""
        print(Utils.highlight('[ERROR] ') + msg)
        sys.exit(1)

    @staticmethod
    def read_config(config_path, defaults):
        """Parse *config_path* with the given defaults; abort when missing."""
        if not os.path.exists(config_path):
            Utils.error('Config path %s doesn\'t exist, stop db upgrade' %
                        config_path)
        cp = configparser.ConfigParser(defaults)
        cp.read(config_path)
        return cp
|
||||
|
||||
|
||||
class MySQLDBInfo(object):
    """Plain record holding the connection parameters of one MySQL database.

    unix_socket is optional; when set it takes precedence over host/port
    in the callers that open connections.
    """

    def __init__(self, host, port, username, password, db, unix_socket=None):
        self.host = host
        self.port = port
        self.username = username
        self.password = password
        self.db = db
        self.unix_socket = unix_socket
|
||||
|
||||
|
||||
class DBUpdater(object):
    """Base class for applying per-version SQL upgrade scripts.

    get_instance() inspects the ccnet/seafile/seahub configuration and
    returns the matching concrete updater (MySQLDBUpdater or
    SQLiteDBUpdater); subclasses implement the update_*_sql hooks.
    """

    def __init__(self, version, name):
        # e.g. <install>/upgrade/sql/<version>/<mysql|sqlite3>
        self.sql_dir = os.path.join(env_mgr.upgrade_dir, 'sql', version, name)
        pro_path = os.path.join(env_mgr.install_path, 'pro')
        # seafevents upgrades only apply to the pro edition
        self.is_pro = os.path.exists(pro_path)

    @staticmethod
    def get_instance(version):
        '''Detect whether we are using mysql or sqlite3'''
        ccnet_db_info = DBUpdater.get_ccnet_mysql_info(version)
        seafile_db_info = DBUpdater.get_seafile_mysql_info(version)
        seahub_db_info = DBUpdater.get_seahub_mysql_info()

        if ccnet_db_info and seafile_db_info and seahub_db_info:
            Utils.info('You are using MySQL')
            if not HAS_PYMYSQL:
                Utils.error('Python pymysql module is not found')
            updater = MySQLDBUpdater(version, ccnet_db_info, seafile_db_info, seahub_db_info)

        elif (ccnet_db_info is None) and (seafile_db_info is None) and (seahub_db_info is None):
            Utils.info('You are using SQLite3')
            if not HAS_SQLITE3:
                Utils.error('Python sqlite3 module is not found')
            updater = SQLiteDBUpdater(version)

        else:
            # mixed sqlite/mysql configuration: refuse to guess
            def to_db_string(info):
                if info is None:
                    return 'SQLite3'
                else:
                    return 'MySQL'
            Utils.error('Error:\n ccnet is using %s\n seafile is using %s\n seahub is using %s\n'
                        % (to_db_string(ccnet_db_info),
                           to_db_string(seafile_db_info),
                           to_db_string(seahub_db_info)))

        return updater

    def update_db(self):
        """Apply whichever of the four per-component sql files exist."""
        ccnet_sql = os.path.join(self.sql_dir, 'ccnet.sql')
        seafile_sql = os.path.join(self.sql_dir, 'seafile.sql')
        seahub_sql = os.path.join(self.sql_dir, 'seahub.sql')
        seafevents_sql = os.path.join(self.sql_dir, 'seafevents.sql')

        if os.path.exists(ccnet_sql):
            Utils.info('updating ccnet database...')
            self.update_ccnet_sql(ccnet_sql)

        if os.path.exists(seafile_sql):
            Utils.info('updating seafile database...')
            self.update_seafile_sql(seafile_sql)

        if os.path.exists(seahub_sql):
            Utils.info('updating seahub database...')
            self.update_seahub_sql(seahub_sql)

        if os.path.exists(seafevents_sql):
            self.update_seafevents_sql(seafevents_sql)

    @staticmethod
    def get_ccnet_mysql_info(version):
        """Return MySQLDBInfo from ccnet.conf, or None when not mysql."""
        # NOTE: lexicographic version comparison, kept for compatibility
        if version > '5.0.0':
            config_path = env_mgr.central_config_dir
        else:
            config_path = env_mgr.ccnet_dir

        ccnet_conf = os.path.join(config_path, 'ccnet.conf')
        defaults = {
            'HOST': '127.0.0.1',
            'PORT': '3306',
            'UNIX_SOCKET': '',
        }

        config = Utils.read_config(ccnet_conf, defaults)
        db_section = 'Database'

        if not config.has_section(db_section):
            return None

        type = config.get(db_section, 'ENGINE')
        if type != 'mysql':
            return None

        try:
            host = config.get(db_section, 'HOST')
            port = config.getint(db_section, 'PORT')
            username = config.get(db_section, 'USER')
            password = config.get(db_section, 'PASSWD')
            db = config.get(db_section, 'DB')
            unix_socket = config.get(db_section, 'UNIX_SOCKET')
        except configparser.NoOptionError as e:
            Utils.error('Database config in ccnet.conf is invalid: %s' % e)

        info = MySQLDBInfo(host, port, username, password, db, unix_socket)
        return info

    @staticmethod
    def get_seafile_mysql_info(version):
        """Return MySQLDBInfo from seafile.conf, or None when not mysql."""
        if version > '5.0.0':
            config_path = env_mgr.central_config_dir
        else:
            config_path = env_mgr.seafile_dir

        seafile_conf = os.path.join(config_path, 'seafile.conf')
        defaults = {
            'HOST': '127.0.0.1',
            'PORT': '3306',
            'UNIX_SOCKET': '',
        }
        config = Utils.read_config(seafile_conf, defaults)
        db_section = 'database'

        if not config.has_section(db_section):
            return None

        type = config.get(db_section, 'type')
        if type != 'mysql':
            return None

        try:
            host = config.get(db_section, 'host')
            port = config.getint(db_section, 'port')
            username = config.get(db_section, 'user')
            password = config.get(db_section, 'password')
            db = config.get(db_section, 'db_name')
            unix_socket = config.get(db_section, 'unix_socket')
        except configparser.NoOptionError as e:
            Utils.error('Database config in seafile.conf is invalid: %s' % e)

        info = MySQLDBInfo(host, port, username, password, db, unix_socket)
        return info

    @staticmethod
    def get_seahub_mysql_info():
        """Return MySQLDBInfo from seahub_settings.py, or None when not mysql."""
        sys.path.insert(0, env_mgr.top_dir)
        if env_mgr.central_config_dir:
            sys.path.insert(0, env_mgr.central_config_dir)
        try:
            import seahub_settings # pylint: disable=F0401
        except ImportError as e:
            Utils.error('Failed to import seahub_settings.py: %s' % e)

        if not hasattr(seahub_settings, 'DATABASES'):
            return None

        try:
            d = seahub_settings.DATABASES['default']
            if d['ENGINE'] != 'django.db.backends.mysql':
                return None

            host = d.get('HOST', '127.0.0.1')
            port = int(d.get('PORT', 3306))
            username = d['USER']
            password = d['PASSWORD']
            db = d['NAME']
            # a path-like HOST means a unix socket connection
            unix_socket = host if host.startswith('/') else None
        except KeyError as e:
            # BUGFIX: was bare 'except KeyError:' while still formatting 'e',
            # which raised NameError instead of reporting the bad config.
            Utils.error('Database config in seahub_settings.py is invalid: %s' % e)

        info = MySQLDBInfo(host, port, username, password, db, unix_socket)
        return info

    def update_ccnet_sql(self, ccnet_sql):
        raise NotImplementedError

    def update_seafile_sql(self, seafile_sql):
        raise NotImplementedError

    def update_seahub_sql(self, seahub_sql):
        raise NotImplementedError

    def update_seafevents_sql(self, seafevents_sql):
        raise NotImplementedError
|
||||
|
||||
class CcnetSQLiteDB(object):
    """Locate the individual sqlite database files under a ccnet conf dir."""

    def __init__(self, ccnet_dir):
        self.ccnet_dir = ccnet_dir

    def get_db(self, dbname):
        """Return the path of the db whose basename (sans extension) is
        *dbname*, or None when no known database matches."""
        candidates = (
            'ccnet.db',
            'GroupMgr/groupmgr.db',
            'misc/config.db',
            'OrgMgr/orgmgr.db',
            'PeerMgr/usermgr.db',
        )
        for rel in candidates:
            stem = os.path.splitext(os.path.basename(rel))[0]
            if stem == dbname:
                return os.path.join(self.ccnet_dir, rel)
|
||||
|
||||
class SQLiteDBUpdater(DBUpdater):
    """Apply upgrade SQL directly to the sqlite database files."""

    def __init__(self, version):
        DBUpdater.__init__(self, version, 'sqlite3')

        self.ccnet_db = CcnetSQLiteDB(env_mgr.ccnet_dir)
        self.seafile_db = os.path.join(env_mgr.seafile_dir, 'seafile.db')
        self.seahub_db = os.path.join(env_mgr.top_dir, 'seahub.db')
        self.seafevents_db = os.path.join(env_mgr.top_dir, 'seafevents.db')

    def update_db(self):
        """Run the common upgrade steps, then any per-db ccnet scripts."""
        super(SQLiteDBUpdater, self).update_db()
        for sql_path in glob.glob(os.path.join(self.sql_dir, 'ccnet', '*.sql')):
            self.update_ccnet_sql(sql_path)

    def apply_sqls(self, db_path, sql_path):
        """Execute each ';'-separated statement of *sql_path* on *db_path*."""
        with open(sql_path, 'r') as fh:
            statements = fh.read().split(';')

        with sqlite3.connect(db_path) as conn:
            for stmt in statements:
                stmt = stmt.strip()
                if stmt:
                    conn.execute(stmt)

    def update_ccnet_sql(self, sql_path):
        # the sql file's basename names the target ccnet database
        dbname = os.path.splitext(os.path.basename(sql_path))[0]
        self.apply_sqls(self.ccnet_db.get_db(dbname), sql_path)

    def update_seafile_sql(self, sql_path):
        self.apply_sqls(self.seafile_db, sql_path)

    def update_seahub_sql(self, sql_path):
        self.apply_sqls(self.seahub_db, sql_path)

    def update_seafevents_sql(self, sql_path):
        # sqlite deployments have no seafevents database
        if self.is_pro:
            Utils.info('seafevents do not support sqlite3 database')
|
||||
|
||||
|
||||
class MySQLDBUpdater(DBUpdater):
    """Apply upgrade SQL through per-database MySQL connections."""

    def __init__(self, version, ccnet_db_info, seafile_db_info, seahub_db_info):
        DBUpdater.__init__(self, version, 'mysql')
        self.ccnet_db_info = ccnet_db_info
        self.seafile_db_info = seafile_db_info
        self.seahub_db_info = seahub_db_info

    def update_ccnet_sql(self, ccnet_sql):
        self.apply_sqls(self.ccnet_db_info, ccnet_sql)

    def update_seafile_sql(self, seafile_sql):
        self.apply_sqls(self.seafile_db_info, seafile_sql)

    def update_seahub_sql(self, seahub_sql):
        self.apply_sqls(self.seahub_db_info, seahub_sql)

    def update_seafevents_sql(self, seafevents_sql):
        # seafevents tables live in the seahub database (pro only)
        if self.is_pro:
            Utils.info('updating seafevents database...')
            self.apply_sqls(self.seahub_db_info, seafevents_sql)

    def get_conn(self, info):
        """Open a pymysql connection for *info*; socket beats host/port."""
        params = dict(
            user=info.username,
            passwd=info.password,
            db=info.db,
        )
        if info.unix_socket:
            params['unix_socket'] = info.unix_socket
        else:
            params['host'] = info.host
            params['port'] = info.port
        try:
            conn = pymysql.connect(**params)
        except Exception as e:
            if isinstance(e, pymysql.err.OperationalError):
                msg = str(e.args[1])
            else:
                msg = str(e)
            Utils.error('Failed to connect to mysql database %s: %s' % (info.db, msg))

        return conn

    def execute_sql(self, conn, sql):
        """Run one statement; failures are reported but not fatal."""
        cursor = conn.cursor()
        try:
            cursor.execute(sql)
            conn.commit()
        except Exception as e:
            Utils.warning('Failed to execute sql: %s' % str(e))

    def apply_sqls(self, info, sql_path):
        """Execute each ';'-separated statement of *sql_path* on *info*'s db."""
        with open(sql_path, 'r') as fh:
            statements = fh.read().split(';')

        conn = self.get_conn(info)

        for stmt in statements:
            stmt = stmt.strip()
            if stmt:
                self.execute_sql(conn, stmt)
|
||||
|
||||
|
||||
def main():
    """Upgrade the ccnet/seafile/seahub databases to the version in argv[1].

    Honours SEAFILE_SKIP_DB_UPGRADE as an escape hatch for operators who
    manage schema changes themselves.
    """
    skipdb = os.environ.get('SEAFILE_SKIP_DB_UPGRADE', '').lower()
    if skipdb in ('1', 'true', 'on'):
        print('Database upgrade skipped because SEAFILE_SKIP_DB_UPGRADE=%s' % skipdb)
        sys.exit()

    updater = DBUpdater.get_instance(sys.argv[1])
    updater.update_db()

    return 0

if __name__ == '__main__':
    main()
|
@ -1,234 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import configparser
|
||||
import getpass
|
||||
from collections import namedtuple
|
||||
|
||||
try:
|
||||
import pymysql
|
||||
HAS_PYMYSQL = True
|
||||
except ImportError:
|
||||
HAS_PYMYSQL = False
|
||||
|
||||
# Connection parameters of one MySQL database (this script has no
# unix-socket support, unlike the class of the same name in db_update).
MySQLDBInfo = namedtuple('MySQLDBInfo', 'host port username password db')
|
||||
|
||||
class EnvManager(object):
    """Resolve the seafile installation layout from this file's location and
    the environment variables exported by the start scripts."""

    def __init__(self):
        here = os.path.abspath(os.path.dirname(__file__))
        self.upgrade_dir = here
        self.install_path = os.path.dirname(here)
        self.top_dir = os.path.dirname(self.install_path)
        env = os.environ
        self.ccnet_dir = env['CCNET_CONF_DIR']
        self.seafile_dir = env['SEAFILE_CONF_DIR']
|
||||
|
||||
# Module-level singleton used by the config readers below.
env_mgr = EnvManager()
|
||||
|
||||
class Utils(object):
    """Console-output and config-file helpers for this script."""

    @staticmethod
    def highlight(content, is_error=False):
        '''Add ANSI color to content to get it highlighted on terminal'''
        color = '31' if is_error else '32'  # red for errors, green otherwise
        return '\x1b[1;%sm%s\x1b[m' % (color, content)

    @staticmethod
    def info(msg):
        """Print *msg* with a highlighted [INFO] prefix."""
        print(Utils.highlight('[INFO] ') + msg)

    @staticmethod
    def error(msg):
        """Print *msg* with an [ERROR] prefix and terminate the process."""
        print(Utils.highlight('[ERROR] ') + msg)
        sys.exit(1)

    @staticmethod
    def read_config(config_path, defaults):
        """Parse *config_path* with the given default values."""
        cp = configparser.ConfigParser(defaults)
        cp.read(config_path)
        return cp
|
||||
|
||||
def get_ccnet_mysql_info():
    """Return MySQLDBInfo parsed from ccnet.conf, or None when ccnet is
    not configured to use mysql."""
    ccnet_conf = os.path.join(env_mgr.ccnet_dir, 'ccnet.conf')
    defaults = {
        'HOST': '127.0.0.1',
        'PORT': '3306',
    }

    config = Utils.read_config(ccnet_conf, defaults)
    section = 'Database'

    if not config.has_section(section):
        return None
    if config.get(section, 'ENGINE') != 'mysql':
        return None

    try:
        host = config.get(section, 'HOST')
        port = config.getint(section, 'PORT')
        username = config.get(section, 'USER')
        password = config.get(section, 'PASSWD')
        db = config.get(section, 'DB')
    except configparser.NoOptionError as e:
        Utils.error('Database config in ccnet.conf is invalid: %s' % e)

    return MySQLDBInfo(host, port, username, password, db)
|
||||
|
||||
def get_seafile_mysql_info():
    """Return MySQLDBInfo parsed from seafile.conf, or None when seafile
    is not configured to use mysql."""
    seafile_conf = os.path.join(env_mgr.seafile_dir, 'seafile.conf')
    defaults = {
        'HOST': '127.0.0.1',
        'PORT': '3306',
    }
    config = Utils.read_config(seafile_conf, defaults)
    section = 'database'

    if not config.has_section(section):
        return None
    if config.get(section, 'type') != 'mysql':
        return None

    try:
        host = config.get(section, 'host')
        port = config.getint(section, 'port')
        username = config.get(section, 'user')
        password = config.get(section, 'password')
        db = config.get(section, 'db_name')
    except configparser.NoOptionError as e:
        Utils.error('Database config in seafile.conf is invalid: %s' % e)

    return MySQLDBInfo(host, port, username, password, db)
|
||||
|
||||
def get_seahub_mysql_info():
    """Return MySQLDBInfo from seahub_settings.py DATABASES, or None when
    seahub is not configured to use mysql."""
    sys.path.insert(0, env_mgr.top_dir)
    try:
        import seahub_settings# pylint: disable=F0401
    except ImportError as e:
        Utils.error('Failed to import seahub_settings.py: %s' % e)

    if not hasattr(seahub_settings, 'DATABASES'):
        return None

    try:
        d = seahub_settings.DATABASES['default']
        if d['ENGINE'] != 'django.db.backends.mysql':
            return None

        host = d.get('HOST', '127.0.0.1')
        port = int(d.get('PORT', 3306))
        username = d['USER']
        password = d['PASSWORD']
        db = d['NAME']
    except KeyError as e:
        # BUGFIX: was bare 'except KeyError:' while still formatting 'e',
        # which raised NameError instead of reporting the bad config.
        Utils.error('Database config in seahub_settings.py is invalid: %s' % e)

    info = MySQLDBInfo(host, port, username, password, db)
    return info
|
||||
|
||||
def get_seafile_db_infos():
    """Return [ccnet, seafile, seahub] MySQLDBInfo when all three components
    use a local mysql server; otherwise None."""
    infos = [
        get_ccnet_mysql_info(),
        get_seafile_mysql_info(),
        get_seahub_mysql_info(),
    ]

    for info in infos:
        if info is None:
            return None
        # only local servers can be fixed by this script
        if info.host not in ('localhost', '127.0.0.1'):
            return None
    return infos
|
||||
|
||||
def ask_root_password(port):
    """Prompt for the mysql root password until a working one is entered;
    return the resulting open root connection."""
    prompt = 'What is the root password for mysql? '
    while True:
        password = getpass.getpass(prompt).strip()
        if not password:
            continue
        try:
            return check_mysql_user('root', password, port)
        except InvalidAnswer as e:
            # bad credentials: report and ask again
            print('\n%s\n' % e)
|
||||
|
||||
class InvalidAnswer(Exception):
    """Raised when interactively entered mysql credentials are rejected."""

    def __init__(self, msg):
        super(InvalidAnswer, self).__init__()
        self.msg = msg

    def __str__(self):
        return self.msg
|
||||
|
||||
def check_mysql_user(user, password, port):
    """Verify *password* for *user* against the local mysql server.

    Returns an open connection on success; raises InvalidAnswer otherwise.
    """
    print('\nverifying password of root user %s ... ' % user, end=' ')
    params = dict(host='localhost',
                  port=port,
                  user=user,
                  passwd=password)

    try:
        conn = pymysql.connect(**params)
    except Exception as e:
        if isinstance(e, pymysql.err.OperationalError):
            detail = e.args[1]
        else:
            detail = e
        raise InvalidAnswer('Failed to connect to mysql server using user "%s" and password "***": %s'
                            % (user, detail))

    print('done')
    return conn
|
||||
|
||||
def apply_fix(root_conn, user, dbs):
    """Grant *user*@localhost full access to every db in *dbs*, then drop a
    passwordless wildcard-host account of the same name if one exists."""
    for db in dbs:
        grant_db_permission(root_conn, user, db)

    cursor = root_conn.cursor()
    # NOTE(review): assumes a pre-5.7 mysql.user schema ('password' column);
    # on 5.7+ the column is 'authentication_string' — confirm before reuse.
    sql = """
    SELECT *
    FROM mysql.user
    WHERE Host = '%%'
    AND password = ''
    AND User = '%s'
    """ % user
    cursor.execute(sql)
    if cursor.rowcount > 0:
        cursor.execute('DROP USER `%s`@`%%`' % user)
|
||||
|
||||
def grant_db_permission(conn, user, db):
    """GRANT ALL on *db* to *user*@localhost; any failure is fatal."""
    cursor = conn.cursor()
    sql = '''GRANT ALL PRIVILEGES ON `%s`.* to `%s`@localhost ''' \
    % (db, user)

    try:
        cursor.execute(sql)
    except Exception as e:
        if isinstance(e, pymysql.err.OperationalError):
            Utils.error('Failed to grant permission of database %s: %s' % (db, e.args[1]))
        else:
            Utils.error('Failed to grant permission of database %s: %s' % (db, e))
    finally:
        cursor.close()
|
||||
|
||||
def main():
    """Fix mysql grants for the seafile db user when it is not root.

    No-op when any component is on sqlite, on a remote mysql server, or
    already connecting as root.
    """
    dbinfos = get_seafile_db_infos()
    if not dbinfos:
        return
    if dbinfos[0].username == 'root':
        return

    if not HAS_PYMYSQL:
        Utils.error('Python pymysql module is not found')
    root_conn = ask_root_password(dbinfos[0].port)
    databases = [info.db for info in dbinfos]
    apply_fix(root_conn, dbinfos[0].username, databases)

if __name__ == '__main__':
    main()
|
@ -1,168 +0,0 @@
|
||||
#!/bin/bash

# Resolve where this script lives so every path below is absolute.
SCRIPT=$(readlink -f "$0")             # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh
UPGRADE_DIR=$(dirname "$SCRIPT")       # haiwen/seafile-server-1.3.0/upgrade/
INSTALLPATH=$(dirname "$UPGRADE_DIR")  # haiwen/seafile-server-1.3.0/
TOPDIR=$(dirname "${INSTALLPATH}")     # haiwen/

echo
echo "-------------------------------------------------------------"
echo "This script would do the minor upgrade for you."
# Bug fix: user-facing typo "contiune" -> "continue".
echo "Press [ENTER] to continue"
echo "-------------------------------------------------------------"
echo
read dummy

# Well-known locations relative to the installation.
media_dir=${INSTALLPATH}/seahub/media
orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars
dest_avatar_dir=${TOPDIR}/seahub-data/avatars
seafile_server_symlink=${TOPDIR}/seafile-server-latest
default_conf_dir=${TOPDIR}/conf
default_ccnet_conf_dir=${TOPDIR}/ccnet
seahub_data_dir=${TOPDIR}/seahub-data
elasticsearch_config_file=${seafile_server_symlink}/pro/elasticsearch/config/jvm.options
|
||||
|
||||
function migrate_avatars() {
    # Move "media/avatars" out of the versioned install tree into
    # seahub-data so avatars survive upgrades, then leave a relative
    # symlink in its place.
    echo
    echo "------------------------------"
    echo "migrating avatars ..."
    echo
    # move "media/avatars" directory outside
    if [[ ! -d ${dest_avatar_dir} ]]; then
        echo
        # Bug fix: '2>&1' redirected stderr to stdout (a no-op for an
        # echo); the intent was to print the error on stderr: '>&2'.
        echo "Error: avatars directory \"${dest_avatar_dir}\" does not exist" >&2
        echo
        exit 1

    elif [[ ! -L ${orig_avatar_dir} ]]; then
        # Not yet a symlink: move contents over, drop the old directory,
        # and point seahub/media/avatars at seahub-data/avatars.
        mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2
        rm -rf "${orig_avatar_dir}"
        ln -s ../../../seahub-data/avatars "${media_dir}"
    fi
    echo
    echo "DONE"
    echo "------------------------------"
    echo
}
|
||||
|
||||
function make_media_custom_symlink() {
    # Ensure seahub/media/custom is a relative symlink into
    # seahub-data/custom so user customizations survive upgrades.
    media_symlink=${INSTALLPATH}/seahub/media/custom
    if [[ -L "${media_symlink}" ]]; then
        # Already a symlink: nothing to do.
        return

    elif [[ ! -e "${media_symlink}" ]]; then
        # Nothing there yet: just create the symlink.
        ln -s ../../../seahub-data/custom "${media_symlink}"
        return

    elif [[ -d "${media_symlink}" ]]; then
        # A real directory: preserve its contents in seahub-data first,
        # then replace the directory with the symlink.
        cp -rf "${media_symlink}" "${seahub_data_dir}/"
        rm -rf "${media_symlink}"
        ln -s ../../../seahub-data/custom "${media_symlink}"
    fi

}
|
||||
|
||||
function move_old_customdir_outside() {
    # Copy the previous server version's seahub/media/custom directory
    # into seahub-data so the new version can pick it up.

    # find the path of the latest seafile server folder
    if [[ -L ${seafile_server_symlink} ]]; then
        latest_server=$(readlink -f "${seafile_server_symlink}")
    else
        # No previous installation: nothing to migrate.
        return
    fi

    old_customdir=${latest_server}/seahub/media/custom

    # old customdir is already a symlink, do nothing
    if [[ -L "${old_customdir}" ]]; then
        return
    fi

    # old customdir does not exist, do nothing
    if [[ ! -e "${old_customdir}" ]]; then
        return
    fi

    # media/custom exist and is not a symlink
    cp -rf "${old_customdir}" "${seahub_data_dir}/"
}
|
||||
|
||||
function update_latest_symlink() {
    # update the symlink seafile-server-latest to the new server version
    echo
    echo "updating seafile-server-latest symbolic link to ${INSTALLPATH} ..."
    echo
    if ! rm -f "${seafile_server_symlink}"; then
        echo "Failed to remove ${seafile_server_symlink}"
        echo
        exit 1;
    fi

    # Bug fix: ${INSTALLPATH} was unquoted inside the command
    # substitution, which breaks when the path contains whitespace.
    if ! ln -s "$(basename "${INSTALLPATH}")" "${seafile_server_symlink}"; then
        echo "Failed to update ${seafile_server_symlink} symbolic link."
        echo
        exit 1;
    fi
}
|
||||
|
||||
function move_old_elasticsearch_config_to_latest() {
    # Move the elasticsearch's configuration file from the old version to the new version
    echo
    echo "Moving the elasticsearch's configuration file ..."
    echo
    if [[ -f ${elasticsearch_config_file} ]]; then
        # Bug fix: quote both paths so the copy also works when the
        # installation path contains whitespace.
        /bin/cp -avf "${elasticsearch_config_file}" "${INSTALLPATH}/pro/elasticsearch/config/jvm.options"
    fi
}
|
||||
|
||||
function read_seafile_data_dir() {
    # Locate the seafile data directory recorded in ccnet/seafile.ini and
    # make sure ${TOPDIR}/seafile-data points at it.
    seafile_ini=${default_ccnet_conf_dir}/seafile.ini
    if [[ -f ${seafile_ini} ]]; then
        seafile_data_dir=$(cat "${seafile_ini}")
        if [[ ! -d ${seafile_data_dir} ]]; then
            # Bug fix: message typo "doesn't exits" -> "doesn't exist".
            echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exist."
            echo "Please check it first, or create this directory yourself."
            echo ""
            exit 1;
        else
            if [[ ${seafile_data_dir} != ${TOPDIR}/seafile-data ]]; then
                if [[ ! -L ${TOPDIR}/seafile-data ]]; then
                    # Bug fix: quote both arguments so 'ln' works when
                    # the data path contains whitespace.
                    ln -s "${seafile_data_dir}" "${TOPDIR}/seafile-data"
                    echo "Created the symlink ${TOPDIR}/seafile-data for ${seafile_data_dir}."
                fi
            fi
        fi
    fi
}
|
||||
|
||||
function rename_gunicorn_config() {
    # Rename conf/gunicorn.conf to gunicorn.conf.py, then verify the
    # renamed file exists (presumably newer gunicorn releases expect the
    # .py suffix -- confirm against the target gunicorn version).
    echo
    echo "renaming the gunicorn.conf to gunicorn.conf.py ..."
    echo
    if [[ -f "${default_conf_dir}/gunicorn.conf" ]]; then
        mv "${default_conf_dir}/gunicorn.conf" "${default_conf_dir}/gunicorn.conf.py" 1>/dev/null
    fi

    if [[ -f "${default_conf_dir}/gunicorn.conf.py" ]]; then
        echo 'Done'
    else
        # Bug fix: grammar in the error message ("Failed to renamed").
        echo "Failed to rename the gunicorn.conf to gunicorn.conf.py."
        exit 1
    fi
}
|
||||
|
||||
# Run the individual migration steps in order.
read_seafile_data_dir;
rename_gunicorn_config;
migrate_avatars;

move_old_customdir_outside;
make_media_custom_symlink;

move_old_elasticsearch_config_to_latest;

update_latest_symlink;


echo "DONE"
echo "------------------------------"
echo
|
@ -1,13 +0,0 @@
|
||||
#!/bin/bash

# Regenerate SECRET_KEY in seahub_settings.py using the bundled
# secret_key_generator.py helper, replacing the existing line in place.

SCRIPT=$(readlink -f "$0")
UPGRADEDIR=$(dirname "${SCRIPT}")
INSTALLPATH=$(dirname "${UPGRADEDIR}")
TOPDIR=$(dirname "${INSTALLPATH}")

seahub_secret_keygen=${INSTALLPATH}/seahub/tools/secret_key_generator.py
seahub_settings_py=${TOPDIR}/seahub_settings.py

# Bug fix: quote the path expansions so the script also works when the
# installation path contains whitespace.
line="SECRET_KEY = \"$(python "$seahub_secret_keygen")\""

sed -i -e "/SECRET_KEY/c\\$line" "$seahub_settings_py"
|
@ -1,47 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS `wiki_groupwiki` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`group_id` int(11) NOT NULL,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `group_id` (`group_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `wiki_personalwiki` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`username` varchar(255) NOT NULL,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `username` (`username`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `group_publicgroup` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`group_id` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `group_publicgroup_425ae3c4` (`group_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `base_filediscuss` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`group_message_id` int(11) NOT NULL,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`path` longtext NOT NULL,
|
||||
`path_hash` varchar(12) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `base_filediscuss_3c1a2584` (`group_message_id`),
|
||||
KEY `base_filediscuss_6844bd5a` (`path_hash`),
|
||||
CONSTRAINT `group_message_id_refs_id_2ade200f` FOREIGN KEY (`group_message_id`) REFERENCES `group_groupmessage` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `base_filelastmodifiedinfo` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`file_id` varchar(40) NOT NULL,
|
||||
`file_path` longtext NOT NULL,
|
||||
`file_path_hash` varchar(12) NOT NULL,
|
||||
`last_modified` bigint(20) NOT NULL,
|
||||
`email` varchar(75) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `repo_id` (`repo_id`,`file_path_hash`),
|
||||
KEY `base_filelastmodifiedinfo_359081cc` (`repo_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;
|
@ -1,39 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS "wiki_groupwiki" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"group_id" integer NOT NULL UNIQUE,
|
||||
"repo_id" varchar(36) NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "wiki_personalwiki" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"username" varchar(256) NOT NULL UNIQUE,
|
||||
"repo_id" varchar(36) NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "group_publicgroup" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"group_id" integer NOT NULL
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS "group_publicgroup_bda51c3c" ON "group_publicgroup" ("group_id");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "base_filediscuss" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"group_message_id" integer NOT NULL REFERENCES "group_groupmessage" ("id"),
|
||||
"repo_id" varchar(40) NOT NULL,
|
||||
"path" text NOT NULL,
|
||||
"path_hash" varchar(12) NOT NULL
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS "base_filediscuss_6844bd5a" ON "base_filediscuss" ("path_hash");
|
||||
CREATE INDEX IF NOT EXISTS "base_filediscuss_c3e5da7c" ON "base_filediscuss" ("group_message_id");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "base_filelastmodifiedinfo" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"repo_id" varchar(36) NOT NULL,
|
||||
"file_id" varchar(40) NOT NULL,
|
||||
"file_path" text NOT NULL,
|
||||
"file_path_hash" varchar(12) NOT NULL,
|
||||
"last_modified" bigint NOT NULL,
|
||||
"email" varchar(75) NOT NULL,
|
||||
UNIQUE ("repo_id", "file_path_hash")
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS "base_filelastmodifiedinfo_ca6f7e34" ON "base_filelastmodifiedinfo" ("repo_id");
|
@ -1 +0,0 @@
|
||||
-- Index RepoUserToken by email to speed up per-user token lookups (mysql).
CREATE INDEX repousertoken_email on RepoUserToken(email);
|
@ -1,17 +0,0 @@
|
||||
CREATE TABLE `message_usermessage` (
|
||||
`message_id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`message` varchar(512) NOT NULL,
|
||||
`from_email` varchar(75) NOT NULL,
|
||||
`to_email` varchar(75) NOT NULL,
|
||||
`timestamp` datetime NOT NULL,
|
||||
`ifread` tinyint(1) NOT NULL,
|
||||
PRIMARY KEY (`message_id`),
|
||||
KEY `message_usermessage_8b1dd4eb` (`from_email`),
|
||||
KEY `message_usermessage_590d1560` (`to_email`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE `message_usermsglastcheck` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`check_time` datetime NOT NULL,
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
@ -1 +0,0 @@
|
||||
-- Index RepoUserToken by email to speed up per-user token lookups (sqlite).
CREATE INDEX IF NOT EXISTS repousertoken_email on RepoUserToken(email);
|
@ -1,16 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS "message_usermessage" (
|
||||
"message_id" integer NOT NULL PRIMARY KEY,
|
||||
"message" varchar(512) NOT NULL,
|
||||
"from_email" varchar(75) NOT NULL,
|
||||
"to_email" varchar(75) NOT NULL,
|
||||
"timestamp" datetime NOT NULL,
|
||||
"ifread" bool NOT NULL
|
||||
)
|
||||
;
|
||||
CREATE TABLE IF NOT EXISTS "message_usermsglastcheck" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"check_time" datetime NOT NULL
|
||||
)
|
||||
;
|
||||
CREATE INDEX IF NOT EXISTS "message_usermessage_8b1dd4eb" ON "message_usermessage" ("from_email");
|
||||
CREATE INDEX IF NOT EXISTS "message_usermessage_590d1560" ON "message_usermessage" ("to_email");
|
@ -1,2 +0,0 @@
|
||||
-- ccnet
-- Widen the stored password hash column to 64 characters.
ALTER TABLE EmailUser MODIFY passwd varchar(64);
|
@ -1,30 +0,0 @@
|
||||
-- seahub
|
||||
ALTER TABLE group_groupmessage MODIFY message varchar(2048);
|
||||
ALTER TABLE group_messagereply MODIFY message varchar(2048);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `share_privatefiledirshare` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`from_user` varchar(255) NOT NULL,
|
||||
`to_user` varchar(255) NOT NULL,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`path` longtext NOT NULL,
|
||||
`token` varchar(10) NOT NULL,
|
||||
`permission` varchar(5) NOT NULL,
|
||||
`s_type` varchar(5) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `token` (`token`),
|
||||
KEY `share_privatefiledirshare_0e7efed3` (`from_user`),
|
||||
KEY `share_privatefiledirshare_bc172800` (`to_user`),
|
||||
KEY `share_privatefiledirshare_2059abe4` (`repo_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE `message_usermsgattachment` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`user_msg_id` int(11) NOT NULL,
|
||||
`priv_file_dir_share_id` int(11) DEFAULT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `message_usermsgattachment_72f290f5` (`user_msg_id`),
|
||||
KEY `message_usermsgattachment_cee41a9a` (`priv_file_dir_share_id`),
|
||||
CONSTRAINT `priv_file_dir_share_id_refs_id_163f8f83` FOREIGN KEY (`priv_file_dir_share_id`) REFERENCES `share_privatefiledirshare` (`id`),
|
||||
CONSTRAINT `user_msg_id_refs_message_id_debb82ad` FOREIGN KEY (`user_msg_id`) REFERENCES `message_usermessage` (`message_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
@ -1,20 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS "share_privatefiledirshare" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"from_user" varchar(255) NOT NULL,
|
||||
"to_user" varchar(255) NOT NULL,
|
||||
"repo_id" varchar(36) NOT NULL,
|
||||
"path" text NOT NULL,
|
||||
"token" varchar(10) NOT NULL UNIQUE,
|
||||
"permission" varchar(5) NOT NULL,
|
||||
"s_type" varchar(5) NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "message_usermsgattachment" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"user_msg_id" integer NOT NULL REFERENCES "message_usermessage" ("message_id"),
|
||||
"priv_file_dir_share_id" integer REFERENCES "share_privatefiledirshare" ("id")
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS "share_privatefiledirshare_0e7efed3" ON "share_privatefiledirshare" ("from_user");
|
||||
CREATE INDEX IF NOT EXISTS "share_privatefiledirshare_2059abe4" ON "share_privatefiledirshare" ("repo_id");
|
||||
CREATE INDEX IF NOT EXISTS "share_privatefiledirshare_bc172800" ON "share_privatefiledirshare" ("to_user");
|
@ -1,24 +0,0 @@
|
||||
-- seahub
|
||||
CREATE TABLE IF NOT EXISTS `base_groupenabledmodule` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`group_id` varchar(10) NOT NULL,
|
||||
`module_name` varchar(20) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `base_groupenabledmodule_dc00373b` (`group_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `base_userenabledmodule` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`username` varchar(255) NOT NULL,
|
||||
`module_name` varchar(20) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `base_userenabledmodule_ee0cafa2` (`username`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `base_userlastlogin` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`username` varchar(255) NOT NULL,
|
||||
`last_login` datetime NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `base_userlastlogin_ee0cafa2` (`username`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
@ -1,20 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS "base_groupenabledmodule" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"group_id" varchar(10) NOT NULL,
|
||||
"module_name" varchar(20) NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "base_userenabledmodule" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"username" varchar(255) NOT NULL,
|
||||
"module_name" varchar(20) NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "base_userlastlogin" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"username" varchar(255) NOT NULL,
|
||||
"last_login" datetime NOT NULL
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS "base_groupenabledmodule_dc00373b" ON "base_groupenabledmodule" ("group_id");
|
||||
CREATE INDEX IF NOT EXISTS "base_userenabledmodule_ee0cafa2" ON "base_userenabledmodule" ("username");
|
@ -1,53 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS `captcha_captchastore` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`challenge` varchar(32) NOT NULL,
|
||||
`response` varchar(32) NOT NULL,
|
||||
`hashkey` varchar(40) NOT NULL,
|
||||
`expiration` datetime NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `hashkey` (`hashkey`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
DROP TABLE IF EXISTS `notifications_usernotification`;
|
||||
CREATE TABLE IF NOT EXISTS `notifications_usernotification` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`to_user` varchar(255) NOT NULL,
|
||||
`msg_type` varchar(30) NOT NULL,
|
||||
`detail` longtext NOT NULL,
|
||||
`timestamp` datetime NOT NULL,
|
||||
`seen` tinyint(1) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `notifications_usernotification_bc172800` (`to_user`),
|
||||
KEY `notifications_usernotification_265e5521` (`msg_type`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `options_useroptions` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`email` varchar(255) NOT NULL,
|
||||
`option_key` varchar(50) NOT NULL,
|
||||
`option_val` varchar(50) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `options_useroptions_830a6ccb` (`email`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `profile_detailedprofile` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`user` varchar(255) NOT NULL,
|
||||
`department` varchar(512) NOT NULL,
|
||||
`telephone` varchar(100) NOT NULL,
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `share_uploadlinkshare` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`username` varchar(255) NOT NULL,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`path` longtext NOT NULL,
|
||||
`token` varchar(10) NOT NULL,
|
||||
`ctime` datetime NOT NULL,
|
||||
`view_cnt` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `token` (`token`),
|
||||
KEY `share_uploadlinkshare_ee0cafa2` (`username`),
|
||||
KEY `share_uploadlinkshare_2059abe4` (`repo_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
@ -1,48 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS "captcha_captchastore" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"challenge" varchar(32) NOT NULL,
|
||||
"response" varchar(32) NOT NULL,
|
||||
"hashkey" varchar(40) NOT NULL UNIQUE,
|
||||
"expiration" datetime NOT NULL
|
||||
);
|
||||
|
||||
DROP TABLE IF EXISTS "notifications_usernotification";
|
||||
CREATE TABLE IF NOT EXISTS "notifications_usernotification" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"to_user" varchar(255) NOT NULL,
|
||||
"msg_type" varchar(30) NOT NULL,
|
||||
"detail" text NOT NULL,
|
||||
"timestamp" datetime NOT NULL,
|
||||
"seen" bool NOT NULL
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS "notifications_usernotification_265e5521" ON "notifications_usernotification" ("msg_type");
|
||||
CREATE INDEX IF NOT EXISTS "notifications_usernotification_bc172800" ON "notifications_usernotification" ("to_user");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "options_useroptions" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"email" varchar(255) NOT NULL,
|
||||
"option_key" varchar(50) NOT NULL,
|
||||
"option_val" varchar(50) NOT NULL
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS "options_useroptions_830a6ccb" ON "options_useroptions" ("email");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "profile_detailedprofile" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"user" varchar(255) NOT NULL,
|
||||
"department" varchar(512) NOT NULL,
|
||||
"telephone" varchar(100) NOT NULL
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS "profile_detailedprofile_6340c63c" ON "profile_detailedprofile" ("user");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "share_uploadlinkshare" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"username" varchar(255) NOT NULL,
|
||||
"repo_id" varchar(36) NOT NULL,
|
||||
"path" text NOT NULL,
|
||||
"token" varchar(10) NOT NULL UNIQUE,
|
||||
"ctime" datetime NOT NULL,
|
||||
"view_cnt" integer NOT NULL
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS "share_uploadlinkshare_2059abe4" ON "share_uploadlinkshare" ("repo_id");
|
||||
CREATE INDEX IF NOT EXISTS "share_uploadlinkshare_ee0cafa2" ON "share_uploadlinkshare" ("username");
|
@ -1,2 +0,0 @@
|
||||
-- Widen the stored password hash column further, to 256 characters.
ALTER TABLE EmailUser MODIFY passwd varchar(256);

@ -1,13 +0,0 @@
|
||||
-- Per-device API tokens: one row per (user, platform, device_id).
CREATE TABLE IF NOT EXISTS `api2_tokenv2` (
  `key` varchar(40) NOT NULL,
  `user` varchar(255) NOT NULL,
  `platform` varchar(32) NOT NULL,
  `device_id` varchar(40) NOT NULL,
  `device_name` varchar(40) NOT NULL,
  `platform_version` varchar(16) NOT NULL,
  `client_version` varchar(16) NOT NULL,
  `last_accessed` datetime NOT NULL,
  `last_login_ip` char(39) DEFAULT NULL,
  PRIMARY KEY (`key`),
  UNIQUE KEY `user` (`user`,`platform`,`device_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
@ -1,12 +0,0 @@
|
||||
-- Per-device API tokens (sqlite variant of api2_tokenv2).
CREATE TABLE IF NOT EXISTS "api2_tokenv2" (
  "key" varchar(40) NOT NULL PRIMARY KEY,
  "user" varchar(255) NOT NULL,
  "platform" varchar(32) NOT NULL,
  "device_id" varchar(40) NOT NULL,
  "device_name" varchar(40) NOT NULL,
  "platform_version" varchar(16) NOT NULL,
  "client_version" varchar(16) NOT NULL,
  "last_accessed" datetime NOT NULL,
  "last_login_ip" char(39),
  UNIQUE ("user", "platform", "device_id")
);
|
@ -1,20 +0,0 @@
|
||||
alter table message_usermessage add column sender_deleted_at datetime DEFAULT NULL;
|
||||
alter table message_usermessage add column recipient_deleted_at datetime DEFAULT NULL;
|
||||
|
||||
alter table share_fileshare add column password varchar(128);
|
||||
alter table share_fileshare add column expire_date datetime;
|
||||
alter table share_uploadlinkshare add column password varchar(128);
|
||||
alter table share_uploadlinkshare add column expire_date datetime;
|
||||
alter table profile_profile add column lang_code varchar(50) DEFAULT NULL;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `share_orgfileshare` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`org_id` int(11) NOT NULL,
|
||||
`file_share_id` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `file_share_id` (`file_share_id`),
|
||||
KEY `share_orgfileshare_944dadb6` (`org_id`),
|
||||
CONSTRAINT `file_share_id_refs_id_bd2fd9f8` FOREIGN KEY (`file_share_id`) REFERENCES `share_fileshare` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
ALTER TABLE `base_userstarredfiles` ADD INDEX `base_userstarredfiles_email` (email);
|
@ -1,16 +0,0 @@
|
||||
alter table "message_usermessage" add column "sender_deleted_at" datetime;
|
||||
alter table "message_usermessage" add column "recipient_deleted_at" datetime;
|
||||
alter table "share_fileshare" add column "password" varchar(128);
|
||||
alter table "share_fileshare" add column "expire_date" datetime;
|
||||
alter table "share_uploadlinkshare" add column "password" varchar(128);
|
||||
alter table "share_uploadlinkshare" add column "expire_date" datetime;
|
||||
alter table "profile_profile" add column "lang_code" varchar(50);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "share_orgfileshare" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"org_id" integer NOT NULL,
|
||||
"file_share_id" integer NOT NULL UNIQUE REFERENCES "share_fileshare" ("id")
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS "share_orgfileshare_944dadb6" ON "share_orgfileshare" ("org_id");
|
||||
|
||||
CREATE INDEX IF NOT EXISTS "base_userstarredfiles_email" on "base_userstarredfiles" ("email");
|
@ -1 +0,0 @@
|
||||
-- Add a 'type' discriminator column to the Group table.
ALTER TABLE `Group` ADD type VARCHAR(32);
|
@ -1,30 +0,0 @@
|
||||
ALTER TABLE SharedRepo MODIFY from_email VARCHAR(255);
|
||||
ALTER TABLE SharedRepo MODIFY to_email VARCHAR(255);
|
||||
ALTER TABLE SharedRepo ADD INDEX (from_email);
|
||||
ALTER TABLE SharedRepo ADD INDEX (to_email);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS OrgSharedRepo (
|
||||
id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
|
||||
org_id INT,
|
||||
repo_id CHAR(37) ,
|
||||
from_email VARCHAR(255),
|
||||
to_email VARCHAR(255),
|
||||
permission CHAR(15),
|
||||
INDEX (org_id, repo_id),
|
||||
INDEX(from_email),
|
||||
INDEX(to_email)
|
||||
) ENGINE=INNODB;
|
||||
|
||||
ALTER TABLE OrgSharedRepo MODIFY from_email VARCHAR(255);
|
||||
ALTER TABLE OrgSharedRepo MODIFY to_email VARCHAR(255);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS RepoTrash (
|
||||
repo_id CHAR(36) PRIMARY KEY,
|
||||
repo_name VARCHAR(255),
|
||||
head_id CHAR(40),
|
||||
owner_id VARCHAR(255),
|
||||
size BIGINT(20),
|
||||
org_id INTEGER,
|
||||
INDEX(owner_id),
|
||||
INDEX(org_id)
|
||||
) ENGINE=INNODB;
|
@ -1 +0,0 @@
|
||||
-- Add a 'type' discriminator column to the Group table.
ALTER TABLE `Group` ADD type VARCHAR(32);
|
@ -1,14 +0,0 @@
|
||||
CREATE INDEX IF NOT EXISTS FromEmailIndex on SharedRepo (from_email);
|
||||
CREATE INDEX IF NOT EXISTS ToEmailIndex on SharedRepo (to_email);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS RepoTrash (
|
||||
repo_id CHAR(36) PRIMARY KEY,
|
||||
repo_name VARCHAR(255),
|
||||
head_id CHAR(40),
|
||||
owner_id VARCHAR(255),
|
||||
size BIGINT UNSIGNED,
|
||||
org_id INTEGER
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS repotrash_owner_id_idx ON RepoTrash(owner_id);
|
||||
CREATE INDEX IF NOT EXISTS repotrash_org_id_idx ON RepoTrash(org_id);
|
@ -1 +0,0 @@
|
||||
-- Record the deletion timestamp for trashed libraries.
alter table RepoTrash add del_time BIGINT;
|
@ -1,18 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS `base_clientlogintoken` (
|
||||
`token` varchar(32) NOT NULL,
|
||||
`username` varchar(255) NOT NULL,
|
||||
`timestamp` datetime NOT NULL,
|
||||
PRIMARY KEY (`token`),
|
||||
KEY `base_clientlogintoken_ee0cafa2` (`username`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `organizations_orgmemberquota` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`org_id` int(11) NOT NULL,
|
||||
`quota` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `organizations_orgmemberquota_944dadb6` (`org_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
REPLACE INTO django_content_type VALUES(44,'client login token','base','clientlogintoken');
|
||||
REPLACE INTO django_content_type VALUES(45,'org member quota','organizations','orgmemberquota');
|
@ -1 +0,0 @@
|
||||
-- Record the deletion timestamp for trashed libraries.
alter table RepoTrash add del_time BIGINT;
|
@ -1,18 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS "base_clientlogintoken" (
|
||||
"token" varchar(32) NOT NULL PRIMARY KEY,
|
||||
"username" varchar(255) NOT NULL,
|
||||
"timestamp" datetime NOT NULL
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS "base_clientlogintoken_ee0cafa2" ON "base_clientlogintoken" ("username");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "organizations_orgmemberquota" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"org_id" integer NOT NULL,
|
||||
"quota" integer NOT NULL
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS "organizations_orgmemberquota_944dadb6" ON "organizations_orgmemberquota" ("org_id");
|
||||
|
||||
REPLACE INTO "django_content_type" VALUES(44,'client login token','base','clientlogintoken');
|
||||
REPLACE INTO "django_content_type" VALUES(45,'org member quota','organizations','orgmemberquota');
|
@ -1,17 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS `constance_config` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`key` varchar(255) NOT NULL,
|
||||
`value` longtext NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `key` (`key`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
ALTER TABLE `profile_profile` ADD `login_id` varchar(225) DEFAULT NULL;
|
||||
ALTER TABLE `profile_profile` ADD `contact_email` varchar(225) DEFAULT NULL;
|
||||
ALTER TABLE `profile_profile` ADD `institution` varchar(225) DEFAULT NULL;
|
||||
|
||||
ALTER TABLE `profile_profile` ADD UNIQUE INDEX (`login_id`);
|
||||
ALTER TABLE `profile_profile` ADD INDEX (`contact_email`);
|
||||
ALTER TABLE `profile_profile` ADD INDEX (`institution`);
|
||||
|
||||
|
@ -1,13 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS "constance_config" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"key" varchar(255) NOT NULL UNIQUE,
|
||||
"value" text NOT NULL
|
||||
);
|
||||
|
||||
ALTER TABLE "profile_profile" ADD COLUMN "login_id" varchar(225);
|
||||
ALTER TABLE "profile_profile" ADD COLUMN "contact_email" varchar(225);
|
||||
ALTER TABLE "profile_profile" ADD COLUMN "institution" varchar(225);
|
||||
|
||||
CREATE UNIQUE INDEX "profile_profile_1b43c217" ON "profile_profile" ("login_id");
|
||||
CREATE INDEX "profile_profile_3b46cb17" ON "profile_profile" ("contact_email");
|
||||
CREATE INDEX "profile_profile_71bbc151" ON "profile_profile" ("institution");
|
@ -1 +0,0 @@
|
||||
-- Add a client_ver column to RepoTokenPeerInfo.
alter table RepoTokenPeerInfo add client_ver varchar(20);
|
@ -1,124 +0,0 @@
|
||||
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
|
||||
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
|
||||
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
|
||||
/*!40101 SET NAMES utf8 */;
|
||||
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
|
||||
/*!40103 SET TIME_ZONE='+00:00' */;
|
||||
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
|
||||
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
|
||||
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
|
||||
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `post_office_attachment` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`file` varchar(100) NOT NULL,
|
||||
`name` varchar(255) NOT NULL,
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `post_office_attachment_emails` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`attachment_id` int(11) NOT NULL,
|
||||
`email_id` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `attachment_id` (`attachment_id`,`email_id`),
|
||||
KEY `post_office_attachment_emails_4be595e7` (`attachment_id`),
|
||||
KEY `post_office_attachment_emails_830a6ccb` (`email_id`),
|
||||
CONSTRAINT `attachment_id_refs_id_2d59d8fc` FOREIGN KEY (`attachment_id`) REFERENCES `post_office_attachment` (`id`),
|
||||
CONSTRAINT `email_id_refs_id_061d81d8` FOREIGN KEY (`email_id`) REFERENCES `post_office_email` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `post_office_emailtemplate` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`name` varchar(255) NOT NULL,
|
||||
`description` longtext NOT NULL,
|
||||
`created` datetime NOT NULL,
|
||||
`last_updated` datetime NOT NULL,
|
||||
`subject` varchar(255) NOT NULL,
|
||||
`content` longtext NOT NULL,
|
||||
`html_content` longtext NOT NULL,
|
||||
`language` varchar(12) NOT NULL,
|
||||
`default_template_id` int(11) DEFAULT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `language` (`language`,`default_template_id`),
|
||||
KEY `post_office_emailtemplate_84c7951d` (`default_template_id`),
|
||||
CONSTRAINT `default_template_id_refs_id_a2bc649e` FOREIGN KEY (`default_template_id`) REFERENCES `post_office_emailtemplate` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `post_office_email` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`from_email` varchar(254) NOT NULL,
|
||||
`to` longtext NOT NULL,
|
||||
`cc` longtext NOT NULL,
|
||||
`bcc` longtext NOT NULL,
|
||||
`subject` varchar(255) NOT NULL,
|
||||
`message` longtext NOT NULL,
|
||||
`html_message` longtext NOT NULL,
|
||||
`status` smallint(5) unsigned DEFAULT NULL,
|
||||
`priority` smallint(5) unsigned DEFAULT NULL,
|
||||
`created` datetime NOT NULL,
|
||||
`last_updated` datetime NOT NULL,
|
||||
`scheduled_time` datetime DEFAULT NULL,
|
||||
`headers` longtext,
|
||||
`template_id` int(11) DEFAULT NULL,
|
||||
`context` longtext,
|
||||
`backend_alias` varchar(64) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `post_office_email_48fb58bb` (`status`),
|
||||
KEY `post_office_email_63b5ea41` (`created`),
|
||||
KEY `post_office_email_470d4868` (`last_updated`),
|
||||
KEY `post_office_email_c83ff05e` (`scheduled_time`),
|
||||
KEY `post_office_email_43d23afc` (`template_id`),
|
||||
CONSTRAINT `template_id_refs_id_a5d97662` FOREIGN KEY (`template_id`) REFERENCES `post_office_emailtemplate` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `post_office_log` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`email_id` int(11) NOT NULL,
|
||||
`date` datetime NOT NULL,
|
||||
`status` smallint(5) unsigned NOT NULL,
|
||||
`exception_type` varchar(255) NOT NULL,
|
||||
`message` longtext NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `post_office_log_830a6ccb` (`email_id`),
|
||||
CONSTRAINT `email_id_refs_id_3d87f587` FOREIGN KEY (`email_id`) REFERENCES `post_office_email` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `institutions_institution` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`name` varchar(200) NOT NULL,
|
||||
`create_time` datetime NOT NULL,
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `institutions_institutionadmin` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`user` varchar(254) NOT NULL,
|
||||
`institution_id` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `i_institution_id_5f792d6fe9a87ac9_fk_institutions_institution_id` (`institution_id`),
|
||||
CONSTRAINT `i_institution_id_5f792d6fe9a87ac9_fk_institutions_institution_id` FOREIGN KEY (`institution_id`) REFERENCES `institutions_institution` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `sysadmin_extra_userloginlog` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`username` varchar(255) NOT NULL,
|
||||
`login_date` datetime NOT NULL,
|
||||
`login_ip` varchar(128) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `sysadmin_extra_userloginlog_14c4b06b` (`username`),
|
||||
KEY `sysadmin_extra_userloginlog_28ed1ef0` (`login_date`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
ALTER TABLE `sysadmin_extra_userloginlog` MODIFY `login_ip` VARCHAR(128);
|
||||
|
||||
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
|
||||
|
||||
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
|
||||
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
|
||||
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
|
||||
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
|
||||
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
|
||||
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
|
||||
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
|
@ -1 +0,0 @@
|
||||
alter table RepoTokenPeerInfo add client_ver varchar(20);
|
@ -1,72 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS "post_office_attachment" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"file" varchar(100) NOT NULL,
|
||||
"name" varchar(255) NOT NULL
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS "post_office_attachment_emails" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"attachment_id" integer NOT NULL,
|
||||
"email_id" integer NOT NULL REFERENCES "post_office_email" ("id"),
|
||||
UNIQUE ("attachment_id", "email_id")
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS "post_office_email" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"from_email" varchar(254) NOT NULL,
|
||||
"to" text NOT NULL,
|
||||
"cc" text NOT NULL,
|
||||
"bcc" text NOT NULL,
|
||||
"subject" varchar(255) NOT NULL,
|
||||
"message" text NOT NULL,
|
||||
"html_message" text NOT NULL,
|
||||
"status" smallint unsigned,
|
||||
"priority" smallint unsigned,
|
||||
"created" datetime NOT NULL,
|
||||
"last_updated" datetime NOT NULL,
|
||||
"scheduled_time" datetime,
|
||||
"headers" text,
|
||||
"template_id" integer,
|
||||
"context" text,
|
||||
"backend_alias" varchar(64) NOT NULL
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS "post_office_emailtemplate" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"name" varchar(255) NOT NULL,
|
||||
"description" text NOT NULL,
|
||||
"created" datetime NOT NULL,
|
||||
"last_updated" datetime NOT NULL,
|
||||
"subject" varchar(255) NOT NULL,
|
||||
"content" text NOT NULL,
|
||||
"html_content" text NOT NULL,
|
||||
"language" varchar(12) NOT NULL,
|
||||
"default_template_id" integer,
|
||||
UNIQUE ("language", "default_template_id")
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS "post_office_log" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"email_id" integer NOT NULL REFERENCES "post_office_email" ("id"),
|
||||
"date" datetime NOT NULL,
|
||||
"status" smallint unsigned NOT NULL,
|
||||
"exception_type" varchar(255) NOT NULL,
|
||||
"message" text NOT NULL
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS "institutions_institution" (
|
||||
"id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
|
||||
"name" varchar(200) NOT NULL,
|
||||
"create_time" datetime NOT NULL
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS "institutions_institutionadmin" (
|
||||
"id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
|
||||
"user" varchar(254) NOT NULL,
|
||||
"institution_id" integer NOT NULL REFERENCES "institutions_institution" ("id")
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS "post_office_attachment_emails_4be595e7" ON "post_office_attachment_emails" ("attachment_id");
|
||||
CREATE INDEX IF NOT EXISTS "post_office_attachment_emails_830a6ccb" ON "post_office_attachment_emails" ("email_id");
|
||||
CREATE INDEX IF NOT EXISTS "post_office_email_43d23afc" ON "post_office_email" ("template_id");
|
||||
CREATE INDEX IF NOT EXISTS "post_office_email_470d4868" ON "post_office_email" ("last_updated");
|
||||
CREATE INDEX IF NOT EXISTS "post_office_email_48fb58bb" ON "post_office_email" ("status");
|
||||
CREATE INDEX IF NOT EXISTS "post_office_email_63b5ea41" ON "post_office_email" ("created");
|
||||
CREATE INDEX IF NOT EXISTS "post_office_email_c83ff05e" ON "post_office_email" ("scheduled_time");
|
||||
CREATE INDEX IF NOT EXISTS "post_office_emailtemplate_84c7951d" ON "post_office_emailtemplate" ("default_template_id");
|
||||
CREATE INDEX IF NOT EXISTS "post_office_log_830a6ccb" ON "post_office_log" ("email_id");
|
||||
CREATE INDEX "institutions_institutionadmin_a964baeb" ON "institutions_institutionadmin" ("institution_id");
|
@ -1,104 +0,0 @@
|
||||
ALTER TABLE api2_tokenv2 ADD COLUMN wiped_at DATETIME DEFAULT NULL;
|
||||
ALTER TABLE api2_tokenv2 ADD COLUMN created_at DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00";
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `base_filecomment` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`parent_path` longtext NOT NULL,
|
||||
`repo_id_parent_path_md5` varchar(100) NOT NULL,
|
||||
`item_name` longtext NOT NULL,
|
||||
`author` varchar(255) NOT NULL,
|
||||
`comment` longtext NOT NULL,
|
||||
`created_at` datetime NOT NULL,
|
||||
`updated_at` datetime NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `base_filecomment_9a8c79bf` (`repo_id`),
|
||||
KEY `base_filecomment_c5bf47d4` (`repo_id_parent_path_md5`),
|
||||
KEY `base_filecomment_02bd92fa` (`author`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `termsandconditions_termsandconditions` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`slug` varchar(50) NOT NULL,
|
||||
`name` longtext NOT NULL,
|
||||
`version_number` decimal(6,2) NOT NULL,
|
||||
`text` longtext,
|
||||
`info` longtext,
|
||||
`date_active` datetime DEFAULT NULL,
|
||||
`date_created` datetime NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `termsandconditions_termsandconditions_2dbcba41` (`slug`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `termsandconditions_usertermsandconditions` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`username` varchar(255) NOT NULL,
|
||||
`ip_address` char(39) DEFAULT NULL,
|
||||
`date_accepted` datetime NOT NULL,
|
||||
`terms_id` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `termsandconditions_usertermsandcon_username_f4ab54cafa29322_uniq` (`username`,`terms_id`),
|
||||
KEY `e4da106203f3f13ff96409b55de6f515` (`terms_id`),
|
||||
CONSTRAINT `e4da106203f3f13ff96409b55de6f515` FOREIGN KEY (`terms_id`) REFERENCES `termsandconditions_termsandconditions` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `two_factor_totpdevice` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`user` varchar(255) NOT NULL,
|
||||
`name` varchar(64) NOT NULL,
|
||||
`confirmed` tinyint(1) NOT NULL,
|
||||
`key` varchar(80) NOT NULL,
|
||||
`step` smallint(5) unsigned NOT NULL,
|
||||
`t0` bigint(20) NOT NULL,
|
||||
`digits` smallint(5) unsigned NOT NULL,
|
||||
`tolerance` smallint(5) unsigned NOT NULL,
|
||||
`drift` smallint(6) NOT NULL,
|
||||
`last_t` bigint(20) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `user` (`user`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `two_factor_phonedevice` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`user` varchar(255) NOT NULL,
|
||||
`name` varchar(64) NOT NULL,
|
||||
`confirmed` tinyint(1) NOT NULL,
|
||||
`number` varchar(40) NOT NULL,
|
||||
`key` varchar(40) NOT NULL,
|
||||
`method` varchar(4) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `user` (`user`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `two_factor_staticdevice` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`user` varchar(255) NOT NULL,
|
||||
`name` varchar(64) NOT NULL,
|
||||
`confirmed` tinyint(1) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `user` (`user`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `two_factor_statictoken` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`token` varchar(16) NOT NULL,
|
||||
`device_id` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `two_fac_device_id_55a7b345293a7c6c_fk_two_factor_staticdevice_id` (`device_id`),
|
||||
KEY `two_factor_statictoken_94a08da1` (`token`),
|
||||
CONSTRAINT `two_fac_device_id_55a7b345293a7c6c_fk_two_factor_staticdevice_id` FOREIGN KEY (`device_id`) REFERENCES `two_factor_staticdevice` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `invitations_invitation` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`token` varchar(40) NOT NULL,
|
||||
`inviter` varchar(255) NOT NULL,
|
||||
`accepter` varchar(255) NOT NULL,
|
||||
`invite_time` datetime NOT NULL,
|
||||
`accept_time` datetime DEFAULT NULL,
|
||||
`invite_type` varchar(20) NOT NULL,
|
||||
`expire_time` datetime NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `invitations_invitation_d5dd16f8` (`inviter`),
|
||||
KEY `invitations_invitation_token_1961fbb98c05e5fd_uniq` (`token`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
@ -1,24 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS "base_filecomment" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "parent_path" text NOT NULL, "repo_id_parent_path_md5" varchar(100) NOT NULL, "item_name" text NOT NULL, "author" varchar(255) NOT NULL, "comment" text NOT NULL, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL);
|
||||
CREATE INDEX IF NOT EXISTS "base_filecomment_02bd92fa" ON "base_filecomment" ("author");
|
||||
CREATE INDEX IF NOT EXISTS "base_filecomment_9a8c79bf" ON "base_filecomment" ("repo_id");
|
||||
CREATE INDEX IF NOT EXISTS "base_filecomment_c5bf47d4" ON "base_filecomment" ("repo_id_parent_path_md5");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "termsandconditions_termsandconditions" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "slug" varchar(50) NOT NULL, "name" text NOT NULL, "version_number" decimal NOT NULL, "text" text NULL, "info" text NULL, "date_active" datetime NULL, "date_created" datetime NOT NULL);
|
||||
CREATE INDEX IF NOT EXISTS "termsandconditions_termsandconditions_2dbcba41" ON "termsandconditions_termsandconditions" ("slug");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "termsandconditions_usertermsandconditions" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL, "ip_address" char(39) NULL, "date_accepted" datetime NOT NULL, "terms_id" integer NOT NULL REFERENCES "termsandconditions_termsandconditions" ("id"), UNIQUE ("username", "terms_id"));
|
||||
CREATE INDEX IF NOT EXISTS "termsandconditions_usertermsandconditions_2ab34720" ON "termsandconditions_usertermsandconditions" ("terms_id");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "two_factor_phonedevice" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user" varchar(255) NOT NULL UNIQUE, "name" varchar(64) NOT NULL, "confirmed" bool NOT NULL, "number" varchar(40) NOT NULL, "key" varchar(40) NOT NULL, "method" varchar(4) NOT NULL);
|
||||
CREATE TABLE IF NOT EXISTS "two_factor_staticdevice" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user" varchar(255) NOT NULL UNIQUE, "name" varchar(64) NOT NULL, "confirmed" bool NOT NULL);
|
||||
CREATE TABLE IF NOT EXISTS "two_factor_statictoken" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "token" varchar(16) NOT NULL, "device_id" integer NOT NULL REFERENCES "two_factor_staticdevice" ("id"));
|
||||
CREATE TABLE IF NOT EXISTS "two_factor_totpdevice" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user" varchar(255) NOT NULL UNIQUE, "name" varchar(64) NOT NULL, "confirmed" bool NOT NULL, "key" varchar(80) NOT NULL, "step" smallint unsigned NOT NULL, "t0" bigint NOT NULL, "digits" smallint unsigned NOT NULL, "tolerance" smallint unsigned NOT NULL, "drift" smallint NOT NULL, "last_t" bigint NOT NULL);
|
||||
CREATE INDEX IF NOT EXISTS "two_factor_statictoken_94a08da1" ON "two_factor_statictoken" ("token");
|
||||
CREATE INDEX IF NOT EXISTS "two_factor_statictoken_9379346c" ON "two_factor_statictoken" ("device_id");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "invitations_invitation" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "token" varchar(40) NOT NULL, "inviter" varchar(255) NOT NULL, "accepter" varchar(255) NOT NULL, "invite_time" datetime NOT NULL, "accept_time" datetime NULL, "invite_type" varchar(20) NOT NULL, "expire_time" datetime NOT NULL);
|
||||
CREATE INDEX IF NOT EXISTS "invitations_invitation_94a08da1" ON "invitations_invitation" ("token");
|
||||
CREATE INDEX IF NOT EXISTS "invitations_invitation_d5dd16f8" ON "invitations_invitation" ("inviter");
|
||||
|
||||
ALTER TABLE api2_tokenv2 ADD COLUMN wiped_at datetime DEFAULT NULL;
|
||||
ALTER TABLE api2_tokenv2 ADD COLUMN created_at datetime NOT NULL DEFAULT '1970-01-01 00:00:00';
|
@ -1,23 +0,0 @@
|
||||
ALTER TABLE `share_fileshare` MODIFY token varchar(100);
|
||||
ALTER TABLE `share_fileshare` ADD COLUMN `permission` varchar(50) NOT NULL DEFAULT 'view_download';
|
||||
ALTER TABLE `share_uploadlinkshare` MODIFY token varchar(100);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `institutions_institutionquota` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`quota` bigint(20) NOT NULL,
|
||||
`institution_id` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `i_institution_id_2ca7c89373390e2c_fk_institutions_institution_id` (`institution_id`),
|
||||
CONSTRAINT `i_institution_id_2ca7c89373390e2c_fk_institutions_institution_id` FOREIGN KEY (`institution_id`) REFERENCES `institutions_institution` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `admin_log_adminlog` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`email` varchar(254) NOT NULL,
|
||||
`operation` varchar(255) NOT NULL,
|
||||
`detail` longtext NOT NULL,
|
||||
`datetime` datetime NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `admin_log_adminlog_0c83f57c` (`email`),
|
||||
KEY `admin_log_adminlog_f7235a61` (`operation`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
@ -1,9 +0,0 @@
|
||||
alter table share_fileshare add column permission varchar(50) not null default 'view_download';
|
||||
|
||||
CREATE TABLE "admin_log_adminlog" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "email" varchar(254) NOT NULL, "operation" varchar(255) NOT NULL, "detail" text NOT NULL, "datetime" datetime NOT NULL);
|
||||
|
||||
CREATE TABLE "institutions_institutionquota" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "quota" bigint NOT NULL, "institution_id" integer NOT NULL REFERENCES "institutions_institution" ("id"));
|
||||
|
||||
CREATE INDEX "admin_log_adminlog_0c83f57c" ON "admin_log_adminlog" ("email");
|
||||
CREATE INDEX "admin_log_adminlog_f7235a61" ON "admin_log_adminlog" ("operation");
|
||||
CREATE INDEX "institutions_institutionquota_a964baeb" ON "institutions_institutionquota" ("institution_id");
|
@ -1,4 +0,0 @@
|
||||
alter table LDAPUsers add column reference_id VARCHAR(255);
|
||||
alter table EmailUser add column reference_id VARCHAR(255);
|
||||
ALTER TABLE `LDAPUsers` ADD UNIQUE (`reference_id`);
|
||||
ALTER TABLE `EmailUser` ADD UNIQUE (`reference_id`);
|
@ -1,84 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS `revision_tag_tags` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`name` varchar(255) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `name` (`name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `revision_tag_revisiontags` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`path` longtext NOT NULL,
|
||||
`revision_id` varchar(255) NOT NULL,
|
||||
`tag_id` int(11) NOT NULL,
|
||||
`username` varchar(255) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `revision_tag_rev_tag_id_37c2d76166c50597_fk_revision_tag_tags_id` (`tag_id`),
|
||||
KEY `revision_tag_revisiontags_9a8c79bf` (`repo_id`),
|
||||
KEY `revision_tag_revisiontags_5de09a8d` (`revision_id`),
|
||||
KEY `revision_tag_revisiontags_14c4b06b` (`username`),
|
||||
CONSTRAINT `revision_tag_rev_tag_id_37c2d76166c50597_fk_revision_tag_tags_id` FOREIGN KEY (`tag_id`) REFERENCES `revision_tag_tags` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `share_extrasharepermission` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`share_to` varchar(255) NOT NULL,
|
||||
`permission` varchar(30) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `share_extrasharepermission_9a8c79bf` (`repo_id`),
|
||||
KEY `share_extrasharepermission_e4fb1dad` (`share_to`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `share_extragroupssharepermission` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`group_id` int(11) NOT NULL,
|
||||
`permission` varchar(30) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `share_extragroupssharepermission_9a8c79bf` (`repo_id`),
|
||||
KEY `share_extragroupssharepermission_0e939a4f` (`group_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `tags_fileuuidmap` (
|
||||
`uuid` char(32) NOT NULL,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`repo_id_parent_path_md5` varchar(100) NOT NULL,
|
||||
`parent_path` longtext NOT NULL,
|
||||
`filename` varchar(1024) NOT NULL,
|
||||
`is_dir` tinyint(1) NOT NULL,
|
||||
PRIMARY KEY (`uuid`),
|
||||
KEY `tags_fileuuidmap_9a8c79bf` (`repo_id`),
|
||||
KEY `tags_fileuuidmap_c5bf47d4` (`repo_id_parent_path_md5`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `tags_tags` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`name` varchar(255) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `name` (`name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `tags_filetag` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`uuid_id` char(32) NOT NULL,
|
||||
`tag_id` int(11) NOT NULL,
|
||||
`username` varchar(255) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `tags_filetag_uuid_id_5e2dc8ebbab85301_fk_tags_fileuuidmap_uuid` (`uuid_id`),
|
||||
KEY `tags_filetag_tag_id_39c4746ee9d70b71_fk_tags_tags_id` (`tag_id`),
|
||||
CONSTRAINT `tags_filetag_tag_id_39c4746ee9d70b71_fk_tags_tags_id` FOREIGN KEY (`tag_id`) REFERENCES `tags_tags` (`id`),
|
||||
CONSTRAINT `tags_filetag_uuid_id_5e2dc8ebbab85301_fk_tags_fileuuidmap_uuid` FOREIGN KEY (`uuid_id`) REFERENCES `tags_fileuuidmap` (`uuid`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `role_permissions_adminrole` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`email` varchar(254) NOT NULL,
|
||||
`role` varchar(255) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `email` (`email`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
ALTER TABLE `sysadmin_extra_userloginlog` ADD COLUMN `login_success` tinyint(1) NOT NULL default 1;
|
||||
ALTER TABLE `profile_profile` ADD COLUMN `list_in_address_book` tinyint(1) NOT NULL default 0;
|
||||
ALTER TABLE `profile_profile` ADD INDEX `profile_profile_3d5d3631` (`list_in_address_book`);
|
@ -1,4 +0,0 @@
|
||||
alter table LDAPUsers add column reference_id VARCHAR(255);
|
||||
alter table EmailUser add column reference_id VARCHAR(255);
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS reference_id_index on EmailUser (reference_id);
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS ldapusers_reference_id_index on LDAPUsers(reference_id);
|
@ -1,24 +0,0 @@
|
||||
alter table sysadmin_extra_userloginlog add column login_success bool not null default 1;
|
||||
alter table profile_profile add column list_in_address_book bool not null default 0;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "share_extragroupssharepermission" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "group_id" integer NOT NULL, "permission" varchar(30) NOT NULL);
|
||||
CREATE TABLE IF NOT EXISTS "share_extrasharepermission" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "share_to" varchar(255) NOT NULL, "permission" varchar(30) NOT NULL);
|
||||
CREATE TABLE IF NOT EXISTS "tags_fileuuidmap" ("uuid" char(32) NOT NULL PRIMARY KEY, "repo_id" varchar(36) NOT NULL, "repo_id_parent_path_md5" varchar(100) NOT NULL, "parent_path" text NOT NULL, "filename" varchar(1024) NOT NULL, "is_dir" bool NOT NULL);
|
||||
CREATE TABLE IF NOT EXISTS "tags_tags" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "name" varchar(255) NOT NULL UNIQUE);
|
||||
CREATE TABLE IF NOT EXISTS "tags_filetag" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "uuid_id" char(32) NOT NULL REFERENCES "tags_fileuuidmap" ("uuid"), "tag_id" integer NOT NULL REFERENCES "tags_tags" ("id"), "username" varchar(255) NOT NULL);
|
||||
CREATE TABLE IF NOT EXISTS "revision_tag_tags" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "name" varchar(255) NOT NULL UNIQUE);
|
||||
CREATE TABLE IF NOT EXISTS "revision_tag_revisiontags" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "path" text NOT NULL, "revision_id" varchar(255) NOT NULL, "tag_id" integer NOT NULL REFERENCES "revision_tag_tags" ("id"), "username" varchar(255) NOT NULL);
|
||||
CREATE TABLE IF NOT EXISTS "role_permissions_adminrole" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "email" varchar(254) NOT NULL UNIQUE, "role" varchar(255) NOT NULL);
|
||||
CREATE INDEX IF NOT EXISTS "share_extragroupssharepermission_9a8c79bf" ON "share_extragroupssharepermission" ("repo_id");
|
||||
CREATE INDEX IF NOT EXISTS "share_extragroupssharepermission_0e939a4f" ON "share_extragroupssharepermission" ("group_id");
|
||||
CREATE INDEX IF NOT EXISTS "share_extrasharepermission_9a8c79bf" ON "share_extrasharepermission" ("repo_id");
|
||||
CREATE INDEX IF NOT EXISTS "share_extrasharepermission_e4fb1dad" ON "share_extrasharepermission" ("share_to");
|
||||
CREATE INDEX IF NOT EXISTS "tags_fileuuidmap_9a8c79bf" ON "tags_fileuuidmap" ("repo_id");
|
||||
CREATE INDEX IF NOT EXISTS "tags_fileuuidmap_c5bf47d4" ON "tags_fileuuidmap" ("repo_id_parent_path_md5");
|
||||
CREATE INDEX IF NOT EXISTS "tags_filetag_10634818" ON "tags_filetag" ("uuid_id");
|
||||
CREATE INDEX IF NOT EXISTS "tags_filetag_76f094bc" ON "tags_filetag" ("tag_id");
|
||||
CREATE INDEX IF NOT EXISTS "revision_tag_revisiontags_9a8c79bf" ON "revision_tag_revisiontags" ("repo_id");
|
||||
CREATE INDEX IF NOT EXISTS "revision_tag_revisiontags_5de09a8d" ON "revision_tag_revisiontags" ("revision_id");
|
||||
CREATE INDEX IF NOT EXISTS "revision_tag_revisiontags_76f094bc" ON "revision_tag_revisiontags" ("tag_id");
|
||||
CREATE INDEX IF NOT EXISTS "revision_tag_revisiontags_14c4b06b" ON "revision_tag_revisiontags" ("username");
|
||||
CREATE INDEX IF NOT EXISTS "profile_profile_3d5d3631" ON "profile_profile" ("list_in_address_book");
|
@ -1,24 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS LDAPConfig (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER) ENGINE=INNODB;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS GroupStructure (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, group_id INTEGER, path VARCHAR(1024), UNIQUE INDEX(group_id))ENGINE=INNODB;
|
||||
|
||||
alter table `Group` add column parent_group_id INTEGER default 0; -- Replace `Group` if you configured table `Group` to another name.
|
||||
|
||||
ALTER TABLE Binding ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
|
||||
ALTER TABLE LDAPConfig ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
|
||||
ALTER TABLE OrgUser DROP primary key;
|
||||
ALTER TABLE OrgUser ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE OrgUser ADD UNIQUE (org_id, email);
|
||||
|
||||
ALTER TABLE OrgGroup DROP primary key;
|
||||
ALTER TABLE OrgGroup ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE OrgGroup ADD UNIQUE (org_id, group_id);
|
||||
|
||||
ALTER TABLE GroupUser DROP primary key;
|
||||
ALTER TABLE GroupUser ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE GroupUser ADD UNIQUE (group_id, user_name);
|
||||
|
||||
ALTER TABLE GroupDNPair ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
|
@ -1,4 +0,0 @@
|
||||
ALTER TABLE Event ADD INDEX `ix_event_timestamp` (`timestamp`);
|
||||
ALTER TABLE FileAudit ADD INDEX `ix_FileAudit_timestamp` (`timestamp`);
|
||||
ALTER TABLE FileUpdate ADD INDEX `ix_FileUpdate_timestamp` (`timestamp`);
|
||||
ALTER TABLE UserTrafficStat ADD INDEX `ix_UserTrafficStat_month` (`month`);
|
@ -1,87 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS SeafileConf (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER) ENGINE=INNODB;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS RepoInfo (repo_id CHAR(36) PRIMARY KEY, name VARCHAR(255) NOT NULL, update_time BIGINT, version INTEGER, is_encrypted INTEGER, last_modifier VARCHAR(255)) ENGINE=INNODB;
|
||||
|
||||
ALTER TABLE Repo DROP primary key;
|
||||
ALTER TABLE Repo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE Repo ADD UNIQUE (repo_id);
|
||||
|
||||
ALTER TABLE RepoOwner DROP primary key;
|
||||
ALTER TABLE RepoOwner ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE RepoOwner ADD UNIQUE (repo_id);
|
||||
|
||||
ALTER TABLE RepoGroup ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
|
||||
ALTER TABLE InnerPubRepo DROP primary key;
|
||||
ALTER TABLE InnerPubRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE InnerPubRepo ADD UNIQUE (repo_id);
|
||||
|
||||
ALTER TABLE RepoUserToken ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
|
||||
ALTER TABLE RepoTokenPeerInfo DROP primary key;
|
||||
ALTER TABLE RepoTokenPeerInfo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE RepoTokenPeerInfo ADD UNIQUE (token);
|
||||
|
||||
ALTER TABLE RepoHead DROP primary key;
|
||||
ALTER TABLE RepoHead ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE RepoHead ADD UNIQUE (repo_id);
|
||||
|
||||
ALTER TABLE RepoSize DROP primary key;
|
||||
ALTER TABLE RepoSize ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE RepoSize ADD UNIQUE (repo_id);
|
||||
|
||||
ALTER TABLE RepoHistoryLimit DROP primary key;
|
||||
ALTER TABLE RepoHistoryLimit ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE RepoHistoryLimit ADD UNIQUE (repo_id);
|
||||
|
||||
ALTER TABLE RepoValidSince DROP primary key;
|
||||
ALTER TABLE RepoValidSince ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE RepoValidSince ADD UNIQUE (repo_id);
|
||||
|
||||
ALTER TABLE WebAP DROP primary key;
|
||||
ALTER TABLE WebAP ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE WebAP ADD UNIQUE (repo_id);
|
||||
|
||||
ALTER TABLE VirtualRepo DROP primary key;
|
||||
ALTER TABLE VirtualRepo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE VirtualRepo ADD UNIQUE (repo_id);
|
||||
|
||||
ALTER TABLE GarbageRepos DROP primary key;
|
||||
ALTER TABLE GarbageRepos ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE GarbageRepos ADD UNIQUE (repo_id);
|
||||
|
||||
ALTER TABLE RepoTrash DROP primary key;
|
||||
ALTER TABLE RepoTrash ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE RepoTrash ADD UNIQUE (repo_id);
|
||||
|
||||
ALTER TABLE RepoFileCount DROP primary key;
|
||||
ALTER TABLE RepoFileCount ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE RepoFileCount ADD UNIQUE (repo_id);
|
||||
|
||||
ALTER TABLE RepoInfo DROP primary key;
|
||||
ALTER TABLE RepoInfo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE RepoInfo ADD UNIQUE (repo_id);
|
||||
|
||||
ALTER TABLE UserQuota DROP primary key;
|
||||
ALTER TABLE UserQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE UserQuota ADD UNIQUE (user);
|
||||
|
||||
ALTER TABLE UserShareQuota DROP primary key;
|
||||
ALTER TABLE UserShareQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE UserShareQuota ADD UNIQUE (user);
|
||||
|
||||
ALTER TABLE OrgQuota DROP primary key;
|
||||
ALTER TABLE OrgQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE OrgQuota ADD UNIQUE (org_id);
|
||||
|
||||
ALTER TABLE OrgUserQuota DROP primary key;
|
||||
ALTER TABLE OrgUserQuota ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE OrgUserQuota ADD UNIQUE (org_id, user);
|
||||
|
||||
ALTER TABLE SystemInfo ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
|
||||
ALTER TABLE Branch DROP primary key;
|
||||
ALTER TABLE Branch ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE Branch ADD UNIQUE (repo_id, name);
|
||||
|
||||
ALTER TABLE SeafileConf ADD id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
@ -1,170 +0,0 @@
|
||||
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
|
||||
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
|
||||
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
|
||||
/*!40101 SET NAMES utf8 */;
|
||||
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
|
||||
/*!40103 SET TIME_ZONE='+00:00' */;
|
||||
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
|
||||
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
|
||||
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
|
||||
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
|
||||
|
||||
/*!40101 SET @saved_cs_client = @@character_set_client */;
|
||||
/*!40101 SET character_set_client = utf8 */;
|
||||
CREATE TABLE IF NOT EXISTS `auth_group` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`name` varchar(80) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `name` (`name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
/*!40101 SET character_set_client = @saved_cs_client */;
|
||||
|
||||
/*!40000 ALTER TABLE `auth_group` DISABLE KEYS */;
|
||||
/*!40000 ALTER TABLE `auth_group` ENABLE KEYS */;
|
||||
/*!40101 SET @saved_cs_client = @@character_set_client */;
|
||||
/*!40101 SET character_set_client = utf8 */;
|
||||
CREATE TABLE IF NOT EXISTS `auth_group_permissions` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`group_id` int(11) NOT NULL,
|
||||
`permission_id` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `auth_group_permissions_group_id_permission_id_0cd325b0_uniq` (`group_id`,`permission_id`),
|
||||
KEY `auth_group_permissio_permission_id_84c5c92e_fk_auth_perm` (`permission_id`),
|
||||
CONSTRAINT `auth_group_permissio_permission_id_84c5c92e_fk_auth_perm` FOREIGN KEY (`permission_id`) REFERENCES `auth_permission` (`id`),
|
||||
CONSTRAINT `auth_group_permissions_group_id_b120cbf9_fk_auth_group_id` FOREIGN KEY (`group_id`) REFERENCES `auth_group` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
/*!40101 SET character_set_client = @saved_cs_client */;
|
||||
|
||||
/*!40000 ALTER TABLE `auth_group_permissions` DISABLE KEYS */;
|
||||
/*!40000 ALTER TABLE `auth_group_permissions` ENABLE KEYS */;
|
||||
/*!40101 SET @saved_cs_client = @@character_set_client */;
|
||||
/*!40101 SET character_set_client = utf8 */;
|
||||
CREATE TABLE IF NOT EXISTS `auth_permission` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`name` varchar(255) NOT NULL,
|
||||
`content_type_id` int(11) NOT NULL,
|
||||
`codename` varchar(100) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `auth_permission_content_type_id_codename_01ab375a_uniq` (`content_type_id`,`codename`),
|
||||
CONSTRAINT `auth_permission_content_type_id_2f476e4b_fk_django_co` FOREIGN KEY (`content_type_id`) REFERENCES `django_content_type` (`id`)
|
||||
) ENGINE=InnoDB AUTO_INCREMENT=209 DEFAULT CHARSET=utf8;
|
||||
/*!40101 SET character_set_client = @saved_cs_client */;
|
||||
|
||||
/*!40000 ALTER TABLE `auth_permission` ENABLE KEYS */;
|
||||
/*!40101 SET @saved_cs_client = @@character_set_client */;
|
||||
/*!40101 SET character_set_client = utf8 */;
|
||||
CREATE TABLE IF NOT EXISTS `auth_user` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`password` varchar(128) NOT NULL,
|
||||
`last_login` datetime DEFAULT NULL,
|
||||
`is_superuser` tinyint(1) NOT NULL,
|
||||
`username` varchar(150) NOT NULL,
|
||||
`first_name` varchar(30) NOT NULL,
|
||||
`last_name` varchar(30) NOT NULL,
|
||||
`email` varchar(254) NOT NULL,
|
||||
`is_staff` tinyint(1) NOT NULL,
|
||||
`is_active` tinyint(1) NOT NULL,
|
||||
`date_joined` datetime NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `username` (`username`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
/*!40101 SET character_set_client = @saved_cs_client */;
|
||||
|
||||
/*!40000 ALTER TABLE `auth_user` DISABLE KEYS */;
|
||||
/*!40000 ALTER TABLE `auth_user` ENABLE KEYS */;
|
||||
/*!40101 SET @saved_cs_client = @@character_set_client */;
|
||||
/*!40101 SET character_set_client = utf8 */;
|
||||
CREATE TABLE IF NOT EXISTS `auth_user_groups` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`user_id` int(11) NOT NULL,
|
||||
`group_id` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `auth_user_groups_user_id_group_id_94350c0c_uniq` (`user_id`,`group_id`),
|
||||
KEY `auth_user_groups_group_id_97559544_fk_auth_group_id` (`group_id`),
|
||||
CONSTRAINT `auth_user_groups_group_id_97559544_fk_auth_group_id` FOREIGN KEY (`group_id`) REFERENCES `auth_group` (`id`),
|
||||
CONSTRAINT `auth_user_groups_user_id_6a12ed8b_fk_auth_user_id` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
/*!40101 SET character_set_client = @saved_cs_client */;
|
||||
|
||||
/*!40000 ALTER TABLE `auth_user_groups` DISABLE KEYS */;
|
||||
/*!40000 ALTER TABLE `auth_user_groups` ENABLE KEYS */;
|
||||
/*!40101 SET @saved_cs_client = @@character_set_client */;
|
||||
/*!40101 SET character_set_client = utf8 */;
|
||||
CREATE TABLE IF NOT EXISTS `auth_user_user_permissions` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`user_id` int(11) NOT NULL,
|
||||
`permission_id` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `auth_user_user_permissions_user_id_permission_id_14a6b632_uniq` (`user_id`,`permission_id`),
|
||||
KEY `auth_user_user_permi_permission_id_1fbb5f2c_fk_auth_perm` (`permission_id`),
|
||||
CONSTRAINT `auth_user_user_permi_permission_id_1fbb5f2c_fk_auth_perm` FOREIGN KEY (`permission_id`) REFERENCES `auth_permission` (`id`),
|
||||
CONSTRAINT `auth_user_user_permissions_user_id_a95ead1b_fk_auth_user_id` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
/*!40101 SET character_set_client = @saved_cs_client */;
|
||||
|
||||
|
||||
/*!40000 ALTER TABLE `wiki_personalwiki` DISABLE KEYS */;
|
||||
/*!40000 ALTER TABLE `wiki_personalwiki` ENABLE KEYS */;
|
||||
/*!40101 SET @saved_cs_client = @@character_set_client */;
|
||||
/*!40101 SET character_set_client = utf8 */;
|
||||
CREATE TABLE IF NOT EXISTS `wiki_wiki` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`username` varchar(255) NOT NULL,
|
||||
`name` varchar(255) NOT NULL,
|
||||
`slug` varchar(255) NOT NULL,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`permission` varchar(50) NOT NULL,
|
||||
`created_at` datetime NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `slug` (`slug`),
|
||||
UNIQUE KEY `wiki_wiki_username_3c0f83e1b93de663_uniq` (`username`,`repo_id`),
|
||||
KEY `wiki_wiki_fde81f11` (`created_at`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
/*!40101 SET character_set_client = @saved_cs_client */;
|
||||
|
||||
/*!40000 ALTER TABLE `wiki_wiki` DISABLE KEYS */;
|
||||
/*!40000 ALTER TABLE `wiki_wiki` ENABLE KEYS */;
|
||||
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
|
||||
|
||||
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
|
||||
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
|
||||
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
|
||||
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
|
||||
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
|
||||
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
|
||||
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `django_cas_ng_proxygrantingticket` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`session_key` varchar(255) DEFAULT NULL,
|
||||
`pgtiou` varchar(255) DEFAULT NULL,
|
||||
`pgt` varchar(255) DEFAULT NULL,
|
||||
`date` datetime NOT NULL,
|
||||
`user` varchar(255) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `django_cas_ng_proxygrant_session_key_user_id_4cd2ea19_uniq` (`session_key`,`user`),
|
||||
KEY `django_cas_ng_proxyg_user_id_f833edd2_fk_auth_user` (`user`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
/*!40101 SET character_set_client = @saved_cs_client */;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `django_cas_ng_sessionticket` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`session_key` varchar(255) NOT NULL,
|
||||
`ticket` varchar(255) NOT NULL,
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `organizations_orgmemberquota` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`org_id` int(11) NOT NULL,
|
||||
`quota` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `organizations_orgmemberquota_org_id_93dde51d` (`org_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
|
||||
ALTER TABLE notifications_notification ADD INDEX `notifications_notification_386bba5a` (`primary`);
|
||||
|
||||
ALTER TABLE institutions_institutionadmin ADD INDEX `institutions_institutionadmin_user_7560167c8413ff0e_uniq` (`user`);
|
||||
|
||||
ALTER TABLE `post_office_attachment` add column `mimetype` varchar(255) NOT NULL;
|
@ -1,2 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS GroupStructure (group_id INTEGER PRIMARY KEY, path VARCHAR(1024));
|
||||
alter table `Group` add column parent_group_id INTEGER default 0; -- Replace `Group` if you configured table `Group` to another name.
|
@ -1 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS LDAPConfig (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER);
|
@ -1,4 +0,0 @@
|
||||
CREATE INDEX IF NOT EXISTS ix_event_timestamp ON Event (timestamp);
|
||||
CREATE INDEX IF NOT EXISTS ix_FileAudit_timestamp ON FileAudit (timestamp);
|
||||
CREATE INDEX IF NOT EXISTS ix_FileUpdate_timestamp ON FileUpdate (timestamp);
|
||||
CREATE INDEX IF NOT EXISTS ix_UserTrafficStat_month ON UserTrafficStat (month);
|
@ -1,3 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS SeafileConf (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS RepoInfo (repo_id CHAR(36) PRIMARY KEY, name VARCHAR(255) NOT NULL, update_time INTEGER, version INTEGER, is_encrypted INTEGER, last_modifier VARCHAR(255));
|
@ -1,39 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS "auth_group" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "name" varchar(80) NOT NULL UNIQUE);
|
||||
CREATE TABLE IF NOT EXISTS "auth_group_permissions" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "group_id" integer NOT NULL REFERENCES "auth_group" ("id"), "permission_id" integer NOT NULL REFERENCES "auth_permission" ("id"));
|
||||
CREATE TABLE IF NOT EXISTS "auth_user_groups" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user_id" integer NOT NULL REFERENCES "auth_user" ("id"), "group_id" integer NOT NULL REFERENCES "auth_group" ("id"));
|
||||
CREATE TABLE IF NOT EXISTS "auth_user_user_permissions" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user_id" integer NOT NULL REFERENCES "auth_user" ("id"), "permission_id" integer NOT NULL REFERENCES "auth_permission" ("id"));
|
||||
CREATE TABLE IF NOT EXISTS "auth_permission" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "content_type_id" integer NOT NULL REFERENCES "django_content_type" ("id"), "codename" varchar(100) NOT NULL, "name" varchar(255) NOT NULL);
|
||||
CREATE TABLE IF NOT EXISTS "auth_user" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "password" varchar(128) NOT NULL, "last_login" datetime NULL, "is_superuser" bool NOT NULL, "first_name" varchar(30) NOT NULL, "last_name" varchar(30) NOT NULL, "email" varchar(254) NOT NULL, "is_staff" bool NOT NULL, "is_active" bool NOT NULL, "date_joined" datetime NOT NULL, "username" varchar(150) NOT NULL UNIQUE);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "organizations_orgmemberquota" (
|
||||
"id" integer NOT NULL PRIMARY KEY,
|
||||
"org_id" integer NOT NULL,
|
||||
"quota" integer NOT NULL
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS "django_cas_ng_sessionticket" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "session_key" varchar(255) NOT NULL, "ticket" varchar(255) NOT NULL);
|
||||
CREATE TABLE IF NOT EXISTS "django_cas_ng_proxygrantingticket" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "session_key" varchar(255) NULL, "pgtiou" varchar(255) NULL, "pgt" varchar(255) NULL, "date" datetime NOT NULL, "user" varchar(255) NOT NULL);
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS "auth_group_permissions_group_id_permission_id_0cd325b0_uniq" ON "auth_group_permissions" ("group_id", "permission_id");
|
||||
CREATE INDEX IF NOT EXISTS "auth_group_permissions_group_id_b120cbf9" ON "auth_group_permissions" ("group_id");
|
||||
CREATE INDEX IF NOT EXISTS "auth_group_permissions_permission_id_84c5c92e" ON "auth_group_permissions" ("permission_id");
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS "auth_user_groups_user_id_group_id_94350c0c_uniq" ON "auth_user_groups" ("user_id", "group_id");
|
||||
CREATE INDEX IF NOT EXISTS "auth_user_groups_user_id_6a12ed8b" ON "auth_user_groups" ("user_id");
|
||||
CREATE INDEX IF NOT EXISTS "auth_user_groups_group_id_97559544" ON "auth_user_groups" ("group_id");
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS "auth_user_user_permissions_user_id_permission_id_14a6b632_uniq" ON "auth_user_user_permissions" ("user_id", "permission_id");
|
||||
CREATE INDEX IF NOT EXISTS "auth_user_user_permissions_user_id_a95ead1b" ON "auth_user_user_permissions" ("user_id");
|
||||
CREATE INDEX IF NOT EXISTS "auth_user_user_permissions_permission_id_1fbb5f2c" ON "auth_user_user_permissions" ("permission_id");
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS "auth_permission_content_type_id_codename_01ab375a_uniq" ON "auth_permission" ("content_type_id", "codename");
|
||||
CREATE INDEX IF NOT EXISTS "auth_permission_content_type_id_2f476e4b" ON "auth_permission" ("content_type_id");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "wiki_wiki" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL, "name" varchar(255) NOT NULL, "slug" varchar(255) NOT NULL UNIQUE, "repo_id" varchar(36) NOT NULL, "permission" varchar(50) NOT NULL, "created_at" datetime NOT NULL, UNIQUE ("username", "repo_id"));
|
||||
|
||||
CREATE INDEX IF NOT EXISTS "wiki_wiki_fde81f11" ON "wiki_wiki" ("created_at");
|
||||
|
||||
CREATE INDEX IF NOT EXISTS "notifications_notification_386bba5a" ON "notifications_notification" ("primary");
|
||||
CREATE INDEX IF NOT EXISTS "institutions_institutionadmin_ee11cbb1" ON "institutions_institutionadmin" ("user");
|
||||
|
||||
CREATE INDEX IF NOT EXISTS "organizations_orgmemberquota_944dadb6" ON "organizations_orgmemberquota" ("org_id");
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS "django_cas_ng_proxygrantingticket_session_key_user_8a4ec2bc_uniq" ON "django_cas_ng_proxygrantingticket" ("session_key", "user");
|
||||
CREATE INDEX IF NOT EXISTS "django_cas_ng_proxygrantingticket_user_1f42619d" ON "django_cas_ng_proxygrantingticket" ("user");
|
||||
|
||||
ALTER TABLE "post_office_attachment" add column "mimetype" varchar(255);
|
@ -1 +0,0 @@
|
||||
ALTER TABLE UserRole ADD COLUMN is_manual_set INTEGER DEFAULT 0;
|
@ -1,4 +0,0 @@
|
||||
ALTER TABLE RepoInfo ADD COLUMN status INTEGER DEFAULT 0;
|
||||
CREATE TABLE IF NOT EXISTS RepoSyncError (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, token CHAR(41), error_time BIGINT UNSIGNED, error_con VARCHAR(1024), UNIQUE INDEX(token));
|
||||
ALTER TABLE RepoSyncError MODIFY COLUMN error_con VARCHAR(1024);
|
||||
CREATE TABLE IF NOT EXISTS WebUploadTempFiles (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(40) NOT NULL, file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL);
|
@ -1,125 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS `drafts_draft` (
|
||||
`id` int(11) NOT NULL,
|
||||
`created_at` datetime(6) NOT NULL,
|
||||
`updated_at` datetime(6) NOT NULL,
|
||||
`username` varchar(255) NOT NULL,
|
||||
`origin_repo_id` varchar(36) NOT NULL,
|
||||
`origin_file_version` varchar(100) NOT NULL,
|
||||
`draft_file_path` varchar(1024) NOT NULL,
|
||||
`origin_file_uuid` char(32) NOT NULL,
|
||||
`publish_file_version` varchar(100) DEFAULT NULL,
|
||||
`status` varchar(20) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `drafts_draft_origin_file_uuid_id_f150319e_fk_tags_file` (`origin_file_uuid`),
|
||||
KEY `drafts_draft_created_at_e9f4523f` (`created_at`),
|
||||
KEY `drafts_draft_updated_at_0a144b05` (`updated_at`),
|
||||
KEY `drafts_draft_username_73e6738b` (`username`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `drafts_draftreviewer` (
|
||||
`id` int(11) NOT NULL,
|
||||
`reviewer` varchar(255) NOT NULL,
|
||||
`draft_id` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `drafts_draftreviewer_reviewer_e4c777ac` (`reviewer`),
|
||||
KEY `drafts_draftreviewer_draft_id_4ea59775_fk_drafts_draft_id` (`draft_id`),
|
||||
CONSTRAINT `drafts_draftreviewer_draft_id_4ea59775_fk_drafts_draft_id` FOREIGN KEY (`draft_id`) REFERENCES `drafts_draft` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
ALTER TABLE `options_useroptions` ADD INDEX `options_useroptions_option_key_7bf7ae4b` (`option_key`);
|
||||
|
||||
ALTER TABLE TotalStorageStat DROP primary key;
|
||||
ALTER TABLE TotalStorageStat ADD `id` BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE TotalStorageStat ADD `org_id` INT NOT NULL DEFAULT -1;
|
||||
ALTER TABLE TotalStorageStat ADD INDEX `idx_storage_time_org` (`timestamp`, `org_id`);
|
||||
|
||||
ALTER TABLE FileOpsStat ADD `org_id` INT NOT NULL DEFAULT -1;
|
||||
ALTER TABLE FileOpsStat ADD INDEX `idx_file_ops_time_org` (`timestamp`, `org_id`);
|
||||
|
||||
ALTER TABLE UserActivityStat DROP primary key;
|
||||
ALTER TABLE UserActivityStat ADD `id` BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST;
|
||||
ALTER TABLE UserActivityStat ADD UNIQUE (name_time_md5);
|
||||
ALTER TABLE UserActivityStat ADD `org_id` INT NOT NULL DEFAULT -1;
|
||||
ALTER TABLE UserActivityStat ADD INDEX `idx_activity_time_org` (`timestamp`, `org_id`);
|
||||
|
||||
DROP TABLE UserTrafficStat;
|
||||
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `repo_tags_repotags` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`name` varchar(255) NOT NULL,
|
||||
`color` varchar(255) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `repo_tags_repotags_repo_id_1163a48f` (`repo_id`),
|
||||
KEY `repo_tags_repotags_name_3f4c9027` (`name`),
|
||||
KEY `repo_tags_repotags_color_1292b6c1` (`color`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `file_tags_filetags` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`file_uuid_id` char(32) NOT NULL,
|
||||
`repo_tag_id` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `file_tags_filetags_file_uuid_id_e30f0ec8_fk_tags_file` (`file_uuid_id`),
|
||||
KEY `file_tags_filetags_repo_tag_id_c39660cb_fk_repo_tags_repotags_id` (`repo_tag_id`),
|
||||
CONSTRAINT `file_tags_filetags_file_uuid_id_e30f0ec8_fk_tags_file` FOREIGN KEY (`file_uuid_id`) REFERENCES `tags_fileuuidmap` (`uuid`),
|
||||
CONSTRAINT `file_tags_filetags_repo_tag_id_c39660cb_fk_repo_tags_repotags_id` FOREIGN KEY (`repo_tag_id`) REFERENCES `repo_tags_repotags` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `related_files_relatedfiles` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`o_uuid_id` char(32) NOT NULL,
|
||||
`r_uuid_id` char(32) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `related_files_relate_o_uuid_id_aaa8e613_fk_tags_file` (`o_uuid_id`),
|
||||
KEY `related_files_relate_r_uuid_id_031751df_fk_tags_file` (`r_uuid_id`),
|
||||
CONSTRAINT `related_files_relate_o_uuid_id_aaa8e613_fk_tags_file` FOREIGN KEY (`o_uuid_id`) REFERENCES `tags_fileuuidmap` (`uuid`),
|
||||
CONSTRAINT `related_files_relate_r_uuid_id_031751df_fk_tags_file` FOREIGN KEY (`r_uuid_id`) REFERENCES `tags_fileuuidmap` (`uuid`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `organizations_orgsettings` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`org_id` int(11) NOT NULL,
|
||||
`role` varchar(100) DEFAULT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `organizations_orgsettings_org_id_630f6843_uniq` (`org_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
DROP INDEX `profile_profile_contact_email_0975e4bf_uniq` ON `profile_profile`;
|
||||
ALTER TABLE `profile_profile` ADD CONSTRAINT `profile_profile_contact_email_0975e4bf_uniq` UNIQUE (`contact_email`);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `social_auth_usersocialauth` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`username` varchar(255) NOT NULL,
|
||||
`provider` varchar(32) NOT NULL,
|
||||
`uid` varchar(150) NOT NULL,
|
||||
`extra_data` longtext NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `social_auth_usersocialauth_provider_uid_e6b5e668_uniq` (`provider`,`uid`),
|
||||
KEY `social_auth_usersocialauth_username_3f06b5cf` (`username`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
|
||||
|
||||
ALTER TABLE `base_filecomment` ADD `detail` LONGTEXT DEFAULT NULL;
|
||||
ALTER TABLE `base_filecomment` ADD `resolved` TINYINT(1) NOT NULL DEFAULT 0;
|
||||
ALTER TABLE `base_filecomment` ADD INDEX `resolved` (`resolved`);
|
||||
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `base_reposecretkey` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`secret_key` varchar(44) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `repo_id` (`repo_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
@ -1 +0,0 @@
|
||||
ALTER TABLE UserRole ADD COLUMN is_manual_set INTEGER DEFAULT 0;
|
@ -1,7 +0,0 @@
|
||||
ALTER TABLE RepoInfo ADD COLUMN status INTEGER DEFAULT 0;
|
||||
CREATE TABLE IF NOT EXISTS RepoSyncError (token CHAR(41) PRIMARY KEY, error_time BIGINT, error_con VARCHAR(1024));
|
||||
ALTER TABLE RepoSyncError RENAME TO TmpRepoSyncError;
|
||||
CREATE TABLE RepoSyncError (token CHAR(41) PRIMARY KEY, error_time BIGINT, error_con VARCHAR(1024));
|
||||
INSERT INTO RepoSyncError SELECT * FROM TmpRepoSyncError;
|
||||
DROP TABLE TmpRepoSyncError;
|
||||
CREATE TABLE IF NOT EXISTS WebUploadTempFiles (repo_id CHAR(40) NOT NULL, file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL);
|
@ -1,40 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS "drafts_draft" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL, "username" varchar(255) NOT NULL, "origin_repo_id" varchar(36) NOT NULL, "origin_file_version" varchar(100) NOT NULL, "draft_file_path" varchar(1024) NOT NULL, "publish_file_version" varchar(100) NULL, "status" varchar(20) NOT NULL, "origin_file_uuid" char(32) NOT NULL);
|
||||
CREATE INDEX IF NOT EXISTS "drafts_draft_created_at_e9f4523f" ON "drafts_draft" ("created_at");
|
||||
CREATE INDEX IF NOT EXISTS "drafts_draft_updated_at_0a144b05" ON "drafts_draft" ("updated_at");
|
||||
CREATE INDEX IF NOT EXISTS "drafts_draft_username_73e6738b" ON "drafts_draft" ("username");
|
||||
CREATE INDEX IF NOT EXISTS "drafts_draft_origin_file_uuid_7c003c98" ON "drafts_draft" ("origin_file_uuid");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "drafts_draftreviewer" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "reviewer" varchar(255) NOT NULL, "draft_id" integer NOT NULL REFERENCES "drafts_draft" ("id"));
|
||||
CREATE INDEX IF NOT EXISTS "drafts_draftreviewer_reviewer_e4c777ac" ON "drafts_draftreviewer" ("reviewer");
|
||||
CREATE INDEX IF NOT EXISTS "drafts_draftreviewer_draft_id_4ea59775" ON "drafts_draftreviewer" ("draft_id");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "social_auth_association" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "server_url" varchar(255) NOT NULL, "handle" varchar(255) NOT NULL, "secret" varchar(255) NOT NULL, "issued" integer NOT NULL, "lifetime" integer NOT NULL, "assoc_type" varchar(64) NOT NULL);
|
||||
CREATE TABLE IF NOT EXISTS "social_auth_code" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "email" varchar(254) NOT NULL, "code" varchar(32) NOT NULL, "verified" bool NOT NULL, "timestamp" datetime NOT NULL);
|
||||
CREATE TABLE IF NOT EXISTS "social_auth_nonce" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "server_url" varchar(255) NOT NULL, "timestamp" integer NOT NULL, "salt" varchar(65) NOT NULL);
|
||||
CREATE TABLE IF NOT EXISTS "social_auth_partial" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "token" varchar(32) NOT NULL, "next_step" smallint unsigned NOT NULL, "backend" varchar(32) NOT NULL, "data" text NOT NULL, "timestamp" datetime NOT NULL);
|
||||
CREATE TABLE IF NOT EXISTS "social_auth_usersocialauth" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL, "provider" varchar(32) NOT NULL, "uid" varchar(255) NOT NULL, "extra_data" text NOT NULL);
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "repo_tags_repotags" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "name" varchar(255) NOT NULL, "color" varchar(255) NOT NULL);
|
||||
CREATE INDEX IF NOT EXISTS "repo_tags_repotags_repo_id_1163a48f" ON "repo_tags_repotags" ("repo_id");
|
||||
CREATE INDEX IF NOT EXISTS "repo_tags_repotags_name_3f4c9027" ON "repo_tags_repotags" ("name");
|
||||
CREATE INDEX IF NOT EXISTS "repo_tags_repotags_color_1292b6c1" ON "repo_tags_repotags" ("color");
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "file_tags_filetags" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "file_uuid_id" char(32) NOT NULL REFERENCES "tags_fileuuidmap" ("uuid"), "repo_tag_id" integer NOT NULL REFERENCES "repo_tags_repotags" ("id"));
|
||||
CREATE INDEX IF NOT EXISTS "file_tags_filetags_file_uuid_id_e30f0ec8" ON "file_tags_filetags" ("file_uuid_id");
|
||||
CREATE INDEX IF NOT EXISTS "file_tags_filetags_repo_tag_id_c39660cb" ON "file_tags_filetags" ("repo_tag_id");
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "related_files_relatedfiles" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "o_uuid_id" char(32) NOT NULL REFERENCES "tags_fileuuidmap" ("uuid"), "r_uuid_id" char(32) NOT NULL REFERENCES "tags_fileuuidmap" ("uuid"));
|
||||
CREATE INDEX IF NOT EXISTS "related_files_relatedfiles_o_uuid_id_aaa8e613" ON "related_files_relatedfiles" ("o_uuid_id");
|
||||
CREATE INDEX IF NOT EXISTS "related_files_relatedfiles_r_uuid_id_031751df" ON "related_files_relatedfiles" ("r_uuid_id");
|
||||
|
||||
|
||||
ALTER TABLE "base_filecomment" ADD COLUMN "detail" text DEFAULT NULL;
|
||||
ALTER TABLE "base_filecomment" ADD COLUMN "resolved" bool NOT NULL DEFAULT 0;
|
||||
CREATE INDEX IF NOT EXISTS "base_filecomment_resolved_e0717eca" ON "base_filecomment" ("resolved");
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "base_reposecretkey" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL UNIQUE, "secret_key" varchar(44) NOT NULL);
|
||||
|
@ -1,73 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS `base_reposecretkey` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`secret_key` varchar(44) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `repo_id` (`repo_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
|
||||
ALTER TABLE `constance_config` MODIFY `value` longtext DEFAULT NULL;
|
||||
ALTER TABLE `constance_config` CHANGE `key` `constance_key` varchar(255) NOT NULL;
|
||||
|
||||
DROP INDEX `drafts_draft_origin_file_uuid_7c003c98_uniq` ON `drafts_draft`;
|
||||
ALTER TABLE `drafts_draft` ADD CONSTRAINT `drafts_draft_origin_file_uuid_7c003c98_uniq` UNIQUE (`origin_file_uuid`);
|
||||
CREATE INDEX `drafts_draft_origin_repo_id_8978ca2c` ON `drafts_draft` (`origin_repo_id`);
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `file_participants_fileparticipant` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`username` varchar(255) NOT NULL,
|
||||
`uuid_id` char(32) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `file_participants_fileparticipant_uuid_id_username_c747dd36_uniq` (`uuid_id`,`username`),
|
||||
CONSTRAINT `file_participants_fi_uuid_id_861b7339_fk_tags_file` FOREIGN KEY (`uuid_id`) REFERENCES `tags_fileuuidmap` (`uuid`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `repo_api_tokens` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`app_name` varchar(255) NOT NULL,
|
||||
`token` varchar(40) NOT NULL,
|
||||
`generated_at` datetime NOT NULL,
|
||||
`generated_by` varchar(255) NOT NULL,
|
||||
`last_access` datetime NOT NULL,
|
||||
`permission` varchar(15) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `token` (`token`),
|
||||
KEY `repo_api_tokens_repo_id_47a50fef` (`repo_id`),
|
||||
KEY `repo_api_tokens_app_name_7c395c31` (`app_name`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `abuse_reports_abusereport` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`reporter` longtext DEFAULT NULL,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`repo_name` varchar(255) NOT NULL,
|
||||
`file_path` longtext DEFAULT NULL,
|
||||
`abuse_type` varchar(255) NOT NULL,
|
||||
`description` longtext DEFAULT NULL,
|
||||
`handled` tinyint(1) NOT NULL,
|
||||
`time` datetime(6) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `abuse_reports_abusereport_abuse_type_703d5335` (`abuse_type`),
|
||||
KEY `abuse_reports_abusereport_handled_94b8304c` (`handled`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `repo_share_invitation` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`path` longtext NOT NULL,
|
||||
`permission` varchar(50) NOT NULL,
|
||||
`invitation_id` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `repo_share_invitatio_invitation_id_b71effd2_fk_invitatio` (`invitation_id`),
|
||||
KEY `repo_share_invitation_repo_id_7bcf84fa` (`repo_id`),
|
||||
CONSTRAINT `repo_share_invitatio_invitation_id_b71effd2_fk_invitatio` FOREIGN KEY (`invitation_id`) REFERENCES `invitations_invitation` (`id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
ALTER TABLE `post_office_attachment` add column `headers` longtext DEFAULT NULL;
|
||||
|
@ -1,43 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS "base_reposecretkey" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL UNIQUE, "secret_key" varchar(44) NOT NULL);
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS "constance_config_old";
|
||||
ALTER TABLE "constance_config" RENAME TO "constance_config_old";
|
||||
CREATE TABLE IF NOT EXISTS "constance_config" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "constance_key" varchar(255) NOT NULL UNIQUE, "value" text NULL);
|
||||
INSERT INTO "constance_config" ("id", "constance_key", "value") SELECT "id", "key", "value" FROM "constance_config_old";
|
||||
DROP TABLE "constance_config_old";
|
||||
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS "drafts_draft_old";
|
||||
ALTER TABLE "drafts_draft" RENAME TO "drafts_draft_old";
|
||||
CREATE TABLE IF NOT EXISTS "drafts_draft" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL, "username" varchar(255) NOT NULL, "origin_file_version" varchar(100) NOT NULL, "draft_file_path" varchar(1024) NOT NULL, "origin_file_uuid" char(32) NOT NULL UNIQUE, "publish_file_version" varchar(100) NULL, "status" varchar(20) NOT NULL, "origin_repo_id" varchar(36) NOT NULL);
|
||||
INSERT INTO "drafts_draft" ("id", "created_at", "updated_at", "username", "origin_file_version", "draft_file_path", "origin_file_uuid", "publish_file_version", "status", "origin_repo_id") SELECT "id", "created_at", "updated_at", "username", "origin_file_version", "draft_file_path", "origin_file_uuid", "publish_file_version", "status", "origin_repo_id" FROM "drafts_draft_old";
|
||||
DROP TABLE "drafts_draft_old";
|
||||
|
||||
CREATE INDEX IF NOT EXISTS "drafts_draft_created_at_e9f4523f" ON "drafts_draft" ("created_at");
|
||||
CREATE INDEX IF NOT EXISTS "drafts_draft_origin_repo_id_8978ca2c" ON "drafts_draft" ("origin_repo_id");
|
||||
CREATE INDEX IF NOT EXISTS "drafts_draft_updated_at_0a144b05" ON "drafts_draft" ("updated_at");
|
||||
CREATE INDEX IF NOT EXISTS "drafts_draft_username_73e6738b" ON "drafts_draft" ("username");
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "abuse_reports_abusereport" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "reporter" text NULL, "repo_id" varchar(36) NOT NULL, "repo_name" varchar(255) NOT NULL, "file_path" text NULL, "abuse_type" varchar(255) NOT NULL, "description" text NULL, "handled" bool NOT NULL, "time" datetime NOT NULL);
|
||||
CREATE INDEX IF NOT EXISTS "abuse_reports_abusereport_abuse_type_703d5335" ON "abuse_reports_abusereport" ("abuse_type");
|
||||
CREATE INDEX IF NOT EXISTS "abuse_reports_abusereport_handled_94b8304c" ON "abuse_reports_abusereport" ("handled");
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "file_participants_fileparticipant" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL, "uuid_id" char(32) NOT NULL REFERENCES "tags_fileuuidmap" ("uuid"));
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS "file_participants_fileparticipant_uuid_id_username_c747dd36_uniq" ON "file_participants_fileparticipant" ("uuid_id", "username");
|
||||
CREATE INDEX IF NOT EXISTS "file_participants_fileparticipant_uuid_id_861b7339" ON "file_participants_fileparticipant" ("uuid_id");
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "repo_share_invitation" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "path" text NOT NULL, "permission" varchar(50) NOT NULL, "invitation_id" integer NOT NULL REFERENCES "invitations_invitation" ("id"));
|
||||
CREATE INDEX IF NOT EXISTS "repo_share_invitation_repo_id_7bcf84fa" ON "repo_share_invitation" ("repo_id");
|
||||
CREATE INDEX IF NOT EXISTS "repo_share_invitation_invitation_id_b71effd2" ON "repo_share_invitation" ("invitation_id");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "repo_api_tokens" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "app_name" varchar(255) NOT NULL, "token" varchar(40) NOT NULL UNIQUE, "generated_at" datetime NOT NULL, "generated_by" varchar(255) NOT NULL, "last_access" datetime NOT NULL, "permission" varchar(15) NOT NULL);
|
||||
CREATE INDEX IF NOT EXISTS "repo_api_tokens_repo_id_47a50fef" ON "repo_api_tokens" ("repo_id");
|
||||
CREATE INDEX IF NOT EXISTS "repo_api_tokens_app_name_7c395c31" ON "repo_api_tokens" ("app_name");
|
||||
|
||||
ALTER TABLE "post_office_attachment" add column "headers" text DEFAULT NULL;
|
||||
|
@ -1,4 +0,0 @@
|
||||
ALTER TABLE `VirusFile` ADD COLUMN `has_ignored` TINYINT(1) NOT NULL DEFAULT 0;
|
||||
ALTER TABLE `VirusFile` CHANGE `has_handle` `has_deleted` TINYINT(1);
|
||||
ALTER TABLE `VirusFile` ADD INDEX `has_deleted` (`has_deleted`);
|
||||
ALTER TABLE `VirusFile` ADD INDEX `has_ignored` (`has_ignored`);
|
@ -1,57 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS `ocm_share` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`shared_secret` varchar(36) NOT NULL,
|
||||
`from_user` varchar(255) NOT NULL,
|
||||
`to_user` varchar(255) NOT NULL,
|
||||
`to_server_url` varchar(200) NOT NULL,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`repo_name` varchar(255) NOT NULL,
|
||||
`permission` varchar(50) NOT NULL,
|
||||
`path` longtext NOT NULL,
|
||||
`ctime` datetime(6) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `shared_secret` (`shared_secret`),
|
||||
KEY `ocm_share_from_user_7fbb7bb6` (`from_user`),
|
||||
KEY `ocm_share_to_user_4e255523` (`to_user`),
|
||||
KEY `ocm_share_to_server_url_43f0e89b` (`to_server_url`),
|
||||
KEY `ocm_share_repo_id_51937581` (`repo_id`)
|
||||
) ENGINE = InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `ocm_share_received` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`shared_secret` varchar(36) NOT NULL,
|
||||
`from_user` varchar(255) NOT NULL,
|
||||
`to_user` varchar(255) NOT NULL,
|
||||
`from_server_url` varchar(200) NOT NULL,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`repo_name` varchar(255) NOT NULL,
|
||||
`permission` varchar(50) NOT NULL,
|
||||
`path` longtext NOT NULL,
|
||||
`provider_id` varchar(40) NOT NULL,
|
||||
`ctime` datetime(6) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `shared_secret` (`shared_secret`),
|
||||
KEY `ocm_share_received_from_user_8137d8eb` (`from_user`),
|
||||
KEY `ocm_share_received_to_user_0921d09a` (`to_user`),
|
||||
KEY `ocm_share_received_from_server_url_10527b80` (`from_server_url`),
|
||||
KEY `ocm_share_received_repo_id_9e77a1b9` (`repo_id`),
|
||||
KEY `ocm_share_received_provider_id_60c873e0` (`provider_id`)
|
||||
) ENGINE = InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `repo_auto_delete` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`days` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `repo_id` (`repo_id`)
|
||||
) ENGINE = InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `external_department` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`group_id` int(11) NOT NULL,
|
||||
`provider` varchar(32) NOT NULL,
|
||||
`outer_id` bigint(20) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `group_id` (`group_id`),
|
||||
UNIQUE KEY `external_department_provider_outer_id_8dns6vkw_uniq` (`provider`,`outer_id`)
|
||||
) ENGINE = InnoDB DEFAULT CHARSET=utf8;
|
@ -1,9 +0,0 @@
|
||||
DROP TABLE IF EXISTS "VirusFile_old";
|
||||
ALTER TABLE "VirusFile" RENAME TO "VirusFile_old";
|
||||
CREATE TABLE IF NOT EXISTS "VirusFile" ("vid" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "commit_id" varchar(40) NOT NULL, "file_path" text NOT NULL, "has_deleted" tinyint(1) NOT NULL, "has_ignored" TINYINT(1) NOT NULL DEFAULT 0);
|
||||
INSERT INTO "VirusFile" ("vid", "repo_id", "commit_id", "file_path", "has_deleted") SELECT "vid", "repo_id", "commit_id", "file_path", "has_handle" FROM "VirusFile_old";
|
||||
DROP TABLE "VirusFile_old";
|
||||
|
||||
CREATE INDEX IF NOT EXISTS "VirusFile_repo_id_yewnci4gd" ON "VirusFile" ("repo_id");
|
||||
CREATE INDEX IF NOT EXISTS "VirusFile_has_deleted_834ndyts" ON "VirusFile" ("has_deleted");
|
||||
CREATE INDEX IF NOT EXISTS "VirusFile_has_ignored_d84tvuwg" ON "VirusFile" ("has_ignored");
|
@ -1,17 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS "ocm_share" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "shared_secret" varchar(36) NOT NULL UNIQUE, "from_user" varchar(255) NOT NULL, "to_user" varchar(255) NOT NULL, "to_server_url" varchar(200) NOT NULL, "repo_id" varchar(36) NOT NULL, "repo_name" varchar(255) NOT NULL, "permission" varchar(50) NOT NULL, "path" text NOT NULL, "ctime" datetime(6) NOT NULL);
|
||||
CREATE INDEX IF NOT EXISTS "ocm_share_from_user_7fbb7bb6" ON "ocm_share" ("from_user");
|
||||
CREATE INDEX IF NOT EXISTS "ocm_share_to_user_4e255523" ON "ocm_share" ("to_user");
|
||||
CREATE INDEX IF NOT EXISTS "ocm_share_to_server_url_43f0e89b" ON "ocm_share" ("to_server_url");
|
||||
CREATE INDEX IF NOT EXISTS "ocm_share_repo_id_51937581" ON "ocm_share" ("repo_id");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "ocm_share_received" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "shared_secret" varchar(36) NOT NULL UNIQUE, "from_user" varchar(255) NOT NULL, "to_user" varchar(255) NOT NULL, "from_server_url" varchar(200) NOT NULL, "repo_id" varchar(36) NOT NULL, "repo_name" varchar(255) NOT NULL, "permission" varchar(50) NOT NULL, "path" text NOT NULL, "provider_id" varchar(40) NOT NULL, "ctime" datetime(6) NOT NULL);
|
||||
CREATE INDEX IF NOT EXISTS "ocm_share_received_from_user_8137d8eb" ON "ocm_share_received" ("from_user");
|
||||
CREATE INDEX IF NOT EXISTS "ocm_share_received_to_user_0921d09a" ON "ocm_share_received" ("to_user");
|
||||
CREATE INDEX IF NOT EXISTS "ocm_share_received_from_server_url_10527b80" ON "ocm_share_received" ("from_server_url");
|
||||
CREATE INDEX IF NOT EXISTS "ocm_share_received_repo_id_9e77a1b9" ON "ocm_share_received" ("repo_id");
|
||||
CREATE INDEX IF NOT EXISTS "ocm_share_received_provider_id_60c873e0" ON "ocm_share_received" ("provider_id");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "repo_auto_delete" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL UNIQUE, "days" integer NOT NULL);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "external_department" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "group_id" integer NOT NULL UNIQUE, "provider" varchar(32) NOT NULL, "outer_id" bigint NOT NULL);
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS "external_department_provider_outer_id_8dns6vkw_uniq" ON "external_department" (`provider`,`outer_id`);
|
@ -1,2 +0,0 @@
|
||||
ALTER TABLE `FileAudit` ADD INDEX `ix_FileAudit_user` (`user`);
|
||||
ALTER TABLE `FileAudit` ADD INDEX `ix_FileAudit_repo_id` (`repo_id`);
|
@ -1,2 +0,0 @@
|
||||
ALTER TABLE `RepoUserToken` ADD INDEX `RepoUserToken_token` (`token`);
|
||||
ALTER TABLE `RepoTokenPeerInfo` ADD INDEX `RepoTokenPeerInfo_peer_id` (`peer_id`);
|
@ -1,51 +0,0 @@
|
||||
ALTER TABLE `api2_tokenv2` CHANGE COLUMN `device_name` `device_name` varchar(40) CHARACTER SET 'utf8mb4' COLLATE utf8mb4_unicode_ci NOT NULL;
|
||||
|
||||
CREATE TABLE `custom_share_permission` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`name` varchar(255) NOT NULL,
|
||||
`description` varchar(500) NOT NULL,
|
||||
`permission` longtext NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `custom_share_permission_repo_id_578fe49f` (`repo_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE `ocm_via_webdav_received_shares` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`description` varchar(255) DEFAULT NULL,
|
||||
`name` varchar(255) NOT NULL,
|
||||
`owner` varchar(255) NOT NULL,
|
||||
`owner_display_name` varchar(255) DEFAULT NULL,
|
||||
`protocol_name` varchar(255) NOT NULL,
|
||||
`shared_secret` varchar(255) NOT NULL,
|
||||
`permissions` varchar(255) NOT NULL,
|
||||
`provider_id` varchar(255) NOT NULL,
|
||||
`resource_type` varchar(255) NOT NULL,
|
||||
`share_type` varchar(255) NOT NULL,
|
||||
`share_with` varchar(255) NOT NULL,
|
||||
`shared_by` varchar(255) NOT NULL,
|
||||
`shared_by_display_name` varchar(255) DEFAULT NULL,
|
||||
`ctime` datetime(6) NOT NULL,
|
||||
`is_dir` tinyint(1) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `ocm_via_webdav_share_received_owner_261eaa70` (`owner`),
|
||||
KEY `ocm_via_webdav_share_received_shared_secret_fbb6be5a` (`shared_secret`),
|
||||
KEY `ocm_via_webdav_share_received_provider_id_a55680e9` (`provider_id`),
|
||||
KEY `ocm_via_webdav_share_received_resource_type_a3c71b57` (`resource_type`),
|
||||
KEY `ocm_via_webdav_share_received_share_type_7615aaab` (`share_type`),
|
||||
KEY `ocm_via_webdav_share_received_share_with_5a23eb17` (`share_with`),
|
||||
KEY `ocm_via_webdav_share_received_shared_by_1786d580` (`shared_by`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
|
||||
CREATE TABLE `onlyoffice_onlyofficedockey` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`doc_key` varchar(36) NOT NULL,
|
||||
`username` varchar(255) NOT NULL,
|
||||
`repo_id` varchar(36) NOT NULL,
|
||||
`file_path` longtext NOT NULL,
|
||||
`repo_id_file_path_md5` varchar(100) NOT NULL,
|
||||
`created_time` datetime(6) NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `onlyoffice_onlyofficedockey_doc_key_edba1352` (`doc_key`),
|
||||
KEY `onlyoffice_onlyofficedockey_repo_id_file_path_md5_52002073` (`repo_id_file_path_md5`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
@ -1,2 +0,0 @@
|
||||
CREATE INDEX IF NOT EXISTS "ix_FileAudit_user" ON "FileAudit" ("user");
|
||||
CREATE INDEX IF NOT EXISTS "ix_FileAudit_repo_id" ON "FileAudit" ("repo_id");
|
@ -1,2 +0,0 @@
|
||||
CREATE INDEX IF NOT EXISTS "RepoUserToken_token" ON "RepoUserToken" ("token");
|
||||
CREATE INDEX IF NOT EXISTS "RepoTokenPeerInfo_peer_id" ON "RepoTokenPeerInfo" ("peer_id");
|
@ -1,15 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS "custom_share_permission" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "name" varchar(255) NOT NULL, "description" varchar(500) NOT NULL, "permission" , "reporter" text NOT NULL);
|
||||
CREATE INDEX IF NOT EXISTS "custom_share_permission_repo_id_578fe49f" ON "custom_share_permission" ("repo_id");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "ocm_via_webdav_received_shares" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "description" varchar(255) NULL, "name" varchar(255) NOT NULL, "owner" varchar(255) NOT NULL, "owner_display_name" varchar(255) NULL, "protocol_name" varchar(255) NOT NULL, "shared_secret" varchar(255) NOT NULL, "permissions" varchar(255) NOT NULL, "provider_id" varchar(255) NOT NULL, "resource_type" varchar(255) NOT NULL, "share_type" varchar(255) NOT NULL, "share_with" varchar(255) NOT NULL, "shared_by" varchar(255) NOT NULL, "shared_by_display_name" varchar(255) NOT NULL, "ctime" datetime NOT NULL, "is_dir" bool NOT NULL);
|
||||
CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_owner_261eaa70" ON "ocm_via_webdav_received_shares" ("owner");
|
||||
CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_shared_secret_fbb6be5a" ON "ocm_via_webdav_received_shares" ("shared_secret");
|
||||
CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_provider_id_a55680e9" ON "ocm_via_webdav_received_shares" ("provider_id");
|
||||
CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_resource_type_a3c71b57" ON "ocm_via_webdav_received_shares" ("resource_type");
|
||||
CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_share_type_7615aaab" ON "ocm_via_webdav_received_shares" ("share_type");
|
||||
CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_share_with_5a23eb17" ON "ocm_via_webdav_received_shares" ("share_with");
|
||||
CREATE INDEX IF NOT EXISTS "ocm_via_webdav_share_received_shared_by_1786d580" ON "ocm_via_webdav_received_shares" ("shared_by");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "onlyoffice_onlyofficedockey" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "doc_key" varchar(36) NOT NULL, "username" varchar(255) NOT NULL, "repo_id" varchar(36) NULL, "file_path" TEXT NOT NULL, "repo_id_file_path_md5" varchar(100) NOT NULL, "created_time" datetime NOT NULL);
|
||||
CREATE INDEX IF NOT EXISTS "onlyoffice_onlyofficedockey_doc_key_edba1352" ON "onlyoffice_onlyofficedockey" ("doc_key");
|
||||
CREATE INDEX IF NOT EXISTS "onlyoffice_onlyofficedockey_repo_id_file_path_md5_52002073" ON "onlyoffice_onlyofficedockey" ("repo_id_file_path_md5");
|
@ -1,121 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh
|
||||
UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/
|
||||
INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/
|
||||
TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/
|
||||
default_ccnet_conf_dir=${TOPDIR}/ccnet
|
||||
default_seafile_data_dir=${TOPDIR}/seafile-data
|
||||
default_seahub_db=${TOPDIR}/seahub.db
|
||||
|
||||
export CCNET_CONF_DIR=${default_ccnet_conf_dir}
|
||||
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
|
||||
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH
|
||||
|
||||
prev_version=1.2.0
|
||||
current_version=1.3.0
|
||||
|
||||
echo
|
||||
echo "-------------------------------------------------------------"
|
||||
echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}"
|
||||
echo "Press [ENTER] to contiune"
|
||||
echo "-------------------------------------------------------------"
|
||||
echo
|
||||
read dummy
|
||||
|
||||
function check_python_executable() {
|
||||
if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
if which python2.7 2>/dev/null 1>&2; then
|
||||
PYTHON=python2.7
|
||||
elif which python27 2>/dev/null 1>&2; then
|
||||
PYTHON=python27
|
||||
elif which python2.6 2>/dev/null 1>&2; then
|
||||
PYTHON=python2.6
|
||||
elif which python26 2>/dev/null 1>&2; then
|
||||
PYTHON=python26
|
||||
else
|
||||
echo
|
||||
echo "Can't find a python executable of version 2.6 or above in PATH"
|
||||
echo "Install python 2.6+ before continue."
|
||||
echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
|
||||
echo
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function read_seafile_data_dir () {
|
||||
seafile_ini=${default_ccnet_conf_dir}/seafile.ini
|
||||
if [[ ! -f ${seafile_ini} ]]; then
|
||||
echo "${seafile_ini} not found. Now quit"
|
||||
exit 1
|
||||
fi
|
||||
seafile_data_dir=$(cat "${seafile_ini}")
|
||||
if [[ ! -d ${seafile_data_dir} ]]; then
|
||||
echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits."
|
||||
echo "Please check it first, or create this directory yourself."
|
||||
echo ""
|
||||
exit 1;
|
||||
fi
|
||||
}
|
||||
|
||||
check_python_executable
|
||||
read_seafile_data_dir
|
||||
|
||||
export SEAFILE_CONF_DIR=$seafile_data_dir
|
||||
|
||||
# test whether seafile server has been stopped.
|
||||
if pgrep seaf-server 2>/dev/null 1>&2 ; then
|
||||
echo
|
||||
echo "seafile server is still running !"
|
||||
echo "stop it using scripts before upgrade."
|
||||
echo
|
||||
exit 1
|
||||
elif pgrep -f "manage.py run_gunicorn" 2>/dev/null 1>&2 ; then
|
||||
echo
|
||||
echo "seahub server is still running !"
|
||||
echo "stop it before upgrade."
|
||||
echo
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# run django syncdb command
|
||||
echo "------------------------------"
|
||||
echo "updating seahub database ... "
|
||||
echo
|
||||
manage_py=${INSTALLPATH}/seahub/manage.py
|
||||
pushd "${INSTALLPATH}/seahub" 2>/dev/null 1>&2
|
||||
if ! $PYTHON manage.py syncdb 2>/dev/null 1>&2; then
|
||||
echo "failed"
|
||||
exit -1
|
||||
fi
|
||||
popd 2>/dev/null 1>&2
|
||||
|
||||
echo "DONE"
|
||||
echo "------------------------------"
|
||||
echo
|
||||
|
||||
echo "------------------------------"
|
||||
echo "migrating avatars ..."
|
||||
echo
|
||||
media_dir=${INSTALLPATH}/seahub/media
|
||||
orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars
|
||||
dest_avatar_dir=${TOPDIR}/seahub-data/avatars
|
||||
|
||||
# move "media/avatars" directory outside
|
||||
if [[ ! -d ${dest_avatar_dir} ]]; then
|
||||
mkdir -p "${TOPDIR}/seahub-data"
|
||||
mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2
|
||||
ln -s ../../../seahub-data/avatars ${media_dir}
|
||||
|
||||
elif [[ ! -L ${orig_avatar_dir} ]]; then
|
||||
mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2
|
||||
rm -rf "${orig_avatar_dir}"
|
||||
ln -s ../../../seahub-data/avatars ${media_dir}
|
||||
fi
|
||||
|
||||
echo "DONE"
|
||||
echo "------------------------------"
|
||||
echo
|
@ -1,119 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh
|
||||
UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/
|
||||
INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/
|
||||
TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/
|
||||
default_ccnet_conf_dir=${TOPDIR}/ccnet
|
||||
default_seafile_data_dir=${TOPDIR}/seafile-data
|
||||
default_seahub_db=${TOPDIR}/seahub.db
|
||||
|
||||
export CCNET_CONF_DIR=${default_ccnet_conf_dir}
|
||||
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
|
||||
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH
|
||||
|
||||
prev_version=1.3
|
||||
current_version=1.4.0
|
||||
|
||||
echo
|
||||
echo "-------------------------------------------------------------"
|
||||
echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}"
|
||||
echo "Press [ENTER] to contiune"
|
||||
echo "-------------------------------------------------------------"
|
||||
echo
|
||||
read dummy
|
||||
|
||||
function check_python_executable() {
|
||||
if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
if which python2.7 2>/dev/null 1>&2; then
|
||||
PYTHON=python2.7
|
||||
elif which python27 2>/dev/null 1>&2; then
|
||||
PYTHON=python27
|
||||
elif which python2.6 2>/dev/null 1>&2; then
|
||||
PYTHON=python2.6
|
||||
elif which python26 2>/dev/null 1>&2; then
|
||||
PYTHON=python26
|
||||
else
|
||||
echo
|
||||
echo "Can't find a python executable of version 2.6 or above in PATH"
|
||||
echo "Install python 2.6+ before continue."
|
||||
echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
|
||||
echo
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function read_seafile_data_dir () {
|
||||
seafile_ini=${default_ccnet_conf_dir}/seafile.ini
|
||||
if [[ ! -f ${seafile_ini} ]]; then
|
||||
echo "${seafile_ini} not found. Now quit"
|
||||
exit 1
|
||||
fi
|
||||
seafile_data_dir=$(cat "${seafile_ini}")
|
||||
if [[ ! -d ${seafile_data_dir} ]]; then
|
||||
echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits."
|
||||
echo "Please check it first, or create this directory yourself."
|
||||
echo ""
|
||||
exit 1;
|
||||
fi
|
||||
}
|
||||
|
||||
check_python_executable
|
||||
read_seafile_data_dir
|
||||
|
||||
export SEAFILE_CONF_DIR=$seafile_data_dir
|
||||
|
||||
# test whether seafile server has been stopped.
|
||||
if pgrep seaf-server 2>/dev/null 1>&2 ; then
|
||||
echo
|
||||
echo "seafile server is still running !"
|
||||
echo "stop it using scripts before upgrade."
|
||||
echo
|
||||
exit 1
|
||||
elif pgrep -f "manage.py run_gunicorn" 2>/dev/null 1>&2 ; then
|
||||
echo
|
||||
echo "seahub server is still running !"
|
||||
echo "stop it before upgrade."
|
||||
echo
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "------------------------------"
|
||||
echo "migrating avatars ..."
|
||||
echo
|
||||
media_dir=${INSTALLPATH}/seahub/media
|
||||
orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars
|
||||
dest_avatar_dir=${TOPDIR}/seahub-data/avatars
|
||||
|
||||
# move "media/avatars" directory outside
|
||||
if [[ ! -d ${dest_avatar_dir} ]]; then
|
||||
mkdir -p "${TOPDIR}/seahub-data"
|
||||
mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2
|
||||
ln -s ../../../seahub-data/avatars ${media_dir}
|
||||
|
||||
elif [[ ! -L ${orig_avatar_dir} ]]; then
|
||||
mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2
|
||||
rm -rf "${orig_avatar_dir}"
|
||||
ln -s ../../../seahub-data/avatars ${media_dir}
|
||||
fi
|
||||
|
||||
echo "DONE"
|
||||
echo "------------------------------"
|
||||
echo
|
||||
|
||||
# update database
|
||||
echo "------------------------------"
|
||||
echo "updating seahub database ... "
|
||||
echo
|
||||
|
||||
db_update_py=$UPGRADE_DIR/db_update_1.3_1.4.py
|
||||
if ! $PYTHON $db_update_py $default_seahub_db 1>/dev/null; then
|
||||
echo "failed"
|
||||
fi
|
||||
|
||||
echo "DONE"
|
||||
echo "------------------------------"
|
||||
echo
|
@ -1,106 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh
|
||||
UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/
|
||||
INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/
|
||||
TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/
|
||||
default_ccnet_conf_dir=${TOPDIR}/ccnet
|
||||
default_seafile_data_dir=${TOPDIR}/seafile-data
|
||||
default_seahub_db=${TOPDIR}/seahub.db
|
||||
|
||||
export CCNET_CONF_DIR=${default_ccnet_conf_dir}
|
||||
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
|
||||
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH
|
||||
|
||||
prev_version=1.4
|
||||
current_version=1.5
|
||||
|
||||
echo
|
||||
echo "-------------------------------------------------------------"
|
||||
echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}"
|
||||
echo "Press [ENTER] to contiune"
|
||||
echo "-------------------------------------------------------------"
|
||||
echo
|
||||
read dummy
|
||||
|
||||
function check_python_executable() {
|
||||
if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
if which python2.7 2>/dev/null 1>&2; then
|
||||
PYTHON=python2.7
|
||||
elif which python27 2>/dev/null 1>&2; then
|
||||
PYTHON=python27
|
||||
elif which python2.6 2>/dev/null 1>&2; then
|
||||
PYTHON=python2.6
|
||||
elif which python26 2>/dev/null 1>&2; then
|
||||
PYTHON=python26
|
||||
else
|
||||
echo
|
||||
echo "Can't find a python executable of version 2.6 or above in PATH"
|
||||
echo "Install python 2.6+ before continue."
|
||||
echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
|
||||
echo
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function read_seafile_data_dir () {
|
||||
seafile_ini=${default_ccnet_conf_dir}/seafile.ini
|
||||
if [[ ! -f ${seafile_ini} ]]; then
|
||||
echo "${seafile_ini} not found. Now quit"
|
||||
exit 1
|
||||
fi
|
||||
seafile_data_dir=$(cat "${seafile_ini}")
|
||||
if [[ ! -d ${seafile_data_dir} ]]; then
|
||||
echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits."
|
||||
echo "Please check it first, or create this directory yourself."
|
||||
echo ""
|
||||
exit 1;
|
||||
fi
|
||||
}
|
||||
|
||||
check_python_executable
|
||||
read_seafile_data_dir
|
||||
|
||||
export SEAFILE_CONF_DIR=$seafile_data_dir
|
||||
|
||||
# test whether seafile server has been stopped.
|
||||
if pgrep seaf-server 2>/dev/null 1>&2 ; then
|
||||
echo
|
||||
echo "seafile server is still running !"
|
||||
echo "stop it using scripts before upgrade."
|
||||
echo
|
||||
exit 1
|
||||
elif pgrep -f "manage.py run_gunicorn" 2>/dev/null 1>&2 ; then
|
||||
echo
|
||||
echo "seahub server is still running !"
|
||||
echo "stop it before upgrade."
|
||||
echo
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "------------------------------"
|
||||
echo "migrating avatars ..."
|
||||
echo
|
||||
media_dir=${INSTALLPATH}/seahub/media
|
||||
orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars
|
||||
dest_avatar_dir=${TOPDIR}/seahub-data/avatars
|
||||
|
||||
# move "media/avatars" directory outside
|
||||
if [[ ! -d ${dest_avatar_dir} ]]; then
|
||||
mkdir -p "${TOPDIR}/seahub-data"
|
||||
mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2
|
||||
ln -s ../../../seahub-data/avatars ${media_dir}
|
||||
|
||||
elif [[ ! -L ${orig_avatar_dir} ]]; then
|
||||
mv ${orig_avatar_dir}/* "${dest_avatar_dir}" 2>/dev/null 1>&2
|
||||
rm -rf "${orig_avatar_dir}"
|
||||
ln -s ../../../seahub-data/avatars ${media_dir}
|
||||
fi
|
||||
|
||||
echo "DONE"
|
||||
echo "------------------------------"
|
||||
echo
|
@ -1,122 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh
|
||||
UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/
|
||||
INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/
|
||||
TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/
|
||||
default_ccnet_conf_dir=${TOPDIR}/ccnet
|
||||
default_seafile_data_dir=${TOPDIR}/seafile-data
|
||||
default_seahub_db=${TOPDIR}/seahub.db
|
||||
|
||||
export CCNET_CONF_DIR=${default_ccnet_conf_dir}
|
||||
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
|
||||
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH
|
||||
|
||||
prev_version=1.5
|
||||
current_version=1.6
|
||||
|
||||
echo
|
||||
echo "-------------------------------------------------------------"
|
||||
echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}"
|
||||
echo "Press [ENTER] to contiune"
|
||||
echo "-------------------------------------------------------------"
|
||||
echo
|
||||
read dummy
|
||||
|
||||
function check_python_executable() {
|
||||
if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
if which python2.7 2>/dev/null 1>&2; then
|
||||
PYTHON=python2.7
|
||||
elif which python27 2>/dev/null 1>&2; then
|
||||
PYTHON=python27
|
||||
elif which python2.6 2>/dev/null 1>&2; then
|
||||
PYTHON=python2.6
|
||||
elif which python26 2>/dev/null 1>&2; then
|
||||
PYTHON=python26
|
||||
else
|
||||
echo
|
||||
echo "Can't find a python executable of version 2.6 or above in PATH"
|
||||
echo "Install python 2.6+ before continue."
|
||||
echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
|
||||
echo
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function read_seafile_data_dir () {
|
||||
seafile_ini=${default_ccnet_conf_dir}/seafile.ini
|
||||
if [[ ! -f ${seafile_ini} ]]; then
|
||||
echo "${seafile_ini} not found. Now quit"
|
||||
exit 1
|
||||
fi
|
||||
seafile_data_dir=$(cat "${seafile_ini}")
|
||||
if [[ ! -d ${seafile_data_dir} ]]; then
|
||||
echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits."
|
||||
echo "Please check it first, or create this directory yourself."
|
||||
echo ""
|
||||
exit 1;
|
||||
fi
|
||||
}
|
||||
|
||||
check_python_executable
read_seafile_data_dir

export SEAFILE_CONF_DIR=$seafile_data_dir

# Refuse to run while the seafile or seahub daemons are still up; both
# must be stopped before their databases are modified.
if pgrep seaf-server 2>/dev/null 1>&2 ; then
    echo
    echo "seafile server is still running !"
    echo "stop it using scripts before upgrade."
    echo
    exit 1
elif pgrep -f "manage.py run_gunicorn" 2>/dev/null 1>&2 ; then
    echo
    echo "seahub server is still running !"
    echo "stop it before upgrade."
    echo
    exit 1
elif pgrep -f "manage.py run_fcgi" 2>/dev/null 1>&2 ; then
    # Seahub can also be deployed via fastcgi; check that mode too, as the
    # later upgrade scripts do.
    echo
    echo "seahub server is still running !"
    echo "stop it before upgrade."
    echo
    exit 1
fi

echo
echo "------------------------------"
echo "migrating avatars ..."
echo
media_dir=${INSTALLPATH}/seahub/media
orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars
dest_avatar_dir=${TOPDIR}/seahub-data/avatars

# Move "media/avatars" out of the versioned install directory so uploaded
# avatars survive future upgrades, leaving a relative symlink behind.
if [[ ! -d ${dest_avatar_dir} ]]; then
    mkdir -p "${TOPDIR}/seahub-data"
    mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2
    ln -s ../../../seahub-data/avatars "${media_dir}"
elif [[ ! -L ${orig_avatar_dir} ]]; then
    # Destination exists but media/avatars is still a real directory:
    # merge its contents, then replace it with the symlink.
    mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2
    rm -rf "${orig_avatar_dir}"
    ln -s ../../../seahub-data/avatars "${media_dir}"
fi

echo "DONE"
echo "------------------------------"
echo

echo
echo "------------------------------"
echo "Updating seahub database ..."
echo

# Apply the 1.6.0 sqlite schema migration to seahub.db.
seahub_db=${TOPDIR}/seahub.db
seahub_sql=${UPGRADE_DIR}/sql/1.6.0/sqlite3/seahub.sql
if ! sqlite3 "${seahub_db}" < "${seahub_sql}"; then
    echo "Failed to update seahub database"
    exit 1
fi

echo "DONE"
echo "------------------------------"
echo
@ -1,137 +0,0 @@
|
||||
#!/bin/bash

# Resolve our own location so the script works regardless of the caller's
# working directory.
SCRIPT=$(readlink -f "$0")              # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh
UPGRADE_DIR=$(dirname "$SCRIPT")        # haiwen/seafile-server-1.3.0/upgrade/
INSTALLPATH=$(dirname "$UPGRADE_DIR")   # haiwen/seafile-server-1.3.0/
TOPDIR=$(dirname "${INSTALLPATH}")      # haiwen/
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
default_seahub_db=${TOPDIR}/seahub.db

manage_py=${INSTALLPATH}/seahub/manage.py

export CCNET_CONF_DIR=${default_ccnet_conf_dir}
# Make the bundled seafile python libraries importable; the second export
# re-prepends the 2.7 paths so they take precedence when present.
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH

prev_version=1.6
current_version=1.7

# Tell the admin what is about to happen and wait for confirmation.
echo
echo "-------------------------------------------------------------"
echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}"
echo "Press [ENTER] to continue"
echo "-------------------------------------------------------------"
echo
read dummy
# Locate a python 2.6/2.7 interpreter and store its command name in $PYTHON.
# A pre-set, executable $PYTHON is honored as-is; if nothing suitable is
# found on PATH the script aborts with instructions.
function check_python_executable() {
    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
        return 0
    fi

    if which python2.7 2>/dev/null 1>&2; then
        PYTHON=python2.7
    elif which python27 2>/dev/null 1>&2; then
        PYTHON=python27
    elif which python2.6 2>/dev/null 1>&2; then
        PYTHON=python2.6
    elif which python26 2>/dev/null 1>&2; then
        PYTHON=python26
    else
        echo
        echo "Can't find a python executable of version 2.6 or above in PATH"
        echo "Install python 2.6+ before continuing."
        echo "Or if you installed it in a non-standard PATH, set the PYTHON environment variable to it"
        echo
        exit 1
    fi
}
# Read the seafile data directory path from ccnet's seafile.ini into
# $seafile_data_dir and validate it. Exits with a message if the ini file
# is missing or the directory it points to does not exist.
function read_seafile_data_dir () {
    seafile_ini=${default_ccnet_conf_dir}/seafile.ini
    if [[ ! -f ${seafile_ini} ]]; then
        echo "${seafile_ini} not found. Now quit"
        exit 1
    fi
    # The ini file contains nothing but the data directory path.
    seafile_data_dir=$(cat "${seafile_ini}")
    if [[ ! -d ${seafile_data_dir} ]]; then
        echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exist."
        echo "Please check it first, or create this directory yourself."
        echo ""
        exit 1
    fi
}
check_python_executable
read_seafile_data_dir

export SEAFILE_CONF_DIR=$seafile_data_dir

# Refuse to run while the seafile or seahub daemons (gunicorn or fastcgi
# deployment) are still up; both must be stopped before their databases
# are modified.
if pgrep seaf-server 2>/dev/null 1>&2 ; then
    echo
    echo "seafile server is still running !"
    echo "stop it using scripts before upgrade."
    echo
    exit 1
elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then
    echo
    echo "seahub server is still running !"
    echo "stop it before upgrade."
    echo
    exit 1
elif pgrep -f "${manage_py} run_fcgi" 2>/dev/null 1>&2 ; then
    echo
    echo "seahub server is still running !"
    echo "stop it before upgrade."
    echo
    exit 1
fi

echo
echo "------------------------------"
echo "migrating avatars ..."
echo
media_dir=${INSTALLPATH}/seahub/media
orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars
dest_avatar_dir=${TOPDIR}/seahub-data/avatars

# Move "media/avatars" out of the versioned install directory so uploaded
# avatars survive future upgrades, leaving a relative symlink behind.
if [[ ! -d ${dest_avatar_dir} ]]; then
    mkdir -p "${TOPDIR}/seahub-data"
    mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2
    ln -s ../../../seahub-data/avatars "${media_dir}"
elif [[ ! -L ${orig_avatar_dir} ]]; then
    # Destination exists but media/avatars is still a real directory:
    # merge its contents, then replace it with the symlink.
    mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2
    rm -rf "${orig_avatar_dir}"
    ln -s ../../../seahub-data/avatars "${media_dir}"
fi

echo "DONE"
echo "------------------------------"
echo

echo
echo "------------------------------"
echo "Updating seafile/seahub database ..."
echo

# Apply the 1.7.0 sqlite schema migrations to both databases.
seahub_db=${TOPDIR}/seahub.db
seahub_sql=${UPGRADE_DIR}/sql/1.7.0/sqlite3/seahub.sql
if ! sqlite3 "${seahub_db}" < "${seahub_sql}"; then
    echo "Failed to update seahub database"
    exit 1
fi

seafile_db=${seafile_data_dir}/seafile.db
seafile_sql=${UPGRADE_DIR}/sql/1.7.0/sqlite3/seafile.sql
if ! sqlite3 "${seafile_db}" < "${seafile_sql}"; then
    echo "Failed to update seafile database"
    exit 1
fi

echo "DONE"
echo "------------------------------"
echo
@ -1,130 +0,0 @@
|
||||
#!/bin/bash

# Resolve our own location so the script works regardless of the caller's
# working directory.
SCRIPT=$(readlink -f "$0")              # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh
UPGRADE_DIR=$(dirname "$SCRIPT")        # haiwen/seafile-server-1.3.0/upgrade/
INSTALLPATH=$(dirname "$UPGRADE_DIR")   # haiwen/seafile-server-1.3.0/
TOPDIR=$(dirname "${INSTALLPATH}")      # haiwen/
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
default_seahub_db=${TOPDIR}/seahub.db

manage_py=${INSTALLPATH}/seahub/manage.py

export CCNET_CONF_DIR=${default_ccnet_conf_dir}
# Make the bundled seafile python libraries importable; the second export
# re-prepends the 2.7 paths so they take precedence when present.
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH

prev_version=1.7
current_version=1.8

# Tell the admin what is about to happen and wait for confirmation.
echo
echo "-------------------------------------------------------------"
echo "This script would upgrade your seafile server from ${prev_version} to ${current_version}"
echo "Press [ENTER] to continue"
echo "-------------------------------------------------------------"
echo
read dummy
# Locate a python 2.6/2.7 interpreter and store its command name in $PYTHON.
# A pre-set, executable $PYTHON is honored as-is; if nothing suitable is
# found on PATH the script aborts with instructions.
function check_python_executable() {
    if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
        return 0
    fi

    if which python2.7 2>/dev/null 1>&2; then
        PYTHON=python2.7
    elif which python27 2>/dev/null 1>&2; then
        PYTHON=python27
    elif which python2.6 2>/dev/null 1>&2; then
        PYTHON=python2.6
    elif which python26 2>/dev/null 1>&2; then
        PYTHON=python26
    else
        echo
        echo "Can't find a python executable of version 2.6 or above in PATH"
        echo "Install python 2.6+ before continuing."
        echo "Or if you installed it in a non-standard PATH, set the PYTHON environment variable to it"
        echo
        exit 1
    fi
}
# Read the seafile data directory path from ccnet's seafile.ini into
# $seafile_data_dir and validate it. Exits with a message if the ini file
# is missing or the directory it points to does not exist.
function read_seafile_data_dir () {
    seafile_ini=${default_ccnet_conf_dir}/seafile.ini
    if [[ ! -f ${seafile_ini} ]]; then
        echo "${seafile_ini} not found. Now quit"
        exit 1
    fi
    # The ini file contains nothing but the data directory path.
    seafile_data_dir=$(cat "${seafile_ini}")
    if [[ ! -d ${seafile_data_dir} ]]; then
        echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exist."
        echo "Please check it first, or create this directory yourself."
        echo ""
        exit 1
    fi
}
check_python_executable
read_seafile_data_dir

export SEAFILE_CONF_DIR=$seafile_data_dir

# Refuse to run while the seafile or seahub daemons (gunicorn or fastcgi
# deployment) are still up; both must be stopped before the database is
# modified.
if pgrep seaf-server 2>/dev/null 1>&2 ; then
    echo
    echo "seafile server is still running !"
    echo "stop it using scripts before upgrade."
    echo
    exit 1
elif pgrep -f "${manage_py} run_gunicorn" 2>/dev/null 1>&2 ; then
    echo
    echo "seahub server is still running !"
    echo "stop it before upgrade."
    echo
    exit 1
elif pgrep -f "${manage_py} run_fcgi" 2>/dev/null 1>&2 ; then
    echo
    echo "seahub server is still running !"
    echo "stop it before upgrade."
    echo
    exit 1
fi

echo
echo "------------------------------"
echo "migrating avatars ..."
echo
media_dir=${INSTALLPATH}/seahub/media
orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars
dest_avatar_dir=${TOPDIR}/seahub-data/avatars

# Move "media/avatars" out of the versioned install directory so uploaded
# avatars survive future upgrades, leaving a relative symlink behind.
if [[ ! -d ${dest_avatar_dir} ]]; then
    mkdir -p "${TOPDIR}/seahub-data"
    mv "${orig_avatar_dir}" "${dest_avatar_dir}" 2>/dev/null 1>&2
    ln -s ../../../seahub-data/avatars "${media_dir}"
elif [[ ! -L ${orig_avatar_dir} ]]; then
    # Destination exists but media/avatars is still a real directory:
    # merge its contents, then replace it with the symlink.
    mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2
    rm -rf "${orig_avatar_dir}"
    ln -s ../../../seahub-data/avatars "${media_dir}"
fi

echo "DONE"
echo "------------------------------"
echo

echo
echo "------------------------------"
# Unlike the 1.6->1.7 upgrade, only the seahub database changes here.
echo "Updating seahub database ..."
echo

seahub_db=${TOPDIR}/seahub.db
seahub_sql=${UPGRADE_DIR}/sql/1.8.0/sqlite3/seahub.sql
if ! sqlite3 "${seahub_db}" < "${seahub_sql}"; then
    echo "Failed to update seahub database"
    exit 1
fi

echo "DONE"
echo "------------------------------"
echo
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user