Remove server code.

pull/1786/merge
Jonathan Xu 2016-12-16 19:35:23 +08:00
parent f2e3bdc8dc
commit 95cf1b8b40
303 changed files with 18 additions and 56179 deletions

View File

@ -1,41 +1,9 @@
MAKE_CLIENT =
SUBDIRS = include lib common daemon app doc
if COMPILE_CLIENT
MAKE_CLIENT += daemon
endif
if WIN32
MAKE_CONTROLLER =
else
MAKE_CONTROLLER = controller
endif
if COMPILE_FUSE
MAKE_FUSE = fuse
else
MAKE_FUSE =
endif
if COMPILE_SERVER
MAKE_SERVER = server tools $(MAKE_CONTROLLER) $(MAKE_FUSE)
endif
SUBDIRS = include lib common app python tests doc \
$(MAKE_CLIENT) $(MAKE_SERVER)
DIST_SUBDIRS = include lib common app python tests \
daemon server tools controller \
doc fuse
INTLTOOL = \
intltool-extract.in \
intltool-merge.in \
intltool-update.in
DIST_SUBDIRS = include lib common app daemon doc
EXTRA_DIST = install-sh $(INTLTOOL) README.markdown scripts debian msi LICENSE.txt
DISTCHECK_CONFIGURE_FLAGS = --enable-server
ACLOCAL_AMFLAGS = -I m4
dist-hook:

View File

@ -1,20 +1,3 @@
AM_CFLAGS = -DPKGDATADIR=\"$(pkgdatadir)\" \
-DPACKAGE_DATA_DIR=\""$(pkgdatadir)"\" \
-I$(top_srcdir)/include \
-I$(top_srcdir)/common \
-I$(top_srcdir)/lib \
@CCNET_CFLAGS@ \
@GLIB2_CFLAGS@ \
@MSVC_CFLAGS@ \
-Wall
if COMPILE_CLIENT
bin_SCRIPTS = seaf-cli
endif
# monitor_tool_SOURCES = monitor-tool.c
# monitor_tool_LDADD = @CCNET_CFLAGS@ \
# -lsearpc \
# @GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ -lrt -luuid -lsqlite3
EXTRA_DIST = seaf-cli

View File

@ -1,227 +0,0 @@
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <errno.h>
#include <string.h>
#include <searpc-client.h>
#include <ccnet.h>
#include <searpc-transport.h>
struct cmd {
char *name;
int (*handler) (int argc, char **argv);
};
static int add_server (int, char **);
static int del_server (int, char **);
static int list_servers (int, char **);
static struct cmd cmdtab[] = {
{ "add-server", add_server },
{ "del-server", del_server },
{ "list-servers", list_servers },
{ 0 },
};
CcnetClient *client;
SearpcClient *rpc_client;
SearpcUserPriv priv;
struct cmd *
getcmd (char *name)
{
char *p, *q;
struct cmd *c, *found;
int nmatches, longest;
longest = 0;
nmatches = 0;
found = 0;
for (c = cmdtab; (p = c->name); c++) {
for (q = name; *q == *p++; q++)
if (*q == 0) /* exact match? */
return c;
if (!*q) { /* the name was a prefix */
if (q - name > longest) {
longest = q - name;
nmatches = 1;
found = c;
} else if (q - name == longest)
nmatches++;
}
}
if (nmatches > 1)
return (struct cmd *)-1;
return found;
}
void usage()
{
fputs (
"Usage: seaf-server [--version ] [-c CONF_DIR] COMMAND [ARGS]\n"
"\n"
"Available commands are:\n"
" add-server Add a chunk server\n"
" del-server Delete a chunk server\n"
" list-servers List current chunk servers\n"
,stderr);
}
void show_version()
{
fputs ("seafile version: 0.1\n", stderr);
}
SEARPC_CLIENT_DEFUN_INT__STRING(monitor_add_chunk_server)
SEARPC_CLIENT_DEFUN_INT__STRING(monitor_del_chunk_server)
SEARPC_CLIENT_DEFUN_STRING__VOID(monitor_list_chunk_servers)
static gboolean print_version = FALSE;
static char *config_dir = NULL;
static char *central_config_dir = NULL;
static GOptionEntry entries[] =
{
{ "version", 0, 0, G_OPTION_ARG_NONE, &print_version, "show version", NULL },
{ "config-file", 'c', 0, G_OPTION_ARG_STRING, &config_dir,
"ccnet configuration directory", NULL },
{ "server-conf-dir", 'F', 0, G_OPTION_ARG_STRING, &central_config_dir,
"server configuration directory", NULL },
{ NULL },
};
int main (int argc, char *argv[])
{
struct cmd *c;
g_type_init ();
config_dir = DEFAULT_CONFIG_DIR;
if (argc == 1) {
usage();
exit(1);
}
GError *error = NULL;
GOptionContext *context;
context = g_option_context_new (NULL);
g_option_context_add_main_entries (context, entries, "seafile");
if (!g_option_context_parse (context, &argc, &argv, &error))
{
g_print ("option parsing failed: %s\n", error->message);
exit (1);
}
if (print_version) {
show_version();
exit(1);
}
if (argc <= 1) {
usage();
exit(1);
}
c = getcmd (argv[1]);
if (c == NULL) {
usage();
exit(1);
}
client = ccnet_client_new ();
if ( (ccnet_client_load_confdir(client, central_config_dir, config_dir)) < 0 ) {
fprintf (stderr, "Read config dir error\n");
exit(1);
}
if (ccnet_client_connect_daemon(client, CCNET_CLIENT_SYNC) < 0)
{
fprintf(stderr, "Connect to server fail: %s\n", strerror(errno));
exit(1);
}
priv.session = client;
priv.peer_id = NULL;
priv.service = "monitor";
rpc_client = searpc_client_new ();
rpc_client->transport = searpc_transport_send;
rpc_client->arg = &priv;
argc -= 2;
argv += 2;
c->handler (argc, argv);
ccnet_client_disconnect_daemon (client);
return 0;
}
static int add_server (int argc, char **argv)
{
char *server_id;
GError *error = NULL;
if (argc != 1) {
fprintf (stderr, "monitor-tool add-server <peer id | peer name>\n");
return -1;
}
server_id = argv[0];
if (monitor_add_chunk_server (rpc_client, server_id, &error) < 0) {
fprintf (stderr, "Failed to add chunk server %s.\n", server_id);
return -1;
}
printf ("Added chunk server %s.\n", server_id);
return 0;
}
static int del_server (int argc, char **argv)
{
char *server_id;
GError *error = NULL;
if (argc != 1) {
fprintf (stderr, "monitor-tool del-server <peer id | peer name>\n");
return -1;
}
server_id = argv[0];
if (monitor_del_chunk_server (rpc_client, server_id, &error) < 0) {
fprintf (stderr, "Failed to delete chunk server %s.\n", server_id);
return -1;
}
printf ("Deleted chunk server %s.\n", server_id);
return 0;
}
static int list_servers (int argc, char **argv)
{
GError *error = NULL;
char *list = NULL;
list = monitor_list_chunk_servers (rpc_client, &error);
if (!list) {
fprintf (stderr, "%s\n", error->message);
return -1;
}
printf ("%s", list);
return 0;
}

View File

@ -59,94 +59,6 @@ else
AC_MSG_RESULT(no)
fi
# test which sub-component to compile
if test "$bwin32" = true; then
compile_client=yes
compile_cli=no
compile_tools=no
compile_server=no
fi
if test "$bmac" = true; then
compile_client=yes
compile_cli=no
compile_tools=no
compile_server=no
fi
if test "$blinux" = true; then
compile_cli=yes
compile_tools=yes
compile_server=no
# AC_ARG_ENABLE(seablock, AC_HELP_STRING([--enable-seablock],
# [enable seablock]), [compile_seablock=$enableval],
# [compile_seablock="no"])
AC_ARG_ENABLE(riak, AC_HELP_STRING([--enable-riak], [enable riak backend]),
[compile_riak=$enableval],[compile_riak="no"])
fi
if test "$bwin32" != true; then
AC_ARG_ENABLE(fuse, AC_HELP_STRING([--enable-fuse], [enable fuse virtual file system]),
[compile_fuse=$enableval],[compile_fuse="yes"])
fi
AC_ARG_ENABLE(client, AC_HELP_STRING([--enable-client], [enable client]),
[compile_client=$enableval],[compile_client="yes"])
AC_ARG_ENABLE(server, AC_HELP_STRING([--enable-server], [enable server]),
[compile_server=$enableval],[compile_server="no"])
AC_ARG_ENABLE(python,
AC_HELP_STRING([--enable-python],[build ccnet python binding]),
[compile_python=$enableval],
[compile_python=yes])
AC_ARG_ENABLE(server-pkg, AC_HELP_STRING([--enable-server-pkg], [enable static compile]),
[server_pkg=$enableval],[server_pkg="no"])
AM_CONDITIONAL([SERVER_ONLY], [test "${server_pkg}" = "yes"])
AC_ARG_ENABLE(static-build, AC_HELP_STRING([--enable-static-build], [enable static compile]),
[static_comp=$enableval],[static_comp="no"])
if test x${static_comp} = xyes; then
STATIC_COMPILE=-static
fi
AC_SUBST(STATIC_COMPILE)
# If we're building the server release package, set the run-time path
# for the executables so that the loader will look up shared libs
# in the 'lib' dir of the release package.
# Read "man ld.so" for a description of $ORIGIN.
# Refer to http://blog.linuxgamepublishing.com/2009/02/08/our-new-way-to-meet-the-lgpl/
if test x${server_pkg} = xyes; then
compile_client=no
compile_cli=yes
compile_tools=yes
compile_server=yes
SERVER_PKG_RPATH=-Wl,-R,\'\$\$ORIGIN/../lib\'
SERVER_PKG_PY_RPATH=-Wl,-R,\'\$\$ORIGIN/../../..\'
fi
AC_SUBST(SERVER_PKG_RPATH)
AC_SUBST(SERVER_PKG_PY_RPATH)
AM_CONDITIONAL([COMPILE_CLI], [test "${compile_cli}" = "yes"])
AM_CONDITIONAL([COMPILE_TOOLS], [test "${compile_tools}" = "yes"])
AM_CONDITIONAL([COMPILE_PYTHON], [test "${compile_python}" = "yes"])
AM_CONDITIONAL([COMPILE_CLIENT], [test "${compile_client}" = "yes"])
AM_CONDITIONAL([COMPILE_SERVER], [test "${compile_server}" = "yes"])
#AM_CONDITIONAL([COMPILE_SEABLOCK], [test "${compile_seablock}" = "yes"])
AM_CONDITIONAL([COMPILE_RIAK], [test "${compile_riak}" = "yes"])
AM_CONDITIONAL([COMPILE_FUSE], [test "${compile_fuse}" = "yes"])
AM_CONDITIONAL([WIN32], [test "$bwin32" = "true"])
AM_CONDITIONAL([MACOS], [test "$bmac" = "true"])
AM_CONDITIONAL([LINUX], [test "$blinux" = "true"])
@ -243,10 +155,7 @@ GLIB_REQUIRED=2.16.0
CCNET_REQUIRED=0.9.3
SEARPC_REQUIRED=1.0
JANSSON_REQUIRED=2.2.1
ZDB_REQUIRED=2.10
#LIBNAUTILUS_EXTENSION_REQUIRED=2.30.1
CURL_REQUIRED=7.17
FUSE_REQUIRED=2.7.3
ZLIB_REQUIRED=1.2.0
PKG_CHECK_MODULES(SSL, [openssl])
@ -281,56 +190,22 @@ PKG_CHECK_MODULES(ZLIB, [zlib >= $ZLIB_REQUIRED])
AC_SUBST(ZLIB_CFLAGS)
AC_SUBST(ZLIB_LIBS)
if test x${compile_python} = xyes; then
AM_PATH_PYTHON([2.6])
PKG_CHECK_MODULES(CURL, [libcurl >= $CURL_REQUIRED])
AC_SUBST(CURL_CFLAGS)
AC_SUBST(CURL_LIBS)
if test "$bwin32" = true; then
# set pyexecdir to somewhere like /c/Python26/Lib/site-packages
pyexecdir=${PYTHON_DIR}/Lib/site-packages
pythondir=${pyexecdir}
pkgpyexecdir=${pyexecdir}/${PACKAGE}
pkgpythondir=${pythondir}/${PACKAGE}
AM_PATH_PYTHON([2.6])
fi # end for bwin32
if test "$bwin32" = true; then
# set pyexecdir to somewhere like /c/Python26/Lib/site-packages
pyexecdir=${PYTHON_DIR}/Lib/site-packages
pythondir=${pyexecdir}
pkgpyexecdir=${pyexecdir}/${PACKAGE}
pkgpythondir=${pythondir}/${PACKAGE}
fi
fi # end for bwin32
# Check libzdb if compile seafile server
if test "${compile_server}" = "yes"; then
PKG_CHECK_MODULES(ZDB, [zdb >= $ZDB_REQUIRED])
AC_SUBST(ZDB_CFLAGS)
AC_SUBST(ZDB_LIBS)
fi
if test "${compile_fuse}" = "yes"; then
PKG_CHECK_MODULES(FUSE, [fuse >= $FUSE_REQUIRED])
AC_SUBST(FUSE_CFLAGS)
AC_SUBST(FUSE_LIBS)
fi
if test x${compile_server} = xyes; then
dnl check libarchive
LIBARCHIVE_REQUIRED=2.8.5
PKG_CHECK_MODULES(LIBARCHIVE, [libarchive >= $LIBARCHIVE_REQUIRED])
AC_SUBST(LIBARCHIVE_CFLAGS)
AC_SUBST(LIBARCHIVE_LIBS)
fi
if test "${compile_client}" = "yes"; then
PKG_CHECK_MODULES(CURL, [libcurl >= $CURL_REQUIRED])
AC_SUBST(CURL_CFLAGS)
AC_SUBST(CURL_LIBS)
fi
AM_CONDITIONAL([HAVE_KEYSTORAGE_GK], [test "${compile_gnome_keyring}" = "yes"])
if test "${compile_gnome_keyring}" = "yes"; then
PKG_CHECK_MODULES(GNOME_KEYRING, [gnome-keyring-1])
AC_SUBST(GNOME_KEYRING_CFLAGS)
AC_SUBST(GNOME_KEYRING_LIBS)
AC_DEFINE(HAVE_KEYSTORAGE_GK, 1, [Have Gnome-Keyring support])
fi
BPWRAPPER_REQUIRED=0.1
AC_ARG_ENABLE(breakpad, AC_HELP_STRING([--enable-breakpad], [build google breakpad support]),
[compile_breakpad=$enableval],[compile_breakpad="no"])
@ -349,37 +224,14 @@ ac_configure_args="$ac_configure_args -q"
AC_CONFIG_FILES(
Makefile
include/Makefile
fuse/Makefile
lib/Makefile
lib/libseafile.pc
common/Makefile
common/cdc/Makefile
common/index/Makefile
daemon/Makefile
server/Makefile
server/gc/Makefile
app/Makefile
python/Makefile
python/seafile/Makefile
python/seaserv/Makefile
controller/Makefile
tools/Makefile
tests/Makefile
tests/common-conf.sh
doc/Makefile
)
AC_OUTPUT
echo
echo "The following modules will be built:"
echo
if test x${compile_client} = xyes; then
echo "seaf-daemon"
fi
if test x${compile_server} = xyes; then
echo "seaf-server"
fi
echo

View File

@ -1,24 +0,0 @@
bin_PROGRAMS = seafile-controller
AM_CFLAGS = \
-DSEAFILE_SERVER \
-I$(top_srcdir)/include \
-I$(top_srcdir)/lib \
-I$(top_builddir)/lib \
-I$(top_srcdir)/common \
@CCNET_CFLAGS@ \
@SEARPC_CFLAGS@ \
@GLIB2_CFLAGS@ \
@ZDB_CFLAGS@ \
-Wall
noinst_HEADERS = seafile-controller.h ../common/log.h
seafile_controller_SOURCES = seafile-controller.c ../common/log.c
seafile_controller_LDADD = @CCNET_LIBS@ \
$(top_builddir)/lib/libseafile_common.la \
@GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ @LIB_RT@ @LIB_UUID@ @LIBEVENT_LIBS@ \
@SEARPC_LIBS@ @JANSSON_LIBS@ @ZLIB_LIBS@
seafile_controller_LDFLAGS = @STATIC_COMPILE@ @SERVER_PKG_RPATH@

File diff suppressed because it is too large.

View File

@ -1,64 +0,0 @@
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
* Seafile-controller is responsible for:
*
* 1. Start: start server processes:
*
* - ccnet-server
* - seaf-server
* - seaf-mon
*
* 2. Repair:
*
* - ensure ccnet process availability by watching client->connfd
* - ensure server process availability by periodically checking that each process is running
* If some process has stopped working, try to restart it
* (a minimal sketch of this check-and-restart pattern follows this header).
*
*/
#ifndef SEAFILE_CONTROLLER_H
#define SEAFILE_CONTROLLER_H
typedef struct _SeafileController SeafileController;
enum {
PID_CCNET = 0,
PID_SERVER,
PID_SEAFDAV,
N_PID
};
// host size limit (39 characters: the maximum IPv6 address length)
#define SEAFDAV_MAX_HOST 39
typedef struct SeafDavConfig {
gboolean enabled;
gboolean fastcgi;
int port;
// host to bind server to
char *host;
} SeafDavConfig;
struct _SeafileController {
char *central_config_dir;
char *config_dir;
char *seafile_dir;
char *logdir;
CcnetClient *client;
CcnetClient *sync_client;
CcnetMqclientProc *mqclient_proc;
guint check_process_timer;
guint client_io_id;
/* Decide whether to start seaf-server in cloud mode */
gboolean cloud_mode;
int pid[N_PID];
char *pidfile[N_PID];
SeafDavConfig seafdav_config;
};
#endif
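
The comment at the top of this removed header describes a start-then-supervise design: launch ccnet-server, seaf-server and seaf-mon, then periodically verify that they are still alive and restart anything that has died. Purely as a hedged illustration of that check-and-restart pattern (not the removed controller's actual code), a periodic GLib timeout handler could look like the sketch below; the restart step is a placeholder because the real fork/exec logic lived in the removed seafile-controller.c.

/*
 * Minimal sketch of the "Repair" loop described in the header comment.
 * Illustration only; not the removed implementation.
 */
#include <stdio.h>
#include <signal.h>      /* kill() */
#include <sys/types.h>   /* pid_t */
#include <glib.h>
#include "seafile-controller.h"
static gboolean
on_check_process_timer (gpointer data)
{
    SeafileController *ctl = data;
    int i;
    for (i = 0; i < N_PID; i++) {
        pid_t pid = ctl->pid[i];
        /* kill(pid, 0) only performs the existence/permission check;
         * no signal is actually delivered. */
        if (pid > 0 && kill (pid, 0) == 0)
            continue;                       /* still running */
        fprintf (stderr, "component %d looks dead, should be restarted\n", i);
        /* ... fork/exec the component again and store the new pid ... */
    }
    return TRUE;   /* keep the GLib timeout (check_process_timer) installed */
}

The removed controller additionally watched client->connfd so that a ccnet failure is detected immediately rather than on the next timer tick.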

View File

@ -14,10 +14,7 @@ AM_CFLAGS = -DPKGDATADIR=\"$(pkgdatadir)\" \
@BPWRAPPER_CFLAGS@ \
-Wall
bin_PROGRAMS =
if !SERVER_ONLY
bin_PROGRAMS += seaf-daemon
endif
bin_PROGRAMS = seaf-daemon
proc_headers = $(addprefix processors/, \
check-tx-v3-proc.h \
@ -122,10 +119,9 @@ common_src = \
seaf_daemon_SOURCES = seaf-daemon.c $(common_src)
seaf_daemon_LDADD = $(top_builddir)/lib/libseafile_common.la \
@LIB_INTL@ \
@GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ @LIB_RT@ @LIB_UUID@ -lsqlite3 @LIBEVENT_LIBS@ \
$(top_builddir)/common/cdc/libcdc.la \
$(top_builddir)/common/index/libindex.la @LIB_WS32@ @LIB_CRYPT32@ \
@SEARPC_LIBS@ @CCNET_LIBS@ @GNOME_KEYRING_LIBS@ @JANSSON_LIBS@ @LIB_MAC@ @ZLIB_LIBS@ @CURL_LIBS@ @BPWRAPPER_LIBS@
@SEARPC_LIBS@ @CCNET_LIBS@ @JANSSON_LIBS@ @LIB_MAC@ @ZLIB_LIBS@ @CURL_LIBS@ @BPWRAPPER_LIBS@
seaf_daemon_LDFLAGS = @STATIC_COMPILE@ @CONSOLE@
seaf_daemon_LDFLAGS = @CONSOLE@

View File

@ -1,7 +1,5 @@
if COMPILE_CLIENT
CLIENT_MANUALS = seaf-daemon.1 seaf-cli.1
endif
dist_man1_MANS = $(CLIENT_MANUALS)
EXTRA_DIST = cli-readme.txt seafile-tutorial.doc Seafile-tutorial-cn.doc
EXTRA_DIST = cli-readme.txt

Binary file not shown.

View File

@ -1,20 +0,0 @@
.\" Manpage for seafile-client
.\" Contact freeplant@gmail.com to correct errors or typos.
.TH seafile 1 "31 Jan 2013" "Linux" "seafile client man page"
.SH NAME
ccnet \- the networking daemon of seafile client
.SH SYNOPSIS
ccnet [OPTIONS]
.SH DESCRIPTION
.BR ccnet
is the networking daemon of the seafile client. It handles identification and
communication with the seafile server.
It's started by seafile-applet(1).
.SH SEE ALSO
seafile-applet(1), seaf-daemon(1), seafile-web(1), seaf-cli(1)
.SH AUTHOR
Lingtao Pan (freeplant@gmail.com)
.SH WEBSITE
http://www.seafile.com
.LP
https://github.com/haiwen/seafile/

View File

@ -1,24 +0,0 @@
.\" Manpage for seafile-client
.\" Contact freeplant@gmail.com to correct errors or typos.
.TH seafile 1 "31 Jan 2013" "Linux" "seafile-client man page"
.SH NAME
seafile-applet \- start the seafile client
.SH SYNOPSIS
seafile-applet [OPTIONS]
.SH DESCRIPTION
.BR seafile-applet
is the client program of seafile. It will start
.BR ccnet(1)
,
.BR seaf-daemon(1)
,
.BR seafile-web(1)
for you.
.SH SEE ALSO
ccnet(1), seaf-daemon(1), seafile-web(1), seaf-cli(1)
.SH AUTHOR
Lingtao Pan (freeplant@gmail.com)
.SH WEBSITE
http://www.seafile.com
.LP
https://github.com/haiwen/seafile/

Binary file not shown.

View File

@ -1,46 +0,0 @@
AM_CFLAGS = -DPKGDATADIR=\"$(pkgdatadir)\" \
-DPACKAGE_DATA_DIR=\""$(pkgdatadir)"\" \
-DSEAFILE_SERVER \
-I$(top_srcdir)/include \
-I$(top_srcdir)/lib \
-I$(top_builddir)/lib \
-I$(top_srcdir)/common \
@CCNET_CFLAGS@ \
@SEARPC_CFLAGS@ \
@GLIB2_CFLAGS@ \
@ZDB_CFLAGS@ \
@FUSE_CFLAGS@ \
-Wall
bin_PROGRAMS = seaf-fuse
noinst_HEADERS = seaf-fuse.h seafile-session.h repo-mgr.h
seaf_fuse_SOURCES = seaf-fuse.c \
seafile-session.c \
file.c \
getattr.c \
readdir.c \
repo-mgr.c \
../common/block-mgr.c \
../common/block-backend.c \
../common/block-backend-fs.c \
../common/branch-mgr.c \
../common/commit-mgr.c \
../common/fs-mgr.c \
../common/log.c \
../common/seaf-db.c \
../common/seaf-utils.c \
../common/obj-store.c \
../common/obj-backend-fs.c \
../common/obj-backend-riak.c \
../common/seafile-crypt.c
seaf_fuse_LDADD = @CCNET_LIBS@ \
$(top_builddir)/lib/libseafile.la \
@GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ @LIB_RT@ @LIB_UUID@ \
-lsqlite3 @LIBEVENT_LIBS@ \
$(top_builddir)/common/cdc/libcdc.la \
@SEARPC_LIBS@ @JANSSON_LIBS@ @ZDB_LIBS@ @FUSE_LIBS@ @ZLIB_LIBS@
seaf_fuse_LDFLAGS = @STATIC_COMPILE@ @SERVER_PKG_RPATH@

View File

@ -1,107 +0,0 @@
#include "common.h"
#define FUSE_USE_VERSION 26
#include <fuse.h>
#include <glib.h>
#include <glib-object.h>
#include <ccnet.h>
#include <seaf-db.h>
#include "log.h"
#include "utils.h"
#include "seaf-fuse.h"
int read_file(SeafileSession *seaf,
const char *store_id, int version,
Seafile *file,
char *buf, size_t size,
off_t offset, struct fuse_file_info *info)
{
BlockHandle *handle = NULL;
BlockMetadata *bmd;
char *blkid;
char *ptr;
off_t off = 0, nleft;
int i, n, ret = -EIO;
for (i = 0; i < file->n_blocks; i++) {
blkid = file->blk_sha1s[i];
bmd = seaf_block_manager_stat_block(seaf->block_mgr, store_id, version, blkid);
if (!bmd)
return -EIO;
if (offset < off + bmd->size) {
g_free (bmd);
break;
}
off += bmd->size;
g_free (bmd);
}
/* beyond the file size */
if (i == file->n_blocks)
return 0;
nleft = size;
ptr = buf;
while (nleft > 0 && i < file->n_blocks) {
blkid = file->blk_sha1s[i];
handle = seaf_block_manager_open_block(seaf->block_mgr,
store_id, version,
blkid, BLOCK_READ);
if (!handle) {
seaf_warning ("Failed to open block %s:%s.\n", store_id, blkid);
return -EIO;
}
/* trim the offset in a block */
if (offset > off) {
char *tmp = (char *)malloc(sizeof(char) * (offset - off));
if (!tmp)
return -ENOMEM;
n = seaf_block_manager_read_block(seaf->block_mgr, handle,
tmp, offset-off);
if (n != offset - off) {
seaf_warning ("Failed to read block %s:%s.\n", store_id, blkid);
free (tmp);
goto out;
}
off += n;
free(tmp);
}
if ((n = seaf_block_manager_read_block(seaf->block_mgr,
handle, ptr, nleft)) < 0) {
seaf_warning ("Failed to read block %s:%s.\n", store_id, blkid);
goto out;
}
nleft -= n;
ptr += n;
off += n;
++i;
/* At this point we should have read all the content of the block or
* have read up to @size bytes. So it's safe to close the block.
*/
seaf_block_manager_close_block(seaf->block_mgr, handle);
seaf_block_manager_block_handle_free (seaf->block_mgr, handle);
}
return size - nleft;
out:
if (handle) {
seaf_block_manager_close_block(seaf->block_mgr, handle);
seaf_block_manager_block_handle_free (seaf->block_mgr, handle);
}
return ret;
}

View File

@ -1,190 +0,0 @@
#include "common.h"
#define FUSE_USE_VERSION 26
#include <fuse.h>
#include <glib.h>
#include <glib-object.h>
#include <ccnet.h>
#include <ccnet/ccnet-object.h>
#include <seaf-db.h>
#include "log.h"
#include "utils.h"
#include "seaf-fuse.h"
#include "seafile-session.h"
static CcnetEmailUser *get_user_from_ccnet (SearpcClient *client, const char *user)
{
return (CcnetEmailUser *)searpc_client_call__object (client,
"get_emailuser", CCNET_TYPE_EMAIL_USER, NULL,
1, "string", user);
}
static int getattr_root(SeafileSession *seaf, struct stat *stbuf)
{
stbuf->st_mode = S_IFDIR | 0755;
stbuf->st_nlink = 2;
stbuf->st_size = 4096;
return 0;
}
static int getattr_user(SeafileSession *seaf, const char *user, struct stat *stbuf)
{
SearpcClient *client;
CcnetEmailUser *emailuser;
client = ccnet_create_pooled_rpc_client (seaf->client_pool,
NULL,
"ccnet-threaded-rpcserver");
if (!client) {
seaf_warning ("Failed to alloc rpc client.\n");
return -ENOMEM;
}
emailuser = get_user_from_ccnet (client, user);
if (!emailuser) {
ccnet_rpc_client_free (client);
return -ENOENT;
}
g_object_unref (emailuser);
ccnet_rpc_client_free (client);
stbuf->st_mode = S_IFDIR | 0755;
stbuf->st_nlink = 2;
stbuf->st_size = 4096;
return 0;
}
static int getattr_repo(SeafileSession *seaf,
const char *user, const char *repo_id, const char *repo_path,
struct stat *stbuf)
{
SeafRepo *repo = NULL;
SeafBranch *branch;
SeafCommit *commit = NULL;
guint32 mode = 0;
char *id = NULL;
int ret = 0;
repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);
if (!repo) {
seaf_warning ("Failed to get repo %s.\n", repo_id);
ret = -ENOENT;
goto out;
}
branch = repo->head;
commit = seaf_commit_manager_get_commit(seaf->commit_mgr,
repo->id, repo->version,
branch->commit_id);
if (!commit) {
seaf_warning ("Failed to get commit %s:%.8s.\n", repo->id, branch->commit_id);
ret = -ENOENT;
goto out;
}
id = seaf_fs_manager_path_to_obj_id(seaf->fs_mgr,
repo->store_id, repo->version,
commit->root_id,
repo_path, &mode, NULL);
if (!id) {
seaf_warning ("Path %s doesn't exist in repo %s.\n", repo_path, repo_id);
ret = -ENOENT;
goto out;
}
if (S_ISDIR(mode)) {
SeafDir *dir;
GList *l;
int cnt = 2; /* '.' and '..' */
dir = seaf_fs_manager_get_seafdir(seaf->fs_mgr,
repo->store_id, repo->version, id);
if (dir) {
for (l = dir->entries; l; l = l->next)
cnt++;
}
if (strcmp (repo_path, "/") != 0) {
// get dirent of the dir
SeafDirent *dirent = seaf_fs_manager_get_dirent_by_path (seaf->fs_mgr,
repo->store_id,
repo->version,
commit->root_id,
repo_path, NULL);
if (dirent && repo->version != 0)
stbuf->st_mtime = dirent->mtime;
seaf_dirent_free (dirent);
}
stbuf->st_size += cnt * sizeof(SeafDirent);
stbuf->st_mode = mode | 0755;
stbuf->st_nlink = 2;
seaf_dir_free (dir);
} else if (S_ISREG(mode)) {
Seafile *file;
file = seaf_fs_manager_get_seafile(seaf->fs_mgr,
repo->store_id, repo->version, id);
if (file)
stbuf->st_size = file->file_size;
SeafDirent *dirent = seaf_fs_manager_get_dirent_by_path (seaf->fs_mgr,
repo->store_id,
repo->version,
commit->root_id,
repo_path, NULL);
if (dirent && repo->version != 0)
stbuf->st_mtime = dirent->mtime;
stbuf->st_mode = mode | 0644;
stbuf->st_nlink = 1;
seaf_dirent_free (dirent);
seafile_unref (file);
} else {
return -ENOENT;
}
out:
g_free (id);
seaf_repo_unref (repo);
seaf_commit_unref (commit);
return ret;
}
int do_getattr(SeafileSession *seaf, const char *path, struct stat *stbuf)
{
int n_parts;
char *user, *repo_id, *repo_path;
int ret = 0;
if (parse_fuse_path (path, &n_parts, &user, &repo_id, &repo_path) < 0) {
return -ENOENT;
}
switch (n_parts) {
case 0:
ret = getattr_root(seaf, stbuf);
break;
case 1:
ret = getattr_user(seaf, user, stbuf);
break;
case 2:
case 3:
ret = getattr_repo(seaf, user, repo_id, repo_path, stbuf);
break;
}
g_free (user);
g_free (repo_id);
g_free (repo_path);
return ret;
}

View File

@ -1,237 +0,0 @@
#include "common.h"
#define FUSE_USE_VERSION 26
#include <fuse.h>
#include <glib.h>
#include <glib-object.h>
#include <ccnet.h>
#include <ccnet/ccnet-object.h>
#include <seaf-db.h>
#include "log.h"
#include "utils.h"
#include "seaf-fuse.h"
#include "seafile-session.h"
static char *replace_slash (const char *repo_name)
{
char *ret = g_strdup(repo_name);
char *p;
for (p = ret; *p != 0; ++p)
if (*p == '/')
*p = '_';
return ret;
}
static GList *get_users_from_ccnet (SearpcClient *client, const char *source)
{
return searpc_client_call__objlist (client,
"get_emailusers", CCNET_TYPE_EMAIL_USER, NULL,
3, "string", source, "int", -1, "int", -1);
}
static CcnetEmailUser *get_user_from_ccnet (SearpcClient *client, const char *user)
{
return (CcnetEmailUser *)searpc_client_call__object (client,
"get_emailuser", CCNET_TYPE_EMAIL_USER, NULL,
1, "string", user);
}
static int readdir_root(SeafileSession *seaf,
void *buf, fuse_fill_dir_t filler, off_t offset,
struct fuse_file_info *info)
{
SearpcClient *client = NULL;
GList *users, *p;
CcnetEmailUser *user;
const char *email;
GHashTable *user_hash;
int dummy;
client = ccnet_create_pooled_rpc_client (seaf->client_pool,
NULL,
"ccnet-threaded-rpcserver");
if (!client) {
seaf_warning ("Failed to alloc rpc client.\n");
return -ENOMEM;
}
user_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);
users = get_users_from_ccnet (client, "DB");
for (p = users; p; p = p->next) {
user = p->data;
email = ccnet_email_user_get_email (user);
g_hash_table_insert (user_hash, g_strdup(email), &dummy);
g_object_unref (user);
}
g_list_free (users);
users = get_users_from_ccnet (client, "LDAP");
for (p = users; p; p = p->next) {
user = p->data;
email = ccnet_email_user_get_email (user);
g_hash_table_insert (user_hash, g_strdup(email), &dummy);
g_object_unref (user);
}
g_list_free (users);
users = g_hash_table_get_keys (user_hash);
for (p = users; p; p = p->next) {
email = p->data;
filler (buf, email, NULL, 0);
}
g_list_free (users);
g_hash_table_destroy (user_hash);
ccnet_rpc_client_free (client);
return 0;
}
static int readdir_user(SeafileSession *seaf, const char *user,
void *buf, fuse_fill_dir_t filler, off_t offset,
struct fuse_file_info *info)
{
SearpcClient *client;
CcnetEmailUser *emailuser;
GList *list = NULL, *p;
GString *name;
client = ccnet_create_pooled_rpc_client (seaf->client_pool,
NULL,
"ccnet-threaded-rpcserver");
if (!client) {
seaf_warning ("Failed to alloc rpc client.\n");
return -ENOMEM;
}
emailuser = get_user_from_ccnet (client, user);
if (!emailuser) {
ccnet_rpc_client_free (client);
return -ENOENT;
}
g_object_unref (emailuser);
ccnet_rpc_client_free (client);
list = seaf_repo_manager_get_repos_by_owner (seaf->repo_mgr, user);
if (!list)
return 0;
for (p = list; p; p = p->next) {
SeafRepo *repo = (SeafRepo *)p->data;
/* Don't list virtual repos. */
if (seaf_repo_manager_is_virtual_repo(seaf->repo_mgr, repo->id)) {
seaf_repo_unref (repo);
continue;
}
// Don't list encrypted repos
if (repo->encrypted) {
seaf_repo_unref (repo);
continue;
}
char *clean_repo_name = replace_slash (repo->name);
name = g_string_new ("");
g_string_printf (name, "%s_%s", repo->id, clean_repo_name);
filler(buf, name->str, NULL, 0);
g_string_free (name, TRUE);
g_free (clean_repo_name);
seaf_repo_unref (repo);
}
g_list_free (list);
return 0;
}
static int readdir_repo(SeafileSession *seaf,
const char *user, const char *repo_id, const char *repo_path,
void *buf, fuse_fill_dir_t filler, off_t offset,
struct fuse_file_info *info)
{
SeafRepo *repo = NULL;
SeafBranch *branch;
SeafCommit *commit = NULL;
SeafDir *dir = NULL;
GList *l;
int ret = 0;
repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);
if (!repo) {
seaf_warning ("Failed to get repo %s.\n", repo_id);
ret = -ENOENT;
goto out;
}
branch = repo->head;
commit = seaf_commit_manager_get_commit(seaf->commit_mgr,
repo->id, repo->version,
branch->commit_id);
if (!commit) {
seaf_warning ("Failed to get commit %s:%.8s.\n", repo->id, branch->commit_id);
ret = -ENOENT;
goto out;
}
dir = seaf_fs_manager_get_seafdir_by_path(seaf->fs_mgr,
repo->store_id, repo->version,
commit->root_id,
repo_path, NULL);
if (!dir) {
seaf_warning ("Path %s doesn't exist in repo %s.\n", repo_path, repo_id);
ret = -ENOENT;
goto out;
}
for (l = dir->entries; l; l = l->next) {
SeafDirent *seaf_dent = (SeafDirent *) l->data;
/* FIXME: maybe we need to return stbuf */
filler(buf, seaf_dent->name, NULL, 0);
}
out:
seaf_repo_unref (repo);
seaf_commit_unref (commit);
seaf_dir_free (dir);
return ret;
}
int do_readdir(SeafileSession *seaf, const char *path, void *buf,
fuse_fill_dir_t filler, off_t offset,
struct fuse_file_info *info)
{
int n_parts;
char *user, *repo_id, *repo_path;
int ret = 0;
if (parse_fuse_path (path, &n_parts, &user, &repo_id, &repo_path) < 0) {
return -ENOENT;
}
switch (n_parts) {
case 0:
ret = readdir_root(seaf, buf, filler, offset, info);
break;
case 1:
ret = readdir_user(seaf, user, buf, filler, offset, info);
break;
case 2:
case 3:
ret = readdir_repo(seaf, user, repo_id, repo_path, buf, filler, offset, info);
break;
}
g_free (user);
g_free (repo_id);
g_free (repo_path);
return ret;
}

View File

@ -1,428 +0,0 @@
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include "common.h"
#include <glib/gstdio.h>
#include <ccnet.h>
#include "utils.h"
#include "log.h"
#include "seafile-session.h"
#include "commit-mgr.h"
#include "branch-mgr.h"
#include "repo-mgr.h"
#include "fs-mgr.h"
#include "seafile-error.h"
#include "seaf-db.h"
#define INDEX_DIR "index"
struct _SeafRepoManagerPriv {
};
static SeafRepo *
load_repo (SeafRepoManager *manager, const char *repo_id);
gboolean
is_repo_id_valid (const char *id)
{
if (!id)
return FALSE;
return is_uuid_valid (id);
}
SeafRepo*
seaf_repo_new (const char *id, const char *name, const char *desc)
{
SeafRepo* repo;
/* valid check */
repo = g_new0 (SeafRepo, 1);
memcpy (repo->id, id, 36);
repo->id[36] = '\0';
repo->name = g_strdup(name);
repo->desc = g_strdup(desc);
repo->ref_cnt = 1;
return repo;
}
void
seaf_repo_free (SeafRepo *repo)
{
if (repo->name) g_free (repo->name);
if (repo->desc) g_free (repo->desc);
if (repo->category) g_free (repo->category);
if (repo->head) seaf_branch_unref (repo->head);
g_free (repo);
}
void
seaf_repo_ref (SeafRepo *repo)
{
g_atomic_int_inc (&repo->ref_cnt);
}
void
seaf_repo_unref (SeafRepo *repo)
{
if (!repo)
return;
if (g_atomic_int_dec_and_test (&repo->ref_cnt))
seaf_repo_free (repo);
}
static void
set_head_common (SeafRepo *repo, SeafBranch *branch)
{
if (repo->head)
seaf_branch_unref (repo->head);
repo->head = branch;
seaf_branch_ref(branch);
}
void
seaf_repo_from_commit (SeafRepo *repo, SeafCommit *commit)
{
repo->name = g_strdup (commit->repo_name);
repo->desc = g_strdup (commit->repo_desc);
repo->encrypted = commit->encrypted;
if (repo->encrypted) {
repo->enc_version = commit->enc_version;
if (repo->enc_version >= 1)
memcpy (repo->magic, commit->magic, 33);
}
repo->no_local_history = commit->no_local_history;
repo->version = commit->version;
}
void
seaf_repo_to_commit (SeafRepo *repo, SeafCommit *commit)
{
commit->repo_name = g_strdup (repo->name);
commit->repo_desc = g_strdup (repo->desc);
commit->encrypted = repo->encrypted;
if (commit->encrypted) {
commit->enc_version = repo->enc_version;
if (commit->enc_version >= 1)
commit->magic = g_strdup (repo->magic);
}
commit->no_local_history = repo->no_local_history;
commit->version = repo->version;
}
static gboolean
collect_commit (SeafCommit *commit, void *vlist, gboolean *stop)
{
GList **commits = vlist;
/* The traverse function will unref the commit, so we need to ref it.
*/
seaf_commit_ref (commit);
*commits = g_list_prepend (*commits, commit);
return TRUE;
}
GList *
seaf_repo_get_commits (SeafRepo *repo)
{
GList *branches;
GList *ptr;
SeafBranch *branch;
GList *commits = NULL;
branches = seaf_branch_manager_get_branch_list (seaf->branch_mgr, repo->id);
if (branches == NULL) {
seaf_warning ("Failed to get branch list of repo %s.\n", repo->id);
return NULL;
}
for (ptr = branches; ptr != NULL; ptr = ptr->next) {
branch = ptr->data;
gboolean res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,
repo->id,
repo->version,
branch->commit_id,
collect_commit,
&commits,
FALSE);
if (!res) {
for (ptr = commits; ptr != NULL; ptr = ptr->next)
seaf_commit_unref ((SeafCommit *)(ptr->data));
g_list_free (commits);
goto out;
}
}
commits = g_list_reverse (commits);
out:
for (ptr = branches; ptr != NULL; ptr = ptr->next) {
seaf_branch_unref ((SeafBranch *)ptr->data);
}
return commits;
}
static int
compare_repo (const SeafRepo *srepo, const SeafRepo *trepo)
{
return g_strcmp0 (srepo->id, trepo->id);
}
SeafRepoManager*
seaf_repo_manager_new (SeafileSession *seaf)
{
SeafRepoManager *mgr = g_new0 (SeafRepoManager, 1);
mgr->priv = g_new0 (SeafRepoManagerPriv, 1);
mgr->seaf = seaf;
return mgr;
}
int
seaf_repo_manager_init (SeafRepoManager *mgr)
{
return 0;
}
int
seaf_repo_manager_start (SeafRepoManager *mgr)
{
return 0;
}
static gboolean
repo_exists_in_db (SeafDB *db, const char *id)
{
char sql[256];
gboolean db_err = FALSE;
snprintf (sql, sizeof(sql), "SELECT repo_id FROM Repo WHERE repo_id = '%s'",
id);
return seaf_db_check_for_existence (db, sql, &db_err);
}
SeafRepo*
seaf_repo_manager_get_repo (SeafRepoManager *manager, const gchar *id)
{
SeafRepo repo;
int len = strlen(id);
if (len >= 37)
return NULL;
memcpy (repo.id, id, len + 1);
if (repo_exists_in_db (manager->seaf->db, id)) {
SeafRepo *ret = load_repo (manager, id);
if (!ret)
return NULL;
/* seaf_repo_ref (ret); */
return ret;
}
return NULL;
}
gboolean
seaf_repo_manager_repo_exists (SeafRepoManager *manager, const gchar *id)
{
SeafRepo repo;
memcpy (repo.id, id, 37);
return repo_exists_in_db (manager->seaf->db, id);
}
static void
load_repo_commit (SeafRepoManager *manager,
SeafRepo *repo,
SeafBranch *branch)
{
SeafCommit *commit;
commit = seaf_commit_manager_get_commit_compatible (manager->seaf->commit_mgr,
repo->id,
branch->commit_id);
if (!commit) {
seaf_warning ("Commit %s is missing\n", branch->commit_id);
repo->is_corrupted = TRUE;
return;
}
set_head_common (repo, branch);
seaf_repo_from_commit (repo, commit);
seaf_commit_unref (commit);
}
static gboolean
load_virtual_info (SeafDBRow *row, void *vrepo_id)
{
char *ret_repo_id = vrepo_id;
const char *origin_repo_id;
origin_repo_id = seaf_db_row_get_column_text (row, 0);
memcpy (ret_repo_id, origin_repo_id, 37);
return FALSE;
}
char *
get_origin_repo_id (SeafRepoManager *mgr, const char *repo_id)
{
char sql[256];
char origin_repo_id[37];
memset (origin_repo_id, 0, 37);
snprintf (sql, 256,
"SELECT origin_repo FROM VirtualRepo "
"WHERE repo_id = '%s'", repo_id);
seaf_db_foreach_selected_row (seaf->db, sql, load_virtual_info, origin_repo_id);
if (origin_repo_id[0] != 0)
return g_strdup(origin_repo_id);
else
return NULL;
}
static SeafRepo *
load_repo (SeafRepoManager *manager, const char *repo_id)
{
SeafRepo *repo;
SeafBranch *branch;
repo = seaf_repo_new(repo_id, NULL, NULL);
if (!repo) {
seaf_warning ("[repo mgr] failed to alloc repo.\n");
return NULL;
}
repo->manager = manager;
branch = seaf_branch_manager_get_branch (seaf->branch_mgr, repo_id, "master");
if (!branch) {
seaf_warning ("Failed to get master branch of repo %.8s.\n", repo_id);
repo->is_corrupted = TRUE;
} else {
load_repo_commit (manager, repo, branch);
seaf_branch_unref (branch);
}
if (repo->is_corrupted) {
seaf_warning ("Repo %.8s is corrupted.\n", repo->id);
seaf_repo_free (repo);
return NULL;
}
char *origin_repo_id = get_origin_repo_id (manager, repo->id);
if (origin_repo_id)
memcpy (repo->store_id, origin_repo_id, 36);
else
memcpy (repo->store_id, repo->id, 36);
g_free (origin_repo_id);
return repo;
}
static gboolean
collect_repo_id (SeafDBRow *row, void *data)
{
GList **p_ids = data;
const char *repo_id;
repo_id = seaf_db_row_get_column_text (row, 0);
*p_ids = g_list_prepend (*p_ids, g_strdup(repo_id));
return TRUE;
}
GList *
seaf_repo_manager_get_repo_id_list (SeafRepoManager *mgr)
{
GList *ret = NULL;
char sql[256];
snprintf (sql, 256, "SELECT repo_id FROM Repo");
if (seaf_db_foreach_selected_row (mgr->seaf->db, sql,
collect_repo_id, &ret) < 0)
return NULL;
return ret;
}
GList *
seaf_repo_manager_get_repo_list (SeafRepoManager *mgr, int start, int limit)
{
GList *id_list = NULL, *ptr;
GList *ret = NULL;
SeafRepo *repo;
char sql[256];
if (start == -1 && limit == -1)
snprintf (sql, 256, "SELECT repo_id FROM Repo");
else
snprintf (sql, 256, "SELECT repo_id FROM Repo LIMIT %d, %d", start, limit);
if (seaf_db_foreach_selected_row (mgr->seaf->db, sql,
collect_repo_id, &id_list) < 0)
return NULL;
for (ptr = id_list; ptr; ptr = ptr->next) {
char *repo_id = ptr->data;
repo = seaf_repo_manager_get_repo (mgr, repo_id);
if (repo != NULL)
ret = g_list_prepend (ret, repo);
}
string_list_free (id_list);
return g_list_reverse (ret);
}
GList *
seaf_repo_manager_get_repos_by_owner (SeafRepoManager *mgr,
const char *email)
{
GList *id_list = NULL, *ptr;
GList *ret = NULL;
char sql[256];
snprintf (sql, 256, "SELECT repo_id FROM RepoOwner WHERE owner_id='%s'",
email);
if (seaf_db_foreach_selected_row (mgr->seaf->db, sql,
collect_repo_id, &id_list) < 0)
return NULL;
for (ptr = id_list; ptr; ptr = ptr->next) {
char *repo_id = ptr->data;
SeafRepo *repo = seaf_repo_manager_get_repo (mgr, repo_id);
if (repo != NULL)
ret = g_list_prepend (ret, repo);
}
string_list_free (id_list);
return ret;
}
gboolean
seaf_repo_manager_is_virtual_repo (SeafRepoManager *mgr, const char *repo_id)
{
char sql[256];
gboolean db_err;
snprintf (sql, 256,
"SELECT 1 FROM VirtualRepo WHERE repo_id = '%s'", repo_id);
return seaf_db_check_for_existence (seaf->db, sql, &db_err);
}

View File

@ -1,100 +0,0 @@
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef SEAF_REPO_MGR_H
#define SEAF_REPO_MGR_H
#include <pthread.h>
#include "seafile-object.h"
#include "commit-mgr.h"
#include "branch-mgr.h"
struct _SeafRepoManager;
typedef struct _SeafRepo SeafRepo;
struct _SeafRepo {
struct _SeafRepoManager *manager;
gchar id[37];
gchar *name;
gchar *desc;
gchar *category; /* not used yet */
gboolean encrypted;
int enc_version;
gchar magic[33]; /* hash(repo_id + passwd), key stretched. */
gboolean no_local_history;
SeafBranch *head;
gboolean is_corrupted;
gboolean delete_pending;
int ref_cnt;
int version;
/* Used to access the fs and block store (illustrated below the struct).
* This id is different from repo_id when this repo is virtual.
* Virtual repos share the fs and block store with their origin repo.
* However, commit store for each repo is always independent.
* So always use repo_id to access commit store.
*/
gchar store_id[37];
};
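/*
 * Illustration of the store_id rule above (not new API; these calls appear
 * elsewhere in this same commit): fs and block objects are looked up with
 * store_id, while commits always use the repo's own id.
 *
 *   dir    = seaf_fs_manager_get_seafdir (seaf->fs_mgr,
 *                                         repo->store_id, repo->version, obj_id);
 *   commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
 *                                            repo->id, repo->version,
 *                                            branch->commit_id);
 */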
gboolean is_repo_id_valid (const char *id);
SeafRepo*
seaf_repo_new (const char *id, const char *name, const char *desc);
void
seaf_repo_free (SeafRepo *repo);
void
seaf_repo_ref (SeafRepo *repo);
void
seaf_repo_unref (SeafRepo *repo);
typedef struct _SeafRepoManager SeafRepoManager;
typedef struct _SeafRepoManagerPriv SeafRepoManagerPriv;
struct _SeafRepoManager {
struct _SeafileSession *seaf;
SeafRepoManagerPriv *priv;
};
SeafRepoManager*
seaf_repo_manager_new (struct _SeafileSession *seaf);
int
seaf_repo_manager_init (SeafRepoManager *mgr);
int
seaf_repo_manager_start (SeafRepoManager *mgr);
int
seaf_repo_manager_add_repo (SeafRepoManager *mgr, SeafRepo *repo);
int
seaf_repo_manager_del_repo (SeafRepoManager *mgr, SeafRepo *repo);
SeafRepo*
seaf_repo_manager_get_repo (SeafRepoManager *manager, const gchar *id);
gboolean
seaf_repo_manager_repo_exists (SeafRepoManager *manager, const gchar *id);
GList*
seaf_repo_manager_get_repo_list (SeafRepoManager *mgr, int start, int limit);
GList *
seaf_repo_manager_get_repo_id_list (SeafRepoManager *mgr);
GList *
seaf_repo_manager_get_repos_by_owner (SeafRepoManager *mgr,
const char *email);
gboolean
seaf_repo_manager_is_virtual_repo (SeafRepoManager *mgr, const char *repo_id);
#endif

View File

@ -1,355 +0,0 @@
#include "common.h"
#include <unistd.h>
#include <getopt.h>
#define FUSE_USE_VERSION 26
#include <fuse.h>
#include <fuse_opt.h>
#include <glib.h>
#include <glib-object.h>
#include <ccnet.h>
#include <seaf-db.h>
#include "log.h"
#include "utils.h"
#include "seaf-fuse.h"
CcnetClient *ccnet_client = NULL;
SeafileSession *seaf = NULL;
static char *parse_repo_id (const char *repo_id_name)
{
if (strlen(repo_id_name) < 36)
return NULL;
return g_strndup(repo_id_name, 36);
}
/*
* Path format can be:
* 1. / --> list all users
* 2. /user --> list libraries owned by user
* 3. /user/repo-id_name --> list root of the library
* 4. /user/repo-id_name/repo_path --> list library content
*/
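/*
 * Illustrative parse results (hypothetical example values):
 *
 *   "/"                                      -> n_parts = 0
 *   "/alice@example.com"                     -> n_parts = 1, user set
 *   "/alice@example.com/<36-char-id>_Docs"   -> n_parts = 2, repo_id = <36-char-id>,
 *                                               repo_path = "/"
 *   "/alice@example.com/<36-char-id>_Docs/a/b.txt"
 *                                            -> n_parts = 3, repo_path = "a/b.txt"
 */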
int parse_fuse_path (const char *path,
int *n_parts, char **user, char **repo_id, char **repo_path)
{
char **tokens;
int n;
int ret = 0;
*user = NULL;
*repo_id = NULL;
*repo_path = NULL;
if (*path == '/')
++path;
tokens = g_strsplit (path, "/", 3);
n = g_strv_length (tokens);
*n_parts = n;
switch (n) {
case 0:
break;
case 1:
*user = g_strdup(tokens[0]);
break;
case 2:
*repo_id = parse_repo_id(tokens[1]);
if (*repo_id == NULL) {
ret = -1;
break;
}
*user = g_strdup(tokens[0]);
*repo_path = g_strdup("/");
break;
case 3:
*repo_id = parse_repo_id(tokens[1]);
if (*repo_id == NULL) {
ret = -1;
break;
}
*user = g_strdup(tokens[0]);
*repo_path = g_strdup(tokens[2]);
break;
}
g_strfreev (tokens);
return ret;
}
static int seaf_fuse_getattr(const char *path, struct stat *stbuf)
{
memset(stbuf, 0, sizeof(struct stat));
return do_getattr(seaf, path, stbuf);
}
static int seaf_fuse_readdir(const char *path, void *buf,
fuse_fill_dir_t filler, off_t offset,
struct fuse_file_info *info)
{
filler(buf, ".", NULL, 0);
filler(buf, "..", NULL, 0);
return do_readdir(seaf, path, buf, filler, offset, info);
}
static int seaf_fuse_open(const char *path, struct fuse_file_info *info)
{
int n_parts;
char *user, *repo_id, *repo_path;
SeafRepo *repo = NULL;
SeafBranch *branch = NULL;
SeafCommit *commit = NULL;
guint32 mode = 0;
int ret = 0;
/* Now we only support read-only mode */
if ((info->flags & 3) != O_RDONLY)
return -EACCES;
if (parse_fuse_path (path, &n_parts, &user, &repo_id, &repo_path) < 0) {
seaf_warning ("Invalid input path %s.\n", path);
return -ENOENT;
}
if (n_parts != 2 && n_parts != 3) {
seaf_warning ("Invalid input path for open: %s.\n", path);
ret = -EACCES;
goto out;
}
repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);
if (!repo) {
seaf_warning ("Failed to get repo %s.\n", repo_id);
ret = -ENOENT;
goto out;
}
branch = repo->head;
commit = seaf_commit_manager_get_commit(seaf->commit_mgr,
repo->id,
repo->version,
branch->commit_id);
if (!commit) {
seaf_warning ("Failed to get commit %s:%.8s.\n", repo->id, branch->commit_id);
ret = -ENOENT;
goto out;
}
char *id = seaf_fs_manager_path_to_obj_id(seaf->fs_mgr,
repo->store_id, repo->version,
commit->root_id,
repo_path, &mode, NULL);
if (!id) {
seaf_warning ("Path %s doesn't exist in repo %s.\n", repo_path, repo_id);
ret = -ENOENT;
goto out;
}
g_free (id);
if (!S_ISREG(mode))
return -EACCES;
out:
g_free (user);
g_free (repo_id);
g_free (repo_path);
seaf_repo_unref (repo);
seaf_commit_unref (commit);
return ret;
}
static int seaf_fuse_read(const char *path, char *buf, size_t size,
off_t offset, struct fuse_file_info *info)
{
int n_parts;
char *user, *repo_id, *repo_path;
SeafRepo *repo = NULL;
SeafBranch *branch = NULL;
SeafCommit *commit = NULL;
Seafile *file = NULL;
char *file_id = NULL;
int ret = 0;
/* Now we only support read-only mode */
if ((info->flags & 3) != O_RDONLY)
return -EACCES;
if (parse_fuse_path (path, &n_parts, &user, &repo_id, &repo_path) < 0) {
seaf_warning ("Invalid input path %s.\n", path);
return -ENOENT;
}
if (n_parts != 2 && n_parts != 3) {
seaf_warning ("Invalid input path for open: %s.\n", path);
ret = -EACCES;
goto out;
}
repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);
if (!repo) {
seaf_warning ("Failed to get repo %s.\n", repo_id);
ret = -ENOENT;
goto out;
}
branch = repo->head;
commit = seaf_commit_manager_get_commit(seaf->commit_mgr,
repo->id,
repo->version,
branch->commit_id);
if (!commit) {
seaf_warning ("Failed to get commit %s:%.8s.\n", repo->id, branch->commit_id);
ret = -ENOENT;
goto out;
}
file_id = seaf_fs_manager_get_seafile_id_by_path(seaf->fs_mgr,
repo->store_id, repo->version,
commit->root_id,
repo_path, NULL);
if (!file_id) {
seaf_warning ("Path %s doesn't exist in repo %s.\n", repo_path, repo_id);
ret = -ENOENT;
goto out;
}
file = seaf_fs_manager_get_seafile(seaf->fs_mgr,
repo->store_id, repo->version, file_id);
if (!file) {
ret = -ENOENT;
goto out;
}
ret = read_file(seaf, repo->store_id, repo->version,
file, buf, size, offset, info);
seafile_unref (file);
out:
g_free (user);
g_free (repo_id);
g_free (repo_path);
g_free (file_id);
seaf_repo_unref (repo);
seaf_commit_unref (commit);
return ret;
}
struct options {
char *central_config_dir;
char *config_dir;
char *seafile_dir;
char *log_file;
} options;
#define SEAF_FUSE_OPT_KEY(t, p, v) { t, offsetof(struct options, p), v }
enum {
KEY_VERSION,
KEY_HELP,
};
static struct fuse_opt seaf_fuse_opts[] = {
SEAF_FUSE_OPT_KEY("-c %s", config_dir, 0),
SEAF_FUSE_OPT_KEY("--config %s", config_dir, 0),
SEAF_FUSE_OPT_KEY("-F %s", central_config_dir, 0),
SEAF_FUSE_OPT_KEY("--central-config-dir %s", central_config_dir, 0),
SEAF_FUSE_OPT_KEY("-d %s", seafile_dir, 0),
SEAF_FUSE_OPT_KEY("--seafdir %s", seafile_dir, 0),
SEAF_FUSE_OPT_KEY("-l %s", log_file, 0),
SEAF_FUSE_OPT_KEY("--logfile %s", log_file, 0),
FUSE_OPT_KEY("-V", KEY_VERSION),
FUSE_OPT_KEY("--version", KEY_VERSION),
FUSE_OPT_KEY("-h", KEY_HELP),
FUSE_OPT_KEY("--help", KEY_HELP),
FUSE_OPT_END
};
static struct fuse_operations seaf_fuse_ops = {
.getattr = seaf_fuse_getattr,
.readdir = seaf_fuse_readdir,
.open = seaf_fuse_open,
.read = seaf_fuse_read,
};
int main(int argc, char *argv[])
{
struct fuse_args args = FUSE_ARGS_INIT(argc, argv);
const char *debug_str = NULL;
char *config_dir = DEFAULT_CONFIG_DIR;
char *central_config_dir = NULL;
char *seafile_dir = NULL;
char *logfile = NULL;
char *ccnet_debug_level_str = "info";
char *seafile_debug_level_str = "debug";
int ret;
memset(&options, 0, sizeof(struct options));
if (fuse_opt_parse(&args, &options, seaf_fuse_opts, NULL) == -1) {
seaf_warning("Parse argument Failed\n");
exit(1);
}
g_type_init();
config_dir = options.config_dir ? : DEFAULT_CONFIG_DIR;
config_dir = ccnet_expand_path (config_dir);
central_config_dir = options.central_config_dir;
if (!debug_str)
debug_str = g_getenv("SEAFILE_DEBUG");
seafile_debug_set_flags_string(debug_str);
if (!options.seafile_dir)
seafile_dir = g_build_filename(config_dir, "seafile", NULL);
else
seafile_dir = options.seafile_dir;
if (!options.log_file)
logfile = g_build_filename(seafile_dir, "seaf-fuse.log", NULL);
else
logfile = options.log_file;
if (seafile_log_init(logfile, ccnet_debug_level_str,
seafile_debug_level_str) < 0) {
fprintf(stderr, "Failed to init log.\n");
exit(1);
}
ccnet_client = ccnet_client_new();
if ((ccnet_client_load_confdir(ccnet_client, central_config_dir, config_dir)) < 0) {
seaf_warning("Read config dir error\n");
exit(1);
}
seaf = seafile_session_new(central_config_dir, seafile_dir, ccnet_client);
if (!seaf) {
seaf_warning("Failed to create seafile session.\n");
exit(1);
}
if (seafile_session_init(seaf) < 0) {
seaf_warning("Failed to init seafile session.\n");
exit(1);
}
seaf->client_pool = ccnet_client_pool_new(central_config_dir, config_dir);
if (!seaf->client_pool) {
seaf_warning("Failed to creat client pool\n");
exit(1);
}
set_syslog_config (seaf->config);
ret = fuse_main(args.argc, args.argv, &seaf_fuse_ops, NULL);
fuse_opt_free_args(&args);
return ret;
}

View File

@ -1,29 +0,0 @@
#ifndef SEAF_FUSE_H
#define SEAF_FUSE_H
#include "seafile-session.h"
int parse_fuse_path (const char *path,
int *n_parts, char **user, char **repo_id, char **repo_path);
SeafDirent *
fuse_get_dirent_by_path (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *root_id,
const char *path);
/* file.c */
int read_file(SeafileSession *seaf, const char *store_id, int version,
Seafile *file, char *buf, size_t size,
off_t offset, struct fuse_file_info *info);
/* getattr.c */
int do_getattr(SeafileSession *seaf, const char *path, struct stat *stbuf);
/* readdir.c */
int do_readdir(SeafileSession *seaf, const char *path, void *buf,
fuse_fill_dir_t filler, off_t offset,
struct fuse_file_info *info);
#endif /* SEAF_FUSE_H */

View File

@ -1,120 +0,0 @@
#include "common.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <ccnet.h>
#include <utils.h>
#include <locale.h>
#include "seafile-session.h"
#include "seaf-utils.h"
#include "log.h"
SeafileSession *
seafile_session_new(const char *central_config_dir,
const char *seafile_dir,
CcnetClient *ccnet_session)
{
char *abs_central_config_dir = NULL;
char *abs_seafile_dir;
char *tmp_file_dir;
char *config_file_path;
struct stat st;
GKeyFile *config;
SeafileSession *session = NULL;
if (!ccnet_session)
return NULL;
abs_seafile_dir = ccnet_expand_path (seafile_dir);
tmp_file_dir = g_build_filename(abs_seafile_dir, "tmpfiles", NULL);
if (central_config_dir) {
abs_central_config_dir = ccnet_expand_path (central_config_dir);
}
config_file_path = g_build_filename(
abs_central_config_dir ? abs_central_config_dir : abs_seafile_dir,
"seafile.conf", NULL);
if (g_stat(abs_seafile_dir, &st) < 0 || !S_ISDIR(st.st_mode)) {
seaf_warning ("Seafile data dir %s does not exist and is unable to create\n",
abs_seafile_dir);
goto onerror;
}
if (g_stat(tmp_file_dir, &st) < 0 || !S_ISDIR(st.st_mode)) {
seaf_warning("Seafile tmp dir %s does not exist and is unable to create\n",
tmp_file_dir);
goto onerror;
}
GError *error = NULL;
config = g_key_file_new ();
if (!g_key_file_load_from_file (config, config_file_path,
G_KEY_FILE_NONE, &error)) {
seaf_warning ("Failed to load config file.\n");
g_key_file_free (config);
goto onerror;
}
session = g_new0(SeafileSession, 1);
session->seaf_dir = abs_seafile_dir;
session->tmp_file_dir = tmp_file_dir;
session->session = ccnet_session;
session->config = config;
if (load_database_config (session) < 0) {
seaf_warning ("Failed to load database config.\n");
goto onerror;
}
session->fs_mgr = seaf_fs_manager_new (session, abs_seafile_dir);
if (!session->fs_mgr)
goto onerror;
session->block_mgr = seaf_block_manager_new (session, abs_seafile_dir);
if (!session->block_mgr)
goto onerror;
session->commit_mgr = seaf_commit_manager_new (session);
if (!session->commit_mgr)
goto onerror;
session->repo_mgr = seaf_repo_manager_new (session);
if (!session->repo_mgr)
goto onerror;
session->branch_mgr = seaf_branch_manager_new (session);
if (!session->branch_mgr)
goto onerror;
return session;
onerror:
free (abs_seafile_dir);
g_free (config_file_path);
g_free (session);
return NULL;
}
int
seafile_session_init (SeafileSession *session)
{
if (seaf_commit_manager_init (session->commit_mgr) < 0)
return -1;
if (seaf_fs_manager_init (session->fs_mgr) < 0)
return -1;
if (seaf_branch_manager_init (session->branch_mgr) < 0)
return -1;
if (seaf_repo_manager_init (session->repo_mgr) < 0)
return -1;
return 0;
}
int
seafile_session_start (SeafileSession *session)
{
return 0;
}

View File

@ -1,52 +0,0 @@
#ifndef SEAFILE_SESSION_H
#define SEAFILE_SESSION_H
#include <stdint.h>
#include <glib.h>
#include <seaf-db.h>
#include "block-mgr.h"
#include "fs-mgr.h"
#include "branch-mgr.h"
#include "commit-mgr.h"
#include "repo-mgr.h"
struct _CcnetClient;
typedef struct _SeafileSession SeafileSession;
struct CcnetClientPool;
struct _SeafileSession {
struct _CcnetClient *session;
char *seaf_dir;
char *tmp_file_dir;
/* Config that's only loaded on start */
GKeyFile *config;
SeafDB *db;
struct CcnetClientPool *client_pool;
SeafBlockManager *block_mgr;
SeafFSManager *fs_mgr;
SeafBranchManager *branch_mgr;
SeafCommitManager *commit_mgr;
SeafRepoManager *repo_mgr;
};
extern SeafileSession *seaf;
SeafileSession *
seafile_session_new(const char *central_config_dir,
const char *seafile_dir,
struct _CcnetClient *ccnet_session);
int
seafile_session_init (SeafileSession *session);
int
seafile_session_start (SeafileSession *session);
#endif

View File

@ -3,4 +3,4 @@ seafiledir = $(includedir)/seafile
noinst_HEADERS = seafile-error.h
seafile_HEADERS = seafile-rpc.h monitor-rpc.h seafile.h
seafile_HEADERS = seafile-rpc.h seafile.h

View File

@ -1,13 +0,0 @@
#ifndef MONITOR_RPC_H
#define MONITOR_RPC_H
/**
* monitor_compute_repo_size:
* @repo_id: repo id
*
* Returns 0 if successfully scheduled computation.
*/
int
monitor_compute_repo_size (const char *repo_id, GError **error);
#endif

View File

@ -1,3 +0,0 @@
### Seafile Integration Tests
The purpose of integration tests is to build a seafile release package and run tests against it.

View File

@ -1,259 +0,0 @@
#!/usr/bin/env python
#coding: UTF-8
import os
from os.path import abspath, basename, exists, dirname, join
import sys
import argparse
import re
from collections import namedtuple
import requests
from pexpect import spawn
from utils import green, red, debug, info, warning, cd, shell, chdir, setup_logging
USERNAME = 'test@seafiletest.com'
PASSWORD = 'testtest'
ADMIN_USERNAME = 'admin@seafiletest.com'
ADMIN_PASSWORD = 'adminadmin'
MYSQL_ROOT_PASSWD = 's123'
ServerConfig = namedtuple('ServerConfig', [
'installdir',
'tarball',
'version',
'initmode',
])
def setup_server(cfg, db):
'''Set up the seafile server with the setup-seafile.sh script. We use pexpect to
interact with the script's interactive setup process.
'''
info('uncompressing server tarball')
shell('tar xf seafile-server_{}_x86-64.tar.gz -C {}'
.format(cfg.version, cfg.installdir))
if db == 'mysql':
autosetup_mysql(cfg)
else:
autosetup_sqlite3(cfg)
with open(join(cfg.installdir, 'conf/seahub_settings.py'), 'a') as fp:
fp.write('\n')
fp.write('DEBUG = True')
fp.write('\n')
fp.write('''\
REST_FRAMEWORK = {
'DEFAULT_THROTTLE_RATES': {
'ping': '600/minute',
'anon': '1000/minute',
'user': '1000/minute',
},
}''')
fp.write('\n')
def autosetup_sqlite3(cfg):
setup_script = get_script(cfg, 'setup-seafile.sh')
shell('''sed -i -e '/^check_root;.*/d' "{}"'''.format(setup_script))
if cfg.initmode == 'prompt':
setup_sqlite3_prompt(setup_script)
else:
setup_sqlite3_auto(setup_script)
def setup_sqlite3_prompt(setup_script):
info('setting up seafile server with pexpect, script %s', setup_script)
answers = [
('ENTER', ''),
# server name
('server name', 'my-seafile'),
# ip or domain
('ip or domain', '127.0.0.1'),
# seafile data dir
('seafile-data', ''),
# fileserver port
('seafile fileserver', ''),
('ENTER', ''),
('ENTER', ''),
]
_answer_questions(setup_script, answers)
def setup_sqlite3_auto(setup_script):
info('setting up seafile server in auto mode, script %s', setup_script)
env = os.environ.copy()
env['SERVER_IP'] = '127.0.0.1'
shell('%s auto -n my-seafile' % setup_script, env=env)
def createdbs():
sql = '''\
create database `ccnet-existing` character set = 'utf8';
create database `seafile-existing` character set = 'utf8';
create database `seahub-existing` character set = 'utf8';
create user 'seafile'@'localhost' identified by 'seafile';
GRANT ALL PRIVILEGES ON `ccnet-existing`.* to `seafile`@localhost;
GRANT ALL PRIVILEGES ON `seafile-existing`.* to `seafile`@localhost;
GRANT ALL PRIVILEGES ON `seahub-existing`.* to `seafile`@localhost;
'''
shell('mysql -u root -p%s' % MYSQL_ROOT_PASSWD, inputdata=sql)
def autosetup_mysql(cfg):
setup_script = get_script(cfg, 'setup-seafile-mysql.sh')
if not exists(setup_script):
print 'please specify seafile script path'
if cfg.initmode == 'prompt':
createdbs()
setup_mysql_prompt(setup_script)
else:
# in auto mode, test create new db
setup_mysql_auto(setup_script)
def setup_mysql_prompt(setup_script):
info('setting up seafile server with pexpect, script %s', setup_script)
answers = [
('ENTER', ''),
# server name
('server name', 'my-seafile'),
# ip or domain
('ip or domain', '127.0.0.1'),
# seafile data dir
('seafile-data', ''),
# fileserver port
('seafile fileserver', ''),
# use existing
('choose a way to initialize seafile databases', '2'),
('host of mysql server', ''),
('port of mysql server', ''),
('Which mysql user', 'seafile'),
('password for mysql user', 'seafile'),
('ccnet database', 'ccnet-existing'),
('seafile database', 'seafile-existing'),
('seahub database', 'seahub-existing'),
('ENTER', ''),
]
_answer_questions(abspath(setup_script), answers)
def setup_mysql_auto(setup_script):
info('setting up seafile server in auto mode, script %s', setup_script)
env = os.environ.copy()
env['MYSQL_USER'] = 'seafile-new'
env['MYSQL_USER_PASSWD'] = 'seafile'
env['MYSQL_ROOT_PASSWD']= MYSQL_ROOT_PASSWD
env['CCNET_DB'] = 'ccnet-new'
env['SEAFILE_DB'] = 'seafile-new'
env['SEAHUB_DB'] = 'seahub-new'
shell('%s auto -n my-seafile -e 0' % setup_script, env=env)
def start_server(cfg):
with cd(cfg.installdir):
shell('find . -maxdepth 2 | sort | xargs ls -lhd')
seafile_sh = get_script(cfg, 'seafile.sh')
shell('{} start'.format(seafile_sh))
info('starting seahub')
seahub_sh = get_script(cfg, 'seahub.sh')
answers = [
# admin email/pass
('admin email', ADMIN_USERNAME),
('admin password', ADMIN_PASSWORD),
('admin password again', ADMIN_PASSWORD),
]
_answer_questions('{} start'.format(abspath(seahub_sh)), answers)
with cd(cfg.installdir):
shell('find . -maxdepth 2 | sort | xargs ls -lhd')
# shell('sqlite3 ccnet/PeerMgr/usermgr.db "select * from EmailUser"', cwd=INSTALLDIR)
shell('http -v localhost:8000/api2/server-info/ || true')
# shell('http -v -f POST localhost:8000/api2/auth-token/ username=admin@seafiletest.com password=adminadmin || true')
shell('netstat -nltp')
def _answer_questions(cmd, answers):
info('expect: spawning %s', cmd)
child = spawn(cmd)
child.logfile = sys.stdout
def autofill(pattern, line):
child.expect(pattern)
child.sendline(line)
for k, v in answers:
autofill(k, v)
child.sendline('')
child.logfile = None
child.interact()
def get_script(cfg, path):
"""
:type cfg: ServerConfig
"""
return join(server_dir(cfg), path)
def server_dir(cfg):
"""
:type cfg: ServerConfig
"""
return join(cfg.installdir, 'seafile-server-{}'.format(cfg.version))
def apiurl(path):
path = path.lstrip('/')
root = os.environ.get('SEAFILE_SERVER', 'http://127.0.0.1:8000')
return '{}/api2/{}'.format(root, path)
def create_test_user(cfg):
data = {'username': ADMIN_USERNAME, 'password': ADMIN_PASSWORD, }
res = requests.post(apiurl('/auth-token/'), data=data)
debug('%s %s', res.status_code, res.text)
token = res.json()['token']
data = {'password': PASSWORD, }
headers = {'Authorization': 'Token ' + token}
res = requests.put(
apiurl('/accounts/{}/'.format(USERNAME)),
data=data,
headers=headers)
assert res.status_code == 201
def main():
ap = argparse.ArgumentParser()
ap.add_argument('-v', '--verbose', action='store_true')
ap.add_argument('--db', choices=('sqlite3', 'mysql'), default='sqlite3')
ap.add_argument('installdir')
ap.add_argument('tarball')
args = ap.parse_args()
if not exists(args.installdir):
print 'directory {} does not exist'.format(args.installdir)
sys.exit(1)
if os.listdir(args.installdir):
print 'directory {} is not empty'.format(args.installdir)
sys.exit(1)
if not exists(args.tarball):
print 'file {} does not exist'.format(args.tarball)
sys.exit(1)
    m = re.match(r'^.*?_([\d\.]+).*?\.tar\.gz$', basename(args.tarball))
    if not m:
        sys.exit('could not parse version from tarball name %s' % basename(args.tarball))
    version = m.group(1)
cfg = ServerConfig(installdir=args.installdir,
tarball=args.tarball,
version=version)
setup_server(cfg, args.db)
start_server(cfg)
create_test_user(cfg)
if __name__ == '__main__':
setup_logging()
main()

View File

@ -1,72 +0,0 @@
#!/bin/bash
set -e -x
pip install http://effbot.org/media/downloads/PIL-1.1.7.tar.gz
pip install -r ./integration-tests/requirements.txt
pushd $HOME
# download precompiled libevhtp
libevhtp_bin=libevhtp-bin_1.2.0.tar.gz
wget https://dl.bintray.com/lins05/generic/libevhtp-bin/$libevhtp_bin
tar xf $libevhtp_bin
find $HOME/opt
# download precompiled libzdb
# zdb_bin=libzdb-bin_2.11.1.tar.gz
# wget https://dl.bintray.com/lins05/generic/libzdb-bin/$zdb_bin
# tar xf $zdb_bin
# sed -i -e "s|prefix=/opt/local|prefix=$HOME/opt/local|g" $HOME/opt/local/lib/pkgconfig/zdb.pc
# find $HOME/opt
pushd /tmp/
git clone --depth=1 https://github.com/haiwen/libzdb.git
cd libzdb
./bootstrap
./configure --prefix=$HOME/opt/local
make -j2
make install
popd
# download seahub thirdpart python libs
WGET="wget --no-check-certificate"
downloads=$HOME/downloads
thirdpart=$HOME/thirdpart
mkdir -p $downloads $thirdpart
cd $thirdpart
save_pythonpath=$PYTHONPATH
export PYTHONPATH=.
urls=(
https://pypi.python.org/packages/source/p/pytz/pytz-2016.1.tar.gz
https://www.djangoproject.com/m/releases/1.8/Django-1.8.10.tar.gz
https://pypi.python.org/packages/source/d/django-statici18n/django-statici18n-1.1.3.tar.gz
https://pypi.python.org/packages/source/d/djangorestframework/djangorestframework-3.3.2.tar.gz
https://pypi.python.org/packages/source/d/django_compressor/django_compressor-1.4.tar.gz
https://pypi.python.org/packages/source/j/jsonfield/jsonfield-1.0.3.tar.gz
https://pypi.python.org/packages/source/d/django-post_office/django-post_office-2.0.6.tar.gz
http://pypi.python.org/packages/source/g/gunicorn/gunicorn-19.4.5.tar.gz
http://pypi.python.org/packages/source/f/flup/flup-1.0.2.tar.gz
https://pypi.python.org/packages/source/c/chardet/chardet-2.3.0.tar.gz
https://labix.org/download/python-dateutil/python-dateutil-1.5.tar.gz
https://pypi.python.org/packages/source/s/six/six-1.9.0.tar.gz
https://pypi.python.org/packages/source/d/django-picklefield/django-picklefield-0.3.2.tar.gz
https://pypi.python.org/packages/source/d/django-constance/django-constance-1.0.1.tar.gz
https://pypi.python.org/packages/source/j/jdcal/jdcal-1.2.tar.gz
https://pypi.python.org/packages/source/e/et_xmlfile/et_xmlfile-1.0.1.tar.gz
https://pypi.python.org/packages/source/o/openpyxl/openpyxl-2.3.0.tar.gz
)
for url in ${urls[*]}; do
path="${downloads}/$(basename $url)"
if [[ ! -e $path ]]; then
$WGET -O $path $url
fi
easy_install -d . $path
done
export PYTHONPATH=$save_pythonpath
popd

View File

@ -1,7 +0,0 @@
termcolor==1.1.0
prettytable==0.7.2
pexpect==4.0
requests==2.8.0
httpie
django-constance[database]
MySQL-python==1.2.5

View File

@ -1,299 +0,0 @@
#!/usr/bin/env python
import os
from os.path import abspath, basename, exists, expanduser, join
import sys
import re
import glob
import json
import logging
import requests
import termcolor
from pexpect import spawn
from utils import green, red, debug, info, warning, cd, shell, chdir, setup_logging
from autosetup import (setup_server, ServerConfig, get_script, server_dir,
start_server, create_test_user, MYSQL_ROOT_PASSWD)
TOPDIR = abspath(join(os.getcwd(), '..'))
PREFIX = expanduser('~/opt/local')
SRCDIR = '/tmp/src'
INSTALLDIR = '/tmp/haiwen'
THIRDPARTDIR = expanduser('~/thirdpart')
logger = logging.getLogger(__file__)
seafile_version = ''
TRAVIS_BRANCH = os.environ.get('TRAVIS_BRANCH', 'master')
def make_build_env():
env = dict(os.environ)
libsearpc_dir = abspath(join(TOPDIR, 'libsearpc'))
ccnet_dir = abspath(join(TOPDIR, 'ccnet'))
def _env_add(*a, **kw):
kw['env'] = env
return prepend_env_value(*a, **kw)
_env_add('CPPFLAGS', '-I%s' % join(PREFIX, 'include'), seperator=' ')
_env_add('LDFLAGS', '-L%s' % os.path.join(PREFIX, 'lib'), seperator=' ')
_env_add('LDFLAGS', '-L%s' % os.path.join(PREFIX, 'lib64'), seperator=' ')
_env_add('PATH', os.path.join(PREFIX, 'bin'))
_env_add('PATH', THIRDPARTDIR)
_env_add('PKG_CONFIG_PATH', os.path.join(PREFIX, 'lib', 'pkgconfig'))
_env_add('PKG_CONFIG_PATH', os.path.join(PREFIX, 'lib64', 'pkgconfig'))
_env_add('PKG_CONFIG_PATH', libsearpc_dir)
_env_add('PKG_CONFIG_PATH', ccnet_dir)
for key in ('PATH', 'PKG_CONFIG_PATH', 'CPPFLAGS', 'LDFLAGS',
'PYTHONPATH'):
info('%s: %s', key, env.get(key, ''))
return env
def prepend_env_value(name, value, seperator=':', env=None):
    '''Prepend a value to a separator-joined environment variable'''
env = env or os.environ
current_value = env.get(name, '')
new_value = value
if current_value:
new_value += seperator + current_value
env[name] = new_value
return env
def get_project_branch(project, default_branch='master'):
if project.name == 'seafile':
return TRAVIS_BRANCH
conf = json.loads(requests.get(
'https://raw.githubusercontent.com/haiwen/seafile-test-deploy/master/branches.json').text)
return conf.get(TRAVIS_BRANCH, {}).get(project.name,
default_branch)
class Project(object):
configure_cmd = './configure'
def __init__(self, name):
self.name = name
self.version = ''
@property
def url(self):
return 'https://www.github.com/haiwen/{}.git'.format(self.name)
@property
def projectdir(self):
return join(TOPDIR, self.name)
@property
def branch(self):
return get_project_branch(self)
def clone(self):
if exists(self.name):
with cd(self.name):
shell('git fetch origin --tags')
else:
shell('git clone --depth=1 --branch {} {}'.format(self.branch,
self.url))
@chdir
def make_dist(self):
info('making tarball for %s', self.name)
if exists('./autogen.sh'):
shell('./autogen.sh')
shell(self.configure_cmd, env=make_build_env())
shell('make dist')
@chdir
def copy_dist(self):
self.make_dist()
tarball = glob.glob('*.tar.gz')[0]
info('copying %s to %s', tarball, SRCDIR)
shell('cp {} {}'.format(tarball, SRCDIR))
m = re.match('{}-(.*).tar.gz'.format(self.name), basename(tarball))
if m:
self.version = m.group(1)
@chdir
def use_branch(self, branch):
shell('git checkout {}'.format(branch))
class Ccnet(Project):
def __init__(self):
super(Ccnet, self).__init__('ccnet')
class Seafile(Project):
configure_cmd = './configure --enable-client --enable-server'
def __init__(self):
super(Seafile, self).__init__('seafile')
@chdir
def copy_dist(self):
super(Seafile, self).copy_dist()
global seafile_version
seafile_version = self.version
class Seahub(Project):
def __init__(self):
super(Seahub, self).__init__('seahub')
@chdir
def make_dist(self):
cmds = [
# 'git add -f media/css/*.css',
# 'git commit -a -m "%s"' % msg,
'./tools/gen-tarball.py --version={} --branch=HEAD >/dev/null'
.format(seafile_version),
]
for cmd in cmds:
shell(cmd, env=make_build_env())
class SeafDAV(Project):
def __init__(self):
super(SeafDAV, self).__init__('seafdav')
@chdir
def make_dist(self):
shell('make')
class SeafObj(Project):
def __init__(self):
super(SeafObj, self).__init__('seafobj')
@chdir
def make_dist(self):
shell('make dist')
def build_server(libsearpc, ccnet, seafile):
cmd = [
'python',
join(TOPDIR, 'seafile/scripts/build/build-server.py'),
'--yes',
'--version=%s' % seafile.version,
'--libsearpc_version=%s' % libsearpc.version,
'--ccnet_version=%s' % ccnet.version,
'--seafile_version=%s' % seafile.version,
'--thirdpartdir=%s' % THIRDPARTDIR,
'--srcdir=%s' % SRCDIR,
'--jobs=4',
]
shell(cmd, shell=False, env=make_build_env())
def fetch_and_build():
libsearpc = Project('libsearpc')
ccnet = Ccnet()
seafile = Seafile()
seahub = Seahub()
seafobj = SeafObj()
seafdav = SeafDAV()
for project in (libsearpc, ccnet, seafile, seahub, seafdav, seafobj):
if project.name != 'seafile':
project.clone()
project.copy_dist()
build_server(libsearpc, ccnet, seafile)
def run_tests(cfg):
# run_python_seafile_tests()
# run_seafdav_tests(cfg)
# must stop seafile server before running seaf-gc
shell('{} stop'.format(get_script(cfg, 'seafile.sh')))
shell('{} stop'.format(get_script(cfg, 'seahub.sh')))
shell('{} --verbose --rm-deleted'.format(get_script(cfg, 'seaf-gc.sh')))
def run_python_seafile_tests():
python_seafile = Project('python-seafile')
if not exists(python_seafile.projectdir):
python_seafile.clone()
shell('pip install -r {}/requirements.txt'.format(
python_seafile.projectdir))
with cd(python_seafile.projectdir):
        # install python-seafile because the seafdav tests need it
shell('python setup.py install')
shell('py.test')
def _seafdav_env(cfg):
env = dict(os.environ)
env['CCNET_CONF_DIR'] = join(INSTALLDIR, 'ccnet')
env['SEAFILE_CONF_DIR'] = join(INSTALLDIR, 'seafile-data')
env['SEAFILE_CENTRAL_CONF_DIR'] = join(INSTALLDIR, 'conf')
for path in glob.glob(join(
server_dir(cfg), 'seafile/lib*/python*/*-packages')):
prepend_env_value('PYTHONPATH', path, env=env)
return env
def run_seafdav_tests(cfg):
seafdav = SeafDAV()
shell('pip install -r {}/test-requirements.txt'.format(seafdav.projectdir))
with cd(seafdav.projectdir):
shell('nosetests -v -s', env=_seafdav_env(cfg))
def _mkdirs(*paths):
for path in paths:
if not exists(path):
os.mkdir(path)
def main():
_mkdirs(SRCDIR, INSTALLDIR)
setup_logging()
fetch_and_build()
for db in ('sqlite3', 'mysql'):
if db == 'mysql':
shell('mysqladmin -u root password %s' % MYSQL_ROOT_PASSWD)
for i in ('prompt', 'auto'):
shell('rm -rf {}/*'.format(INSTALLDIR))
setup_and_test(db, i)
def setup_and_test(db, initmode):
cfg = ServerConfig(
installdir=INSTALLDIR,
tarball=join(TOPDIR, 'seafile-server_{}_x86-64.tar.gz'.format(
seafile_version)),
version=seafile_version,
initmode=initmode)
info('Setting up seafile server with %s database', db)
setup_server(cfg, db)
    # enable webdav, we're going to run seafdav tests later
shell('''sed -i -e "s/enabled = false/enabled = true/g" {}'''
.format(join(INSTALLDIR, 'conf/seafdav.conf')))
try:
start_server(cfg)
info('Testing seafile server with %s database', db)
create_test_user(cfg)
run_tests(cfg)
except:
for logfile in glob.glob('{}/logs/*.log'.format(INSTALLDIR)):
shell('echo {0}; cat {0}'.format(logfile))
for logfile in glob.glob('{}/seafile-server-{}/runtime/*.log'.format(
INSTALLDIR, seafile_version)):
shell('echo {0}; cat {0}'.format(logfile))
raise
if __name__ == '__main__':
os.chdir(TOPDIR)
main()

View File

@ -1,81 +0,0 @@
#coding: UTF-8
import os
from os.path import abspath, basename, exists, expanduser, join
import sys
import re
import logging
from contextlib import contextmanager
from subprocess import Popen, PIPE, CalledProcessError
import termcolor
import requests
from pexpect import spawn
logger = logging.getLogger(__file__)
def _color(s, color):
return s if not os.isatty(sys.stdout.fileno()) \
else termcolor.colored(str(s), color)
def green(s):
return _color(s, 'green')
def red(s):
return _color(s, 'red')
def debug(fmt, *a):
logger.debug(green(fmt), *a)
def info(fmt, *a):
logger.info(green(fmt), *a)
def warning(fmt, *a):
logger.warn(red(fmt), *a)
def shell(cmd, inputdata=None, **kw):
info('calling "%s" in %s', cmd, kw.get('cwd', os.getcwd()))
kw['shell'] = not isinstance(cmd, list)
kw['stdin'] = PIPE if inputdata else None
p = Popen(cmd, **kw)
if inputdata:
p.communicate(inputdata)
p.wait()
if p.returncode:
raise CalledProcessError(p.returncode, cmd)
@contextmanager
def cd(path):
olddir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(olddir)
def chdir(func):
def wrapped(self, *w, **kw):
with cd(self.projectdir):
return func(self, *w, **kw)
return wrapped
def setup_logging():
kw = {
'format': '[%(asctime)s][%(module)s]: %(message)s',
'datefmt': '%m/%d/%Y %H:%M:%S',
'level': logging.DEBUG,
'stream': sys.stdout,
}
logging.basicConfig(**kw)
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
logging.WARNING)

View File

@ -1 +0,0 @@
SUBDIRS = seafile seaserv

View File

@ -1,3 +0,0 @@
seafiledir=${pyexecdir}/seafile
seafile_PYTHON = __init__.py rpcclient.py

View File

@ -1,10 +0,0 @@
from rpcclient import SeafileRpcClient as RpcClient
from rpcclient import SeafileThreadedRpcClient as ThreadedRpcClient
from rpcclient import MonitorRpcClient as MonitorRpcClient
from rpcclient import SeafServerRpcClient as ServerRpcClient
from rpcclient import SeafServerThreadedRpcClient as ServerThreadedRpcClient
class TaskType(object):
DOWNLOAD = 0
UPLOAD = 1

View File

@ -1,954 +0,0 @@
import ccnet
from pysearpc import searpc_func, SearpcError
class SeafileRpcClient(ccnet.RpcClientBase):
"""RPC used in client"""
def __init__(self, ccnet_client_pool, *args, **kwargs):
ccnet.RpcClientBase.__init__(self, ccnet_client_pool, "seafile-rpcserver",
*args, **kwargs)
@searpc_func("object", [])
def seafile_get_session_info():
pass
get_session_info = seafile_get_session_info
@searpc_func("int", ["string"])
def seafile_calc_dir_size(path):
pass
calc_dir_size = seafile_calc_dir_size
@searpc_func("int64", [])
def seafile_get_total_block_size():
pass
    get_total_block_size = seafile_get_total_block_size
@searpc_func("string", ["string"])
def seafile_get_config(key):
pass
get_config = seafile_get_config
@searpc_func("int", ["string", "string"])
def seafile_set_config(key, value):
pass
set_config = seafile_set_config
@searpc_func("int", ["string"])
def seafile_get_config_int(key):
pass
get_config_int = seafile_get_config_int
@searpc_func("int", ["string", "int"])
def seafile_set_config_int(key, value):
pass
set_config_int = seafile_set_config_int
@searpc_func("int", ["int"])
def seafile_set_upload_rate_limit(limit):
pass
set_upload_rate_limit = seafile_set_upload_rate_limit
@searpc_func("int", ["int"])
def seafile_set_download_rate_limit(limit):
pass
set_download_rate_limit = seafile_set_download_rate_limit
### repo
@searpc_func("objlist", ["int", "int"])
def seafile_get_repo_list():
pass
get_repo_list = seafile_get_repo_list
@searpc_func("object", ["string"])
def seafile_get_repo():
pass
get_repo = seafile_get_repo
@searpc_func("string", ["string", "string", "string", "string", "string", "int"])
def seafile_create_repo(name, desc, passwd, base, relay_id, keep_history):
pass
create_repo = seafile_create_repo
@searpc_func("int", ["string"])
def seafile_destroy_repo(repo_id):
pass
remove_repo = seafile_destroy_repo
@searpc_func("objlist", ["string", "string", "string", "int"])
def seafile_diff():
pass
get_diff = seafile_diff
@searpc_func("object", ["string", "int", "string"])
def seafile_get_commit(repo_id, version, commit_id):
pass
get_commit = seafile_get_commit
@searpc_func("objlist", ["string", "int", "int"])
def seafile_get_commit_list():
pass
get_commit_list = seafile_get_commit_list
@searpc_func("objlist", ["string"])
def seafile_branch_gets(repo_id):
pass
branch_gets = seafile_branch_gets
@searpc_func("int", ["string", "string"])
def seafile_branch_add(repo_id, branch):
pass
branch_add = seafile_branch_add
##### clone related
@searpc_func("string", ["string", "string"])
def gen_default_worktree(worktree_parent, repo_name):
pass
@searpc_func("string", ["string", "int", "string", "string", "string", "string", "string", "string", "string", "string", "string", "int", "string"])
def seafile_clone(repo_id, repo_version, peer_id, repo_name, worktree, token, password, magic, peer_addr, peer_port, email, random_key, enc_version, more_info):
pass
clone = seafile_clone
@searpc_func("string", ["string", "int", "string", "string", "string", "string", "string", "string", "string", "string", "string", "int", "string"])
def seafile_download(repo_id, repo_version, peer_id, repo_name, wt_parent, token, password, magic, peer_addr, peer_port, email, random_key, enc_version, more_info):
pass
download = seafile_download
@searpc_func("int", ["string"])
def seafile_cancel_clone_task(repo_id):
pass
cancel_clone_task = seafile_cancel_clone_task
@searpc_func("int", ["string"])
def seafile_remove_clone_task(repo_id):
pass
remove_clone_task = seafile_remove_clone_task
@searpc_func("objlist", [])
def seafile_get_clone_tasks():
pass
get_clone_tasks = seafile_get_clone_tasks
@searpc_func("object", ["string"])
def seafile_find_transfer_task(repo_id):
pass
find_transfer_task = seafile_find_transfer_task
@searpc_func("object", ["string"])
def seafile_get_checkout_task(repo_id):
pass
get_checkout_task = seafile_get_checkout_task
### sync
@searpc_func("int", ["string", "string"])
def seafile_sync(repo_id, peer_id):
pass
sync = seafile_sync
@searpc_func("object", ["string"])
def seafile_get_repo_sync_task():
pass
get_repo_sync_task = seafile_get_repo_sync_task
@searpc_func("object", ["string"])
def seafile_get_repo_sync_info():
pass
get_repo_sync_info = seafile_get_repo_sync_info
@searpc_func("int", [])
def seafile_is_auto_sync_enabled():
pass
is_auto_sync_enabled = seafile_is_auto_sync_enabled
###### Property Management #########
@searpc_func("int", ["string", "string"])
def seafile_set_repo_passwd(repo_id, passwd):
pass
set_repo_passwd = seafile_set_repo_passwd
@searpc_func("int", ["string", "string", "string"])
def seafile_set_repo_property(repo_id, key, value):
pass
set_repo_property = seafile_set_repo_property
@searpc_func("string", ["string", "string"])
def seafile_get_repo_property(repo_id, key):
pass
get_repo_property = seafile_get_repo_property
@searpc_func("string", ["string"])
def seafile_get_repo_relay_address(repo_id):
pass
get_repo_relay_address = seafile_get_repo_relay_address
@searpc_func("string", ["string"])
def seafile_get_repo_relay_port(repo_id):
pass
get_repo_relay_port = seafile_get_repo_relay_port
@searpc_func("int", ["string", "string", "string"])
def seafile_update_repo_relay_info(repo_id, addr, port):
pass
update_repo_relay_info = seafile_update_repo_relay_info
@searpc_func("int", ["string", "string"])
def seafile_set_repo_token(repo_id, token):
pass
set_repo_token = seafile_set_repo_token
@searpc_func("string", ["string"])
def seafile_get_repo_token(repo_id):
pass
get_repo_token = seafile_get_repo_token
@searpc_func("object", ["int", "string", "string"])
def seafile_generate_magic_and_random_key(enc_version, repo_id, password):
pass
generate_magic_and_random_key = seafile_generate_magic_and_random_key
class SeafileThreadedRpcClient(ccnet.RpcClientBase):
"""RPC used in client that run in a thread"""
def __init__(self, ccnet_client_pool, *args, **kwargs):
ccnet.RpcClientBase.__init__(self, ccnet_client_pool,
"seafile-threaded-rpcserver",
*args, **kwargs)
@searpc_func("int", ["string", "string", "string"])
def seafile_edit_repo():
pass
edit_repo = seafile_edit_repo
@searpc_func("int", ["string", "string"])
def seafile_reset(repo_id, commit_id):
pass
reset = seafile_reset
@searpc_func("int", ["string", "string"])
def seafile_revert(repo_id, commit_id):
pass
revert = seafile_revert
@searpc_func("int", ["string", "string"])
def seafile_add(repo_id, path):
pass
add = seafile_add
@searpc_func("int", ["string", "string"])
def seafile_rm():
pass
rm = seafile_rm
@searpc_func("string", ["string", "string"])
def seafile_commit(repo_id, description):
pass
commit = seafile_commit
class MonitorRpcClient(ccnet.RpcClientBase):
def __init__(self, ccnet_client_pool):
ccnet.RpcClientBase.__init__(self, ccnet_client_pool, "monitor-rpcserver")
@searpc_func("int", ["string"])
def monitor_get_repos_size(repo_ids):
pass
get_repos_size = monitor_get_repos_size
class SeafServerRpcClient(ccnet.RpcClientBase):
def __init__(self, ccnet_client_pool, *args, **kwargs):
ccnet.RpcClientBase.__init__(self, ccnet_client_pool, "seafserv-rpcserver",
*args, **kwargs)
# token for web access to repo
@searpc_func("string", ["string", "string", "string", "string", "int"])
def seafile_web_get_access_token(repo_id, obj_id, op, username, use_onetime=1):
pass
web_get_access_token = seafile_web_get_access_token
@searpc_func("object", ["string"])
def seafile_web_query_access_token(token):
pass
web_query_access_token = seafile_web_query_access_token
@searpc_func("string", ["string"])
def seafile_query_zip_progress(token):
pass
query_zip_progress = seafile_query_zip_progress
###### GC ####################
@searpc_func("int", [])
def seafile_gc():
pass
gc = seafile_gc
@searpc_func("int", [])
def seafile_gc_get_progress():
pass
gc_get_progress = seafile_gc_get_progress
# password management
@searpc_func("int", ["string", "string"])
def seafile_is_passwd_set(repo_id, user):
pass
is_passwd_set = seafile_is_passwd_set
@searpc_func("object", ["string", "string"])
def seafile_get_decrypt_key(repo_id, user):
pass
get_decrypt_key = seafile_get_decrypt_key
# Copy tasks
@searpc_func("object", ["string"])
def get_copy_task(task_id):
pass
@searpc_func("int", ["string"])
def cancel_copy_task(task_id):
pass
class SeafServerThreadedRpcClient(ccnet.RpcClientBase):
def __init__(self, ccnet_client_pool, *args, **kwargs):
ccnet.RpcClientBase.__init__(self, ccnet_client_pool,
"seafserv-threaded-rpcserver",
*args, **kwargs)
# repo manipulation
@searpc_func("string", ["string", "string", "string", "string"])
def seafile_create_repo(name, desc, owner_email, passwd):
pass
create_repo = seafile_create_repo
@searpc_func("string", ["string", "string", "string", "string", "string", "string", "int"])
def seafile_create_enc_repo(repo_id, name, desc, owner_email, magic, random_key, enc_version):
pass
create_enc_repo = seafile_create_enc_repo
@searpc_func("object", ["string"])
def seafile_get_repo(repo_id):
pass
get_repo = seafile_get_repo
@searpc_func("int", ["string"])
def seafile_destroy_repo(repo_id):
pass
remove_repo = seafile_destroy_repo
@searpc_func("objlist", ["int", "int"])
def seafile_get_repo_list(start, limit):
pass
get_repo_list = seafile_get_repo_list
@searpc_func("int64", [])
def seafile_count_repos():
pass
count_repos = seafile_count_repos
@searpc_func("int", ["string", "string", "string", "string"])
def seafile_edit_repo(repo_id, name, description, user):
pass
edit_repo = seafile_edit_repo
@searpc_func("int", ["string", "string"])
def seafile_is_repo_owner(user_id, repo_id):
pass
is_repo_owner = seafile_is_repo_owner
@searpc_func("int", ["string", "string"])
def seafile_set_repo_owner(email, repo_id):
pass
set_repo_owner = seafile_set_repo_owner
@searpc_func("string", ["string"])
def seafile_get_repo_owner(repo_id):
pass
get_repo_owner = seafile_get_repo_owner
@searpc_func("objlist", [])
def seafile_get_orphan_repo_list():
pass
get_orphan_repo_list = seafile_get_orphan_repo_list
@searpc_func("objlist", ["string", "int"])
def seafile_list_owned_repos(user_id, ret_corrupted):
pass
list_owned_repos = seafile_list_owned_repos
@searpc_func("int64", ["string"])
def seafile_server_repo_size(repo_id):
pass
server_repo_size = seafile_server_repo_size
@searpc_func("int", ["string", "string"])
def seafile_repo_set_access_property(repo_id, role):
pass
repo_set_access_property = seafile_repo_set_access_property
@searpc_func("string", ["string"])
def seafile_repo_query_access_property(repo_id):
pass
repo_query_access_property = seafile_repo_query_access_property
@searpc_func("int", ["string", "string", "string"])
def seafile_revert_on_server(repo_id, commit_id, user_name):
pass
revert_on_server = seafile_revert_on_server
@searpc_func("objlist", ["string", "string", "string"])
def seafile_diff():
pass
get_diff = seafile_diff
@searpc_func("int", ["string", "string", "string", "string", "string"])
def seafile_post_file(repo_id, tmp_file_path, parent_dir, filename, user):
pass
post_file = seafile_post_file
@searpc_func("int", ["string", "string", "string", "string"])
def seafile_post_dir(repo_id, parent_dir, new_dir_name, user):
pass
post_dir = seafile_post_dir
@searpc_func("int", ["string", "string", "string", "string"])
def seafile_post_empty_file(repo_id, parent_dir, filename, user):
pass
post_empty_file = seafile_post_empty_file
@searpc_func("int", ["string", "string", "string", "string", "string", "string"])
def seafile_put_file(repo_id, tmp_file_path, parent_dir, filename, user, head_id):
pass
put_file = seafile_put_file
@searpc_func("int", ["string", "string", "string", "string"])
def seafile_del_file(repo_id, parent_dir, filename, user):
pass
del_file = seafile_del_file
@searpc_func("object", ["string", "string", "string", "string", "string", "string", "string", "int", "int"])
def seafile_copy_file(src_repo, src_dir, src_filename, dst_repo, dst_dir, dst_filename, user, need_progress, synchronous):
pass
copy_file = seafile_copy_file
@searpc_func("object", ["string", "string", "string", "string", "string", "string", "int", "string", "int", "int"])
def seafile_move_file(src_repo, src_dir, src_filename, dst_repo, dst_dir, dst_filename, replace, user, need_progress, synchronous):
pass
move_file = seafile_move_file
@searpc_func("int", ["string", "string", "string", "string", "string"])
def seafile_rename_file(repo_id, parent_dir, oldname, newname, user):
pass
rename_file = seafile_rename_file
@searpc_func("int", ["string", "string"])
def seafile_is_valid_filename(repo_id, filename):
pass
is_valid_filename = seafile_is_valid_filename
@searpc_func("object", ["string", "int", "string"])
def seafile_get_commit(repo_id, version, commit_id):
pass
get_commit = seafile_get_commit
@searpc_func("string", ["string", "string", "int", "int"])
def seafile_list_file_blocks(repo_id, file_id, offset, limit):
pass
list_file_blocks = seafile_list_file_blocks
@searpc_func("objlist", ["string", "string", "int", "int"])
def seafile_list_dir(repo_id, dir_id, offset, limit):
pass
list_dir = seafile_list_dir
@searpc_func("objlist", ["string", "string", "sting", "string", "int", "int"])
def list_dir_with_perm(repo_id, dir_path, dir_id, user, offset, limit):
pass
@searpc_func("int64", ["string", "int", "string"])
def seafile_get_file_size(store_id, version, file_id):
pass
get_file_size = seafile_get_file_size
@searpc_func("int64", ["string", "int", "string"])
def seafile_get_dir_size(store_id, version, dir_id):
pass
get_dir_size = seafile_get_dir_size
@searpc_func("objlist", ["string", "string", "string"])
def seafile_list_dir_by_path(repo_id, commit_id, path):
pass
list_dir_by_path = seafile_list_dir_by_path
@searpc_func("string", ["string", "string", "string"])
def seafile_get_dir_id_by_commit_and_path(repo_id, commit_id, path):
pass
get_dir_id_by_commit_and_path = seafile_get_dir_id_by_commit_and_path
@searpc_func("string", ["string", "string"])
def seafile_get_file_id_by_path(repo_id, path):
pass
get_file_id_by_path = seafile_get_file_id_by_path
@searpc_func("string", ["string", "string"])
def seafile_get_dir_id_by_path(repo_id, path):
pass
get_dir_id_by_path = seafile_get_dir_id_by_path
@searpc_func("string", ["string", "string", "string"])
def seafile_get_file_id_by_commit_and_path(repo_id, commit_id, path):
pass
get_file_id_by_commit_and_path = seafile_get_file_id_by_commit_and_path
@searpc_func("object", ["string", "string"])
def seafile_get_dirent_by_path(repo_id, commit_id, path):
pass
get_dirent_by_path = seafile_get_dirent_by_path
@searpc_func("objlist", ["string", "string", "int", "int", "int"])
def seafile_list_file_revisions(repo_id, path, max_revision, limit, show_days):
pass
list_file_revisions = seafile_list_file_revisions
@searpc_func("objlist", ["string", "string"])
def seafile_calc_files_last_modified(repo_id, parent_dir, limit):
pass
calc_files_last_modified = seafile_calc_files_last_modified
@searpc_func("int", ["string", "string", "string", "string"])
def seafile_revert_file(repo_id, commit_id, path, user):
pass
revert_file = seafile_revert_file
@searpc_func("string", ["string", "string"])
def seafile_check_repo_blocks_missing(repo_id, blklist):
pass
check_repo_blocks_missing = seafile_check_repo_blocks_missing
@searpc_func("int", ["string", "string", "string", "string"])
def seafile_revert_dir(repo_id, commit_id, path, user):
pass
revert_dir = seafile_revert_dir
@searpc_func("objlist", ["string", "int", "string", "string", "int"])
def get_deleted(repo_id, show_days, path, scan_stat, limit):
pass
# share repo to user
@searpc_func("string", ["string", "string", "string", "string"])
def seafile_add_share(repo_id, from_email, to_email, permission):
pass
add_share = seafile_add_share
@searpc_func("objlist", ["string", "string", "int", "int"])
def seafile_list_share_repos(email, query_col, start, limit):
pass
list_share_repos = seafile_list_share_repos
@searpc_func("objlist", ["string", "string"])
def seafile_list_repo_shared_to(from_user, repo_id):
pass
list_repo_shared_to = seafile_list_repo_shared_to
@searpc_func("int", ["string", "string", "string", "string", "string", "string"])
def share_subdir_to_user(repo_id, path, owner, share_user, permission, passwd):
pass
@searpc_func("int", ["string", "string", "string", "string"])
def unshare_subdir_for_user(repo_id, path, owner, share_user):
pass
@searpc_func("int", ["string", "string", "string", "string", "string"])
def update_share_subdir_perm_for_user(repo_id, path, owner, share_user, permission):
pass
@searpc_func("objlist", ["int", "string", "string", "int", "int"])
def seafile_list_org_share_repos(org_id, email, query_col, start, limit):
pass
list_org_share_repos = seafile_list_org_share_repos
@searpc_func("int", ["string", "string", "string"])
def seafile_remove_share(repo_id, from_email, to_email):
pass
remove_share = seafile_remove_share
@searpc_func("int", ["string", "string", "string", "string"])
def set_share_permission(repo_id, from_email, to_email, permission):
pass
# share repo to group
@searpc_func("int", ["string", "int", "string", "string"])
    def seafile_group_share_repo(repo_id, group_id, user_name, permission):
pass
group_share_repo = seafile_group_share_repo
@searpc_func("int", ["string", "int", "string"])
def seafile_group_unshare_repo(repo_id, group_id, user_name):
pass
group_unshare_repo = seafile_group_unshare_repo
@searpc_func("string", ["string"])
def seafile_get_shared_groups_by_repo(repo_id):
pass
    get_shared_groups_by_repo = seafile_get_shared_groups_by_repo
@searpc_func("objlist", ["string", "string"])
def seafile_list_repo_shared_group(from_user, repo_id):
pass
list_repo_shared_group = seafile_list_repo_shared_group
@searpc_func("objlist", ["string", "string", "string"])
def seafile_get_shared_users_for_subdir(repo_id, path, from_user):
pass
get_shared_users_for_subdir = seafile_get_shared_users_for_subdir
@searpc_func("objlist", ["string", "string", "string"])
def seafile_get_shared_groups_for_subdir(repo_id, path, from_user):
pass
get_shared_groups_for_subdir = seafile_get_shared_groups_for_subdir
@searpc_func("int", ["string", "string", "string", "int", "string", "string"])
def share_subdir_to_group(repo_id, path, owner, share_group, permission, passwd):
pass
@searpc_func("int", ["string", "string", "string", "int"])
def unshare_subdir_for_group(repo_id, path, owner, share_group):
pass
@searpc_func("int", ["string", "string", "string", "int", "string"])
def update_share_subdir_perm_for_group(repo_id, path, owner, share_group, permission):
pass
@searpc_func("string", ["int"])
def seafile_get_group_repoids(group_id):
pass
get_group_repoids = seafile_get_group_repoids
@searpc_func("objlist", ["int"])
def seafile_get_repos_by_group(group_id):
pass
get_repos_by_group = seafile_get_repos_by_group
@searpc_func("objlist", ["string"])
def get_group_repos_by_owner(user_name):
pass
@searpc_func("string", ["string"])
def get_group_repo_owner(repo_id):
pass
@searpc_func("int", ["int", "string"])
def seafile_remove_repo_group(group_id, user_name):
pass
remove_repo_group = seafile_remove_repo_group
@searpc_func("int", ["int", "string", "string"])
def set_group_repo_permission(group_id, repo_id, permission):
pass
# branch and commit
@searpc_func("objlist", ["string"])
def seafile_branch_gets(repo_id):
pass
branch_gets = seafile_branch_gets
@searpc_func("objlist", ["string", "int", "int"])
def seafile_get_commit_list(repo_id, offset, limit):
pass
get_commit_list = seafile_get_commit_list
###### Token ####################
@searpc_func("int", ["string", "string", "string"])
def seafile_set_repo_token(repo_id, email, token):
pass
set_repo_token = seafile_set_repo_token
@searpc_func("string", ["string", "string"])
def seafile_get_repo_token_nonnull(repo_id, email):
"""Get the token of the repo for the email user. If the token does not
exist, a new one is generated and returned.
"""
pass
get_repo_token_nonnull = seafile_get_repo_token_nonnull
@searpc_func("string", ["string", "string"])
def seafile_generate_repo_token(repo_id, email):
pass
generate_repo_token = seafile_generate_repo_token
@searpc_func("int", ["string", "string"])
def seafile_delete_repo_token(repo_id, token, user):
pass
delete_repo_token = seafile_delete_repo_token
@searpc_func("objlist", ["string"])
def seafile_list_repo_tokens(repo_id):
pass
list_repo_tokens = seafile_list_repo_tokens
@searpc_func("objlist", ["string"])
def seafile_list_repo_tokens_by_email(email):
pass
list_repo_tokens_by_email = seafile_list_repo_tokens_by_email
@searpc_func("int", ["string", "string"])
def seafile_delete_repo_tokens_by_peer_id(email, user_id):
pass
delete_repo_tokens_by_peer_id = seafile_delete_repo_tokens_by_peer_id
@searpc_func("int", ["string"])
def delete_repo_tokens_by_email(email):
pass
###### quota ##########
@searpc_func("int64", ["string"])
def seafile_get_user_quota_usage(user_id):
pass
get_user_quota_usage = seafile_get_user_quota_usage
@searpc_func("int64", ["string"])
def seafile_get_user_share_usage(user_id):
pass
get_user_share_usage = seafile_get_user_share_usage
@searpc_func("int64", ["int"])
def seafile_get_org_quota_usage(org_id):
pass
get_org_quota_usage = seafile_get_org_quota_usage
@searpc_func("int64", ["int", "string"])
def seafile_get_org_user_quota_usage(org_id, user):
pass
get_org_user_quota_usage = seafile_get_org_user_quota_usage
@searpc_func("int", ["string", "int64"])
def set_user_quota(user, quota):
pass
@searpc_func("int64", ["string"])
def get_user_quota(user):
pass
@searpc_func("int", ["int", "int64"])
def set_org_quota(org_id, quota):
pass
@searpc_func("int64", ["int"])
def get_org_quota(org_id):
pass
@searpc_func("int", ["int", "string", "int64"])
def set_org_user_quota(org_id, user, quota):
pass
@searpc_func("int64", ["int", "string"])
def get_org_user_quota(org_id, user):
pass
@searpc_func("int", ["string"])
def check_quota(repo_id):
pass
# password management
@searpc_func("int", ["string", "string"])
def seafile_check_passwd(repo_id, magic):
pass
check_passwd = seafile_check_passwd
@searpc_func("int", ["string", "string", "string"])
def seafile_set_passwd(repo_id, user, passwd):
pass
set_passwd = seafile_set_passwd
@searpc_func("int", ["string", "string"])
def seafile_unset_passwd(repo_id, user, passwd):
pass
unset_passwd = seafile_unset_passwd
# repo permission checking
@searpc_func("string", ["string", "string"])
def check_permission(repo_id, user):
pass
# folder permission check
@searpc_func("string", ["string", "string", "string"])
def check_permission_by_path(repo_id, path, user):
pass
# org repo
@searpc_func("string", ["string", "string", "string", "string", "string", "int", "int"])
def seafile_create_org_repo(name, desc, user, passwd, magic, random_key, enc_version, org_id):
pass
create_org_repo = seafile_create_org_repo
@searpc_func("int", ["string"])
def seafile_get_org_id_by_repo_id(repo_id):
pass
get_org_id_by_repo_id = seafile_get_org_id_by_repo_id
@searpc_func("objlist", ["int", "int", "int"])
def seafile_get_org_repo_list(org_id, start, limit):
pass
get_org_repo_list = seafile_get_org_repo_list
@searpc_func("int", ["int"])
def seafile_remove_org_repo_by_org_id(org_id):
pass
remove_org_repo_by_org_id = seafile_remove_org_repo_by_org_id
@searpc_func("objlist", ["int", "string"])
def list_org_repos_by_owner(org_id, user):
pass
@searpc_func("string", ["string"])
def get_org_repo_owner(repo_id):
pass
# org group repo
@searpc_func("int", ["string", "int", "int", "string", "string"])
def add_org_group_repo(repo_id, org_id, group_id, owner, permission):
pass
@searpc_func("int", ["string", "int", "int"])
def del_org_group_repo(repo_id, org_id, group_id):
pass
@searpc_func("string", ["int", "int"])
def get_org_group_repoids(org_id, group_id):
pass
@searpc_func("string", ["int", "int", "string"])
def get_org_group_repo_owner(org_id, group_id, repo_id):
pass
@searpc_func("objlist", ["int", "string"])
def get_org_group_repos_by_owner(org_id, user):
pass
@searpc_func("string", ["int", "string"])
def get_org_groups_by_repo(org_id, repo_id):
pass
@searpc_func("int", ["int", "int", "string", "string"])
def set_org_group_repo_permission(org_id, group_id, repo_id, permission):
pass
# inner pub repo
@searpc_func("int", ["string", "string"])
def set_inner_pub_repo(repo_id, permission):
pass
@searpc_func("int", ["string"])
def unset_inner_pub_repo(repo_id):
pass
@searpc_func("objlist", [])
def list_inner_pub_repos():
pass
@searpc_func("objlist", ["string"])
def list_inner_pub_repos_by_owner(user):
pass
@searpc_func("int64", [])
def count_inner_pub_repos():
pass
@searpc_func("int", ["string"])
def is_inner_pub_repo(repo_id):
pass
# org inner pub repo
@searpc_func("int", ["int", "string", "string"])
def set_org_inner_pub_repo(org_id, repo_id, permission):
pass
@searpc_func("int", ["int", "string"])
def unset_org_inner_pub_repo(org_id, repo_id):
pass
@searpc_func("objlist", ["int"])
def list_org_inner_pub_repos(org_id):
pass
@searpc_func("objlist", ["int", "string"])
def list_org_inner_pub_repos_by_owner(org_id, user):
pass
@searpc_func("int", ["string", "int"])
def set_repo_history_limit(repo_id, days):
pass
@searpc_func("int", ["string"])
def get_repo_history_limit(repo_id):
pass
# virtual repo
@searpc_func("string", ["string", "string", "string", "string", "string", "string"])
def create_virtual_repo(origin_repo_id, path, repo_name, repo_desc, owner, passwd=''):
pass
@searpc_func("objlist", ["string"])
def get_virtual_repos_by_owner(owner):
pass
@searpc_func("object", ["string", "string", "string"])
def get_virtual_repo(origin_repo, path, owner):
pass
# system default library
@searpc_func("string", [])
def get_system_default_repo_id():
pass
# Change password
@searpc_func("int", ["string", "string", "string", "string"])
def seafile_change_repo_passwd(repo_id, old_passwd, new_passwd, user):
pass
change_repo_passwd = seafile_change_repo_passwd
# Clean trash
@searpc_func("int", ["string", "int"])
def clean_up_repo_history(repo_id, keep_days):
pass
# Trashed repos
@searpc_func("objlist", ["int", "int"])
def get_trash_repo_list(start, limit):
pass
@searpc_func("int", ["string"])
def del_repo_from_trash(repo_id):
pass
@searpc_func("int", ["string"])
def restore_repo_from_trash(repo_id):
pass
@searpc_func("objlist", ["string"])
def get_trash_repos_by_owner(owner):
pass
@searpc_func("int", [])
def empty_repo_trash():
pass
@searpc_func("int", ["string"])
def empty_repo_trash_by_owner(owner):
pass
@searpc_func("object", ["string"])
def empty_repo_trash_by_owner(owner):
pass
@searpc_func("object", ["int", "string", "string"])
def generate_magic_and_random_key(enc_version, repo_id, password):
pass

View File

@ -1,3 +0,0 @@
seaservdir=${pyexecdir}/seaserv
seaserv_PYTHON = __init__.py service.py api.py

View File

@ -1,43 +0,0 @@
import service
from service import ccnet_rpc, seafserv_rpc, seafserv_threaded_rpc, ccnet_threaded_rpc
from service import send_command, check_quota, web_get_access_token, \
unset_repo_passwd, get_user_quota_usage, get_user_share_usage, \
get_user_quota
from service import get_emailusers, count_emailusers, get_session_info, \
get_emailuser_with_import
from service import get_org_groups, get_personal_groups_by_user, \
get_group_repoids, get_personal_groups, list_share_repos, remove_share, \
check_group_staff, remove_group_user, get_group, get_org_id_by_group, \
get_group_members, get_shared_groups_by_repo, is_group_user, \
get_org_group_repos, get_group_repos, get_org_groups_by_user, is_org_group,\
del_org_group_repo, get_org_groups_by_repo, get_org_group_repoids, \
get_group_repos_by_owner, unshare_group_repo
from service import get_repos, get_repo, get_commits, get_branches, remove_repo, \
get_org_repos, is_repo_owner, create_org_repo, is_inner_pub_repo, \
list_org_inner_pub_repos, get_org_id_by_repo_id, list_org_shared_repos, \
list_personal_shared_repos, is_personal_repo, list_inner_pub_repos, \
is_org_repo_owner, get_org_repo_owner, is_org_repo, get_file_size,\
list_personal_repos_by_owner, get_repo_token_nonnull, get_repo_owner, \
server_repo_size, get_file_id_by_path, get_commit, set_repo_history_limit,\
get_repo_history_limit, list_inner_pub_repos_by_owner, unset_inner_pub_repo,\
count_inner_pub_repos, edit_repo, list_dir_by_path, create_repo, remove_repo
from service import get_binding_peerids, is_valid_filename, check_permission,\
is_passwd_set
from service import create_org, get_orgs_by_user, get_org_by_url_prefix, \
get_user_current_org, add_org_user, remove_org_user, get_org_by_id, \
get_org_id_by_repo_id, is_org_staff, get_org_users_by_url_prefix, \
org_user_exists, list_org_repos_by_owner
from service import get_related_users_by_repo, get_related_users_by_org_repo
from service import post_empty_file, del_file
from service import CCNET_CONF_PATH, CCNET_SERVER_ADDR, CCNET_SERVER_PORT, \
MAX_UPLOAD_FILE_SIZE, MAX_DOWNLOAD_DIR_SIZE, FILE_SERVER_ROOT, \
CALC_SHARE_USAGE, SERVICE_URL, FILE_SERVER_PORT, SERVER_ID, \
SEAFILE_CENTRAL_CONF_DIR
from service import send_message
from api import seafile_api, ccnet_api

View File

@ -1,920 +0,0 @@
from service import seafserv_rpc, seafserv_threaded_rpc, ccnet_threaded_rpc
from pysearpc import SearpcError
"""
General rules for return values and exception handling of Seafile python API:
- Read operations return the corresponding values, and raise exceptions on parameter
  errors or I/O errors in seaf-server.
- Write or set operations return 0 on success and -1 on error. On error, an exception
  will be raised.
All paths in parameters can be in absolute path format (like '/test') or
relative path format (like 'test'). The API can handle both formats.
"""
class SeafileAPI(object):
def __init__(self):
pass
# fileserver token
def get_fileserver_access_token(self, repo_id, obj_id, op, username, use_onetime=True):
"""Generate token for access file/dir in fileserver
op: the operation, can be 'view', 'download', 'download-dir', 'downloadblks',
'upload', 'update', 'upload-blks-api', 'upload-blks-aj',
'update-blks-api', 'update-blks-aj'
Return: the access token in string
"""
onetime = 1 if bool(use_onetime) else 0
return seafserv_rpc.web_get_access_token(repo_id, obj_id, op, username,
onetime)
def query_fileserver_access_token(self, token):
"""Get the WebAccess object
token: the access token in string
Return: the WebAccess object (lib/webaccess.vala)
"""
return seafserv_rpc.web_query_access_token(token)
def query_zip_progress(self, token):
"""Query zip progress for download-dir, download-multi
token: obtained by get_fileserver_access_token
        Return: a JSON formatted string `{"zipped":, "total":}`, otherwise None.
"""
return seafserv_rpc.query_zip_progress(token)
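    # A minimal polling sketch (hypothetical caller code; repo_id, obj_id and
    # username are placeholders, and `api` stands for a SeafileAPI instance):
    #
    #     import json, time
    #     token = api.get_fileserver_access_token(repo_id, obj_id,
    #                                             'download-dir', username)
    #     while True:
    #         progress = api.query_zip_progress(token)
    #         if progress is None:
    #             break
    #         counts = json.loads(progress)
    #         if counts['zipped'] == counts['total']:
    #             break
    #         time.sleep(1)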
# password
def is_password_set(self, repo_id, username):
"""
        Return non-zero if the password is set, otherwise 0.
"""
return seafserv_rpc.is_passwd_set(repo_id, username)
def get_decrypt_key(self, repo_id, username):
"""
Return: a CryptKey object (lib/crypt.vala)
"""
return seafserv_rpc.get_decrypt_key(repo_id, username)
# repo manipulation
def create_repo(self, name, desc, username, passwd):
return seafserv_threaded_rpc.create_repo(name, desc, username, passwd)
def create_enc_repo(self, repo_id, name, desc, username, magic, random_key, enc_version):
return seafserv_threaded_rpc.create_enc_repo(repo_id, name, desc, username, magic, random_key, enc_version)
def get_repo(self, repo_id):
"""
Return: a Repo object (lib/repo.vala)
"""
return seafserv_threaded_rpc.get_repo(repo_id)
def remove_repo(self, repo_id):
return seafserv_threaded_rpc.remove_repo(repo_id)
def get_repo_list(self, start, limit):
"""
Return: a list of Repo objects (lib/repo.vala)
"""
return seafserv_threaded_rpc.get_repo_list(start, limit)
def count_repos(self):
return seafserv_threaded_rpc.count_repos()
def edit_repo(self, repo_id, name, description, username):
return seafserv_threaded_rpc.edit_repo(repo_id, name, description, username)
def is_repo_owner(self, username, repo_id):
"""
        Return 1 if the user owns the repo, otherwise 0.
"""
return seafserv_threaded_rpc.is_repo_owner(username, repo_id)
def set_repo_owner(self, email, repo_id):
return seafserv_threaded_rpc.set_repo_owner(email, repo_id)
def get_repo_owner(self, repo_id):
"""
Return: repo owner in string
"""
return seafserv_threaded_rpc.get_repo_owner(repo_id)
def get_owned_repo_list(self, username, ret_corrupted=False):
"""
Return: a list of Repo objects
"""
return seafserv_threaded_rpc.list_owned_repos(username,
1 if ret_corrupted else 0)
def get_orphan_repo_list(self):
return seafserv_threaded_rpc.get_orphan_repo_list()
def get_repo_size(self, repo_id):
return seafserv_threaded_rpc.server_repo_size(repo_id)
def revert_repo(self, repo_id, commit_id, username):
return seafserv_threaded_rpc.revert_on_server(repo_id, commit_id, username)
def diff_commits(self, repo_id, old_commit, new_commit, fold_dir_diff = 1):
"""
Return: a list of DiffEntry objects (lib/repo.vala)
"""
return seafserv_threaded_rpc.get_diff(repo_id, old_commit, new_commit, fold_dir_diff)
def get_commit_list(self, repo_id, offset, limit):
"""
Return: a list of Commit objects (lib/commit.vala)
"""
return seafserv_threaded_rpc.get_commit_list(repo_id, offset, limit)
def change_repo_passwd(self, repo_id, old_passwd, new_passwd, user):
return seafserv_threaded_rpc.change_repo_passwd(repo_id, old_passwd,
new_passwd, user)
# File property and dir listing
def is_valid_filename(self, repo_id, filename):
"""
Return: 0 on invalid; 1 on valid.
"""
return seafserv_threaded_rpc.is_valid_filename(repo_id, filename)
def get_file_size(self, store_id, version, file_id):
return seafserv_threaded_rpc.get_file_size(store_id, version, file_id)
def get_dir_size(self, store_id, version, dir_id):
"""
        Return the size of a dir. This requires recursively calculating the size of
        the dir, which can cause a long delay before returning. Use with caution!
"""
return seafserv_threaded_rpc.get_dir_size(store_id, version, dir_id)
def get_file_id_by_path(self, repo_id, path):
"""
Returns None if path not found. Only raise exception on parameter or IO error.
"""
return seafserv_threaded_rpc.get_file_id_by_path(repo_id, path)
def get_file_id_by_commit_and_path(self, repo_id, commit_id, path):
return seafserv_threaded_rpc.get_file_id_by_commit_and_path(repo_id,
commit_id,
path)
def get_dirent_by_path(self, repo_id, path):
"""
Return: a Dirent object (lib/dirent.vala)
"""
return seafserv_threaded_rpc.get_dirent_by_path(repo_id, path)
def list_file_by_file_id(self, repo_id, file_id, offset=-1, limit=-1):
# deprecated, use list_blocks_by_file_id instead.
return seafserv_threaded_rpc.list_file_blocks(repo_id, file_id, offset, limit)
def list_blocks_by_file_id(self, repo_id, file_id, offset=-1, limit=-1):
"""
        List the block ids of a file.
        Return: a string containing the block list. Each id is separated by '\n'
"""
return seafserv_threaded_rpc.list_file_blocks(repo_id, file_id, offset, limit)
def get_dir_id_by_path(self, repo_id, path):
return seafserv_threaded_rpc.get_dir_id_by_path(repo_id, path)
def list_dir_by_dir_id(self, repo_id, dir_id, offset=-1, limit=-1):
"""
Return: a list of Dirent objects. The objects are sorted as follows:
- Directories are always before files
- Entries are sorted by names in ascending order
"""
return seafserv_threaded_rpc.list_dir(repo_id, dir_id, offset, limit)
def list_dir_by_path(self, repo_id, path, offset=-1, limit=-1):
dir_id = seafserv_threaded_rpc.get_dir_id_by_path(repo_id, path)
if dir_id is None:
return None
return seafserv_threaded_rpc.list_dir(repo_id, dir_id, offset, limit)
def list_dir_by_commit_and_path(self, repo_id,
commit_id, path, offset=-1, limit=-1):
dir_id = seafserv_threaded_rpc.get_dir_id_by_commit_and_path(repo_id, commit_id, path)
if dir_id is None:
return None
return seafserv_threaded_rpc.list_dir(repo_id, dir_id, offset, limit)
def get_dir_id_by_commit_and_path(self, repo_id, commit_id, path):
return seafserv_threaded_rpc.get_dir_id_by_commit_and_path(repo_id, commit_id, path)
# file/dir operations
def post_file(self, repo_id, tmp_file_path, parent_dir, filename, username):
"""Add a file to a directory"""
return seafserv_threaded_rpc.post_file(repo_id, tmp_file_path, parent_dir,
filename, username)
def post_empty_file(self, repo_id, parent_dir, filename, username):
return seafserv_threaded_rpc.post_empty_file(repo_id, parent_dir,
filename, username)
def put_file(self, repo_id, tmp_file_path, parent_dir, filename,
username, head_id):
"""Update an existing file
head_id: the original commit id of the old file
"""
return seafserv_threaded_rpc.put_file(repo_id, tmp_file_path, parent_dir,
filename, username, head_id)
def del_file(self, repo_id, parent_dir, filename, username):
return seafserv_threaded_rpc.del_file(repo_id, parent_dir, filename, username)
def copy_file(self, src_repo, src_dir, src_filename, dst_repo,
dst_dir, dst_filename, username, need_progress, synchronous=0):
return seafserv_threaded_rpc.copy_file(src_repo, src_dir, src_filename,
dst_repo, dst_dir, dst_filename,
username, need_progress, synchronous)
def move_file(self, src_repo, src_dir, src_filename, dst_repo, dst_dir,
dst_filename, replace, username, need_progress, synchronous=0):
return seafserv_threaded_rpc.move_file(src_repo, src_dir, src_filename,
dst_repo, dst_dir, dst_filename,
replace, username, need_progress, synchronous)
def get_copy_task(self, task_id):
return seafserv_rpc.get_copy_task(task_id)
def cancel_copy_task(self, task_id):
return seafserv_rpc.cancel_copy_task(task_id)
def rename_file(self, repo_id, parent_dir, oldname, newname, username):
return seafserv_threaded_rpc.rename_file(repo_id, parent_dir,
oldname, newname, username)
def post_dir(self, repo_id, parent_dir, dirname, username):
"""Add a directory"""
return seafserv_threaded_rpc.post_dir(repo_id, parent_dir, dirname, username)
def revert_file(self, repo_id, commit_id, path, username):
return seafserv_threaded_rpc.revert_file(repo_id, commit_id, path, username)
def revert_dir(self, repo_id, commit_id, path, username):
return seafserv_threaded_rpc.revert_dir(repo_id, commit_id, path, username)
def get_deleted(self, repo_id, show_days, path='/', scan_stat=None, limit=100):
"""
Get list of deleted paths.
        @show_days: return paths deleted in the last @show_days days
@path: return deleted files under this path. The path will be recursively traversed.
@scan_stat: An opaque status returned by the last call. In the first call, None
must be passed. The last entry of the result list contains a 'scan_stat'
attribute. In the next call, pass in the returned 'scan_stat'.
@limit: Advisory maximum number of result entries returned. Sometimes more than @limit
entries will be returned.
Return a list of DeletedEntry objects (lib/repo.vala).
If no more deleted entries can be returned within the given time frame (specified by
@show_days) or all deleted entries in the history have been returned, 'None' will be
returned.
"""
return seafserv_threaded_rpc.get_deleted(repo_id, show_days, path, scan_stat, limit)
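    # A minimal pagination sketch (hypothetical caller code; `api` stands for a
    # SeafileAPI instance): walk the deletion history by threading 'scan_stat'
    # through successive calls, as described in the docstring above.
    #
    #     entries, scan_stat = [], None
    #     while True:
    #         batch = api.get_deleted(repo_id, show_days=7, scan_stat=scan_stat)
    #         if not batch:
    #             break
    #         scan_stat = batch[-1].scan_stat
    #         entries.extend(batch)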
def get_file_revisions(self, repo_id, path, max_revision, limit, show_days=-1):
return seafserv_threaded_rpc.list_file_revisions(repo_id, path,
max_revision, limit,
show_days)
# This api is slow and should only be used for version 0 repos.
def get_files_last_modified(self, repo_id, parent_dir, limit):
"""Get last modification time for files in a dir
limit: the max number of commits to analyze
"""
return seafserv_threaded_rpc.calc_files_last_modified(repo_id,
parent_dir, limit)
def get_repo_history_limit(self, repo_id):
"""
Return repo history limit in days. Returns -1 if it's unlimited.
"""
return seafserv_threaded_rpc.get_repo_history_limit(repo_id)
def set_repo_history_limit(self, repo_id, days):
"""
Set repo history limit in days. Pass -1 if set to unlimited.
"""
return seafserv_threaded_rpc.set_repo_history_limit(repo_id, days)
def check_repo_blocks_missing(self, repo_id, blklist):
return seafserv_threaded_rpc.check_repo_blocks_missing(repo_id, blklist)
# file lock
def check_file_lock(self, repo_id, path, user):
"""
Always return 0 since CE doesn't support file locking.
"""
return 0
# share repo to user
def share_repo(self, repo_id, from_username, to_username, permission):
return seafserv_threaded_rpc.add_share(repo_id, from_username,
to_username, permission)
def remove_share(self, repo_id, from_username, to_username):
return seafserv_threaded_rpc.remove_share(repo_id, from_username,
to_username)
def set_share_permission(self, repo_id, from_username, to_username, permission):
return seafserv_threaded_rpc.set_share_permission(repo_id, from_username,
to_username, permission)
def share_subdir_to_user(self, repo_id, path, owner, share_user, permission, passwd=''):
return seafserv_threaded_rpc.share_subdir_to_user(repo_id, path, owner,
share_user, permission, passwd)
def unshare_subdir_for_user(self, repo_id, path, owner, share_user):
return seafserv_threaded_rpc.unshare_subdir_for_user(repo_id, path, owner,
share_user)
def update_share_subdir_perm_for_user(self, repo_id, path, owner,
share_user, permission):
return seafserv_threaded_rpc.update_share_subdir_perm_for_user(repo_id, path, owner,
share_user, permission)
def get_share_out_repo_list(self, username, start, limit):
"""
Get repo list shared by this user.
Return: a list of Repo objects
"""
return seafserv_threaded_rpc.list_share_repos(username, "from_email",
start, limit)
def get_share_in_repo_list(self, username, start, limit):
"""
Get repo list shared to this user.
"""
return seafserv_threaded_rpc.list_share_repos(username, "to_email",
start, limit)
def list_repo_shared_to(self, from_user, repo_id):
"""
Get user list this repo is shared to.
Return: a list of SharedUser objects (lib/repo.vala)
"""
return seafserv_threaded_rpc.list_repo_shared_to(from_user, repo_id)
# share repo to group
def group_share_repo(self, repo_id, group_id, username, permission):
# deprecated, use ``set_group_repo``
return seafserv_threaded_rpc.group_share_repo(repo_id, group_id,
username, permission)
def set_group_repo(self, repo_id, group_id, username, permission):
return seafserv_threaded_rpc.group_share_repo(repo_id, group_id,
username, permission)
def group_unshare_repo(self, repo_id, group_id, username):
# deprecated, use ``unset_group_repo``
return seafserv_threaded_rpc.group_unshare_repo(repo_id, group_id, username)
def unset_group_repo(self, repo_id, group_id, username):
return seafserv_threaded_rpc.group_unshare_repo(repo_id, group_id, username)
def get_shared_group_ids_by_repo(self, repo_id):
"""
        Return: a string containing the list of group ids. Each id is separated by '\n'
"""
return seafserv_threaded_rpc.get_shared_groups_by_repo(repo_id)
def list_repo_shared_group(self, from_user, repo_id):
# deprecated, use list_repo_shared_group_by_user instead.
return seafserv_threaded_rpc.list_repo_shared_group(from_user, repo_id)
def list_repo_shared_group_by_user(self, from_user, repo_id):
"""
Return: a list of SharedGroup objects (lib/repo.vala)
"""
return seafserv_threaded_rpc.list_repo_shared_group(from_user, repo_id)
def share_subdir_to_group(self, repo_id, path, owner, share_group, permission, passwd=''):
return seafserv_threaded_rpc.share_subdir_to_group(repo_id, path, owner,
share_group, permission, passwd)
def unshare_subdir_for_group(self, repo_id, path, owner, share_group):
return seafserv_threaded_rpc.unshare_subdir_for_group(repo_id, path, owner,
share_group)
def update_share_subdir_perm_for_group(self, repo_id, path, owner,
share_group, permission):
return seafserv_threaded_rpc.update_share_subdir_perm_for_group(repo_id, path, owner,
share_group, permission)
def get_group_repoids(self, group_id):
"""
Return the list of group repo ids
"""
repo_ids = seafserv_threaded_rpc.get_group_repoids(group_id)
if not repo_ids:
return []
l = []
for repo_id in repo_ids.split("\n"):
if repo_id == '':
continue
l.append(repo_id)
return l
def get_group_repo_list(self, group_id):
# deprecated, use get_repos_by_group instead.
ret = []
for repo_id in self.get_group_repoids(group_id):
r = self.get_repo(repo_id)
if r is None:
continue
ret.append(r)
return ret
def get_repos_by_group(self, group_id):
"""
Return: a list of Repo objects
"""
return seafserv_threaded_rpc.get_repos_by_group(group_id)
def get_group_repos_by_owner(self, username):
"""
        Get all repos a user shares to any group
Return: a list of Repo objects
"""
return seafserv_threaded_rpc.get_group_repos_by_owner(username)
def remove_group_repos_by_owner(self, group_id, username):
"""
Unshare all repos a user shared to a group.
"""
return seafserv_threaded_rpc.remove_repo_group(group_id, username)
def remove_group_repos(self, group_id):
"""
Remove all repos under group.
Return: 0 success; -1 failed
"""
return seafserv_threaded_rpc.remove_repo_group(group_id, None)
def set_group_repo_permission(self, group_id, repo_id, permission):
return seafserv_threaded_rpc.set_group_repo_permission(group_id, repo_id,
permission)
def get_shared_users_for_subdir(self, repo_id, path, from_user):
"""
Get all users a path is shared to.
Return: a list of SharedUser objects.
"""
return seafserv_threaded_rpc.get_shared_users_for_subdir(repo_id, path, from_user)
def get_shared_groups_for_subdir(self, repo_id, path, from_user):
"""
Get all groups a path is shared to.
Return: a list of SharedGroup objects.
"""
return seafserv_threaded_rpc.get_shared_groups_for_subdir(repo_id, path, from_user)
# organization wide repo
def add_inner_pub_repo(self, repo_id, permission):
return seafserv_threaded_rpc.set_inner_pub_repo(repo_id, permission)
def remove_inner_pub_repo(self, repo_id):
return seafserv_threaded_rpc.unset_inner_pub_repo(repo_id)
def get_inner_pub_repo_list(self):
"""
Return: a list of Repo objects.
"""
return seafserv_threaded_rpc.list_inner_pub_repos()
def list_inner_pub_repos_by_owner(self, repo_owner):
"""
Return: a list of Repo objects.
"""
return seafserv_threaded_rpc.list_inner_pub_repos_by_owner(repo_owner)
def count_inner_pub_repos(self):
return seafserv_threaded_rpc.count_inner_pub_repos()
def is_inner_pub_repo(self, repo_id):
return seafserv_threaded_rpc.is_inner_pub_repo(repo_id)
# permission checks
def check_permission(self, repo_id, user):
"""
Check repo share permissions. Only check user share, group share and inner-pub
shares.
Return: 'r', 'rw', or None
"""
return seafserv_threaded_rpc.check_permission(repo_id, user)
def check_permission_by_path(self, repo_id, path, user):
"""
Check both repo share permission and sub-folder access permissions.
This function should be used when updating file/folder in a repo.
In CE, this function is equivalent to check_permission.
Return: 'r', 'rw', or None
"""
return seafserv_threaded_rpc.check_permission_by_path(repo_id, path, user)
# token
def generate_repo_token(self, repo_id, username):
"""Generate a token for sync a repo
"""
return seafserv_threaded_rpc.generate_repo_token(repo_id, username)
def delete_repo_token(self, repo_id, token, user):
return seafserv_threaded_rpc.delete_repo_token(repo_id, token, user)
def list_repo_tokens(self, repo_id):
"""
Return: a list of RepoTokenInfo objects.
"""
return seafserv_threaded_rpc.list_repo_tokens(repo_id)
def list_repo_tokens_by_email(self, username):
return seafserv_threaded_rpc.list_repo_tokens_by_email(username)
def delete_repo_tokens_by_peer_id(self, email, peer_id):
return seafserv_threaded_rpc.delete_repo_tokens_by_peer_id(email, peer_id)
def delete_repo_tokens_by_email(self, email):
return seafserv_threaded_rpc.delete_repo_tokens_by_email(email)
# quota
def get_user_self_usage(self, username):
"""Get the sum of repos' size of the user"""
return seafserv_threaded_rpc.get_user_quota_usage(username)
def get_user_share_usage(self, username):
# sum (repo_size * number_of_shares)
return seafserv_threaded_rpc.get_user_share_usage(username)
def get_user_quota(self, username):
"""
        Return: -2 if quota is unlimited; otherwise a number > 0.
"""
return seafserv_threaded_rpc.get_user_quota(username)
def set_user_quota(self, username, quota):
return seafserv_threaded_rpc.set_user_quota(username, quota)
def get_user_share_quota(self, username):
return -2 # unlimited
def set_user_share_quota(self, username, quota):
pass
def check_quota(self, repo_id):
pass
# encrypted repo password management
def check_passwd(self, repo_id, magic):
return seafserv_threaded_rpc.check_passwd(repo_id, magic)
def set_passwd(self, repo_id, user, passwd):
return seafserv_threaded_rpc.set_passwd(repo_id, user, passwd)
def unset_passwd(self, repo_id, user, passwd):
return seafserv_threaded_rpc.unset_passwd(repo_id, user, passwd)
def generate_magic_and_random_key(self, enc_version, repo_id, password):
return seafserv_threaded_rpc.generate_magic_and_random_key(enc_version, repo_id, password)
# virtual repo
def create_virtual_repo(self, origin_repo_id, path, repo_name, repo_desc, owner, passwd=''):
return seafserv_threaded_rpc.create_virtual_repo(origin_repo_id,
path,
repo_name,
repo_desc,
owner,
passwd)
def get_virtual_repos_by_owner(self, owner):
return seafserv_threaded_rpc.get_virtual_repos_by_owner(owner)
def get_virtual_repo(self, origin_repo, path, owner):
return seafserv_threaded_rpc.get_virtual_repo(origin_repo, path, owner)
# Clean trash
def clean_up_repo_history(self, repo_id, keep_days):
return seafserv_threaded_rpc.clean_up_repo_history(repo_id, keep_days)
# Trashed repos
def get_trash_repo_list(self, start, limit):
return seafserv_threaded_rpc.get_trash_repo_list(start, limit)
def del_repo_from_trash(self, repo_id):
return seafserv_threaded_rpc.del_repo_from_trash(repo_id)
def restore_repo_from_trash(self, repo_id):
return seafserv_threaded_rpc.restore_repo_from_trash(repo_id)
def get_trash_repos_by_owner(self, owner):
return seafserv_threaded_rpc.get_trash_repos_by_owner(owner)
def empty_repo_trash(self):
return seafserv_threaded_rpc.empty_repo_trash()
def empty_repo_trash_by_owner(self, owner):
return seafserv_threaded_rpc.empty_repo_trash_by_owner(owner)
seafile_api = SeafileAPI()
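# Illustrative usage sketch (not part of the original module): callers are
# expected to go through the module-level ``seafile_api`` singleton rather than
# calling seafserv_threaded_rpc directly. The repo id, email and paging values
# passed in are hypothetical placeholders supplied by the caller.
def _example_user_repo_access(repo_id, username, start=0, limit=100):
    """Return the user's permission on a repo plus the repos shared to him."""
    perm = seafile_api.check_permission(repo_id, username)  # 'r', 'rw' or None
    shared_in = seafile_api.get_share_in_repo_list(username, start, limit)
    return perm, shared_in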
class CcnetAPI(object):
def __init__(self):
pass
# user management
def add_emailuser(self, email, passwd, is_staff, is_active):
return ccnet_threaded_rpc.add_emailuser(email, passwd, is_staff, is_active)
def remove_emailuser(self, source, email):
"""
source can be 'DB' or 'LDAP'.
- 'DB': remove a user created in local database
- 'LDAP': remove a user imported from LDAP
"""
return ccnet_threaded_rpc.remove_emailuser(source, email)
def validate_emailuser(self, email, passwd):
"""
Verify user's password on login. Can be used to verify DB and LDAP users.
        The function first verifies the password against LDAP, then against the local database.
"""
return ccnet_threaded_rpc.validate_emailuser(email, passwd)
def get_emailuser(self, email):
"""
Only return local database user or imported LDAP user.
        It first looks up the user in the local database; if not found, it looks
        up the imported LDAP user.
Return: a list of EmailUser objects (ccnet/lib/ccnetobj.vala)
The 'source' attribute of EmailUser object is set to 'LDAPImport' for LDAP
imported user, and 'DB' for local database user.
"""
return ccnet_threaded_rpc.get_emailuser(email)
def get_emailuser_with_import(self, email):
"""
The same as get_emailuser() but import the user from LDAP if it was not
imported yet.
"""
return ccnet_threaded_rpc.get_emailuser_with_import(email)
def get_emailuser_by_id(self, user_id):
"""
Get a user from local database with the db index id.
"""
return ccnet_threaded_rpc.get_emailuser_by_id(user_id)
def get_emailusers(self, source, start, limit, is_active=None):
"""
source:
- 'DB': return local db users
- 'LDAPImport': return imported LDAP users
- 'LDAP': retrieve users directly from LDAP server
start: offset to start retrieving, -1 to start from the beginning
        limit: number of users to get, -1 to get all users from start
is_active: True to return only active users; False to return inactive users;
None to return all users.
Return: a list of EmailUser objects.
"""
if is_active is True:
status = "active" # list active users
elif is_active is False:
status = "inactive" # list inactive users
else:
status = "" # list all users
return ccnet_threaded_rpc.get_emailusers(source, start, limit, status)
def search_emailusers(self, source, email_patt, start, limit):
"""
Search for users whose name contains @email_patt.
source: 'DB' for local db users; 'LDAP' for imported LDAP users.
        This function cannot search LDAP users directly on the LDAP server.
"""
return ccnet_threaded_rpc.search_emailusers(source, email_patt, start, limit)
def search_ldapusers(self, keyword, start, limit):
"""
Search for users whose name contains @keyword directly from LDAP server.
"""
return ccnet_threaded_rpc.search_ladpusers(keyword, start, limit)
def count_emailusers(self, source):
"""
Return the number of active users by source.
source: 'DB' for local db users; 'LDAP' for imported LDAP users.
"""
return ccnet_threaded_rpc.count_emailusers(source)
def count_inactive_emailusers(self, source):
"""
Return the number of inactive users by source.
source: 'DB' for local db users; 'LDAP' for imported LDAP users.
"""
return ccnet_threaded_rpc.count_inactive_emailusers(source)
def update_emailuser(self, source, user_id, password, is_staff, is_active):
"""
source: 'DB' for local db user; 'LDAP' for imported LDAP user.
user_id: usually not changed.
password: new password in plain text. Only effective for DB users.
If '!' is passed, the password won't be updated.
is_staff: change superuser status
is_active: activate or deactivate user
"""
return ccnet_threaded_rpc.update_emailuser(source, user_id, password, is_staff, is_active)
def update_role_emailuser(self, email, role):
return ccnet_threaded_rpc.update_role_emailuser(email, role)
def get_superusers(self):
"""
Return: a list of EmailUser objects.
"""
return ccnet_threaded_rpc.get_superusers()
# group management
def create_group(self, group_name, user_name, gtype=None):
"""
For CE, gtype is not used and should always be None.
"""
return ccnet_threaded_rpc.create_group(group_name, user_name, gtype)
def create_org_group(self, org_id, group_name, user_name):
return ccnet_threaded_rpc.create_org_group(org_id, group_name, user_name)
def remove_group(self, group_id):
"""
        Permission check should be done before calling this function.
"""
return ccnet_threaded_rpc.remove_group(group_id)
def group_add_member(self, group_id, user_name, member_name):
"""
user_name: unused.
"""
return ccnet_threaded_rpc.group_add_member(group_id, user_name, member_name)
def group_remove_member(self, group_id, user_name, member_name):
"""
user_name: unused.
"""
return ccnet_threaded_rpc.group_remove_member(group_id, user_name, member_name)
def group_set_admin(self, group_id, member_name):
"""
No effect if member_name is not in the group.
"""
return ccnet_threaded_rpc.group_set_admin(group_id, member_name)
def group_unset_admin(self, group_id, member_name):
"""
No effect if member_name is not in the group.
"""
return ccnet_threaded_rpc.group_unset_admin(group_id, member_name)
def set_group_name(self, group_id, group_name):
return ccnet_threaded_rpc.set_group_name(group_id, group_name)
def quit_group(self, group_id, user_name):
return ccnet_threaded_rpc.quit_group(group_id, user_name)
def get_groups(self, user_name):
"""
Get all groups the user belongs to.
Return: a list of Group objects (ccnet/lib/ccnetobj.vala)
"""
return ccnet_threaded_rpc.get_groups(user_name)
def get_all_groups(self, start, limit, source=None):
"""
        For CE, source is not used and should always be None.
"""
return ccnet_threaded_rpc.get_all_groups(start, limit, source)
def get_group(self, group_id):
return ccnet_threaded_rpc.get_group(group_id)
def get_group_members(self, group_id):
"""
Return a list of GroupUser objects (ccnet/lib/ccnetobj.vala)
"""
return ccnet_threaded_rpc.get_group_members(group_id)
def check_group_staff(self, group_id, username):
"""
Return non-zero value if true, 0 if not true
"""
return ccnet_threaded_rpc.check_group_staff(group_id, username)
def remove_group_user(self, username):
return ccnet_threaded_rpc.remove_group_user(username)
def is_group_user(self, group_id, user):
"""
Return non-zero value if true, 0 if not true
"""
return ccnet_threaded_rpc.is_group_user(group_id, user)
def set_group_creator(self, group_id, user_name):
return ccnet_threaded_rpc.set_group_creator(group_id, user_name)
# organization management
def create_org(self, org_name, url_prefix, creator):
return ccnet_threaded_rpc.create_org(org_name, url_prefix, creator)
def remove_org(self, org_id):
return ccnet_threaded_rpc.remove_org(org_id)
def get_all_orgs(self, start, limit):
"""
Return a list of Organization objects (ccnet/lib/ccnetobj.vala)
"""
return ccnet_threaded_rpc.get_all_orgs(start, limit)
def count_orgs(self):
return ccnet_threaded_rpc.count_orgs()
def get_org_by_url_prefix(self, url_prefix):
"""
        Return an Organization object.
"""
return ccnet_threaded_rpc.get_org_by_url_prefix(url_prefix)
def get_org_by_id(self, org_id):
return ccnet_threaded_rpc.get_org_by_id(org_id)
def add_org_user(self, org_id, email, is_staff):
return ccnet_threaded_rpc.add_org_user(org_id, email, is_staff)
def remove_org_user(self, org_id, email):
return ccnet_threaded_rpc.remove_org_user(org_id, email)
def get_orgs_by_user(self, email):
return ccnet_threaded_rpc.get_orgs_by_user(email)
def get_org_emailusers(self, url_prefix, start, limit):
"""
Return a list of EmailUser objects.
"""
return ccnet_threaded_rpc.get_org_emailusers(url_prefix, start, limit)
def add_org_group(self, org_id, group_id):
return ccnet_threaded_rpc.add_org_group(org_id, group_id)
def remove_org_group(self, org_id, group_id):
return ccnet_threaded_rpc.remove_org_group(org_id, group_id)
def is_org_group(self, group_id):
"""
Return non-zero if True, otherwise 0.
"""
return ccnet_threaded_rpc.is_org_group(group_id)
def get_org_id_by_group(self, group_id):
return ccnet_threaded_rpc.get_org_id_by_group(group_id)
def get_org_groups(self, org_id, start, limit):
"""
Return a list of int, each int is group id.
"""
return ccnet_threaded_rpc.get_org_groups(org_id, start, limit)
def org_user_exists(self, org_id, email):
"""
Return non-zero if True, otherwise 0.
"""
return ccnet_threaded_rpc.org_user_exists(org_id, email)
def is_org_staff(self, org_id, user):
"""
Return non-zero if True, otherwise 0.
"""
return ccnet_threaded_rpc.is_org_staff(org_id, user)
def set_org_staff(self, org_id, user):
return ccnet_threaded_rpc.set_org_staff(org_id, user)
def unset_org_staff(self, org_id, user):
return ccnet_threaded_rpc.unset_org_staff(org_id, user)
def set_org_name(self, org_id, org_name):
return ccnet_threaded_rpc.set_org_name(org_id, org_name)
ccnet_api = CcnetAPI()
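# Illustrative usage sketch (not part of the original module): paging through
# active local-database users with the ``ccnet_api`` singleton. The page size
# is a hypothetical value.
def _example_iter_active_db_users(page_size=50):
    """Yield active users from the local database, page by page."""
    start = 0
    while True:
        users = ccnet_api.get_emailusers('DB', start, page_size, is_active=True)
        if not users:
            break
        for user in users:
            yield user
        start += page_size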

View File

@ -1,958 +0,0 @@
from datetime import datetime
import json
import logging
import os
import sys
import ConfigParser
from urlparse import urlparse
import ccnet
import seafile
import re
from pysearpc import SearpcError
_DEBUG = 'SEAFILE_DEBUG' in os.environ
ENVIRONMENT_VARIABLES = ('CCNET_CONF_DIR', 'SEAFILE_CONF_DIR')
# Used to fix bug in some rpc calls, will be removed in near future.
MAX_INT = 2147483647
def _load_path_from_env(key, check=True):
v = os.environ.get(key, '')
if not v:
if check:
raise ImportError("Seaserv cannot be imported, because environment variable %s is undefined." % key)
return None
if _DEBUG:
print "Loading %s from %s" % (key, v)
return os.path.normpath(os.path.expanduser(v))
CCNET_CONF_PATH = _load_path_from_env('CCNET_CONF_DIR')
SEAFILE_CONF_DIR = _load_path_from_env('SEAFILE_CONF_DIR')
SEAFILE_CENTRAL_CONF_DIR = _load_path_from_env('SEAFILE_CENTRAL_CONF_DIR', check=False)
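# Example environment (hypothetical paths) expected before importing this
# module; SEAFILE_CENTRAL_CONF_DIR is optional (check=False above):
#   export CCNET_CONF_DIR=/opt/seafile/ccnet
#   export SEAFILE_CONF_DIR=/opt/seafile/seafile-data
#   export SEAFILE_CENTRAL_CONF_DIR=/opt/seafile/conf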
pool = ccnet.ClientPool(CCNET_CONF_PATH, central_config_dir=SEAFILE_CENTRAL_CONF_DIR)
ccnet_rpc = ccnet.CcnetRpcClient(pool, req_pool=True)
ccnet_threaded_rpc = ccnet.CcnetThreadedRpcClient(pool, req_pool=True)
seafserv_rpc = seafile.ServerRpcClient(pool, req_pool=True)
seafserv_threaded_rpc = seafile.ServerThreadedRpcClient(pool, req_pool=True)
# load ccnet server addr and port from ccnet.conf.
# 'addr:port' is used when downloading a repo
config = ConfigParser.ConfigParser()
config.read(os.path.join(SEAFILE_CENTRAL_CONF_DIR if SEAFILE_CENTRAL_CONF_DIR else CCNET_CONF_PATH,
'ccnet.conf'))
if config.has_option('General', 'SERVICE_URL'):
service_url = config.get('General', 'SERVICE_URL')
hostname = urlparse(service_url).hostname
SERVICE_URL = service_url
CCNET_SERVER_ADDR = hostname
if config.has_option('Network', 'PORT'):
CCNET_SERVER_PORT = config.get('Network', 'PORT')
else:
CCNET_SERVER_PORT = 10001
else:
print "Warning: SERVICE_URL not set in ccnet.conf"
CCNET_SERVER_ADDR = None
CCNET_SERVER_PORT = None
SERVICE_URL = None
SERVER_ID = config.get('General', 'ID')
config.read(os.path.join(SEAFILE_CENTRAL_CONF_DIR if SEAFILE_CENTRAL_CONF_DIR else SEAFILE_CONF_DIR,
'seafile.conf'))
def get_fileserver_option(key, default):
'''
"fileserver" used to be "httpserver"
'''
for section in ('fileserver', 'httpserver'):
if config.has_option(section, key):
return config.get(section, key)
return default
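# Example (hypothetical seafile.conf snippet) showing why the fallback above
# exists: older deployments still use an [httpserver] section while newer ones
# use [fileserver]; get_fileserver_option() reads whichever section is present.
#   [fileserver]
#   port = 8082
#   max_upload_size = 200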
MAX_UPLOAD_FILE_SIZE = None # Defaults to no limit
try:
max_upload_size_mb = int(get_fileserver_option('max_upload_size', 0))
if max_upload_size_mb > 0:
MAX_UPLOAD_FILE_SIZE = max_upload_size_mb * (2 ** 20)
except ValueError:
pass
MAX_DOWNLOAD_DIR_SIZE = 100 * (2 ** 20) # Default max size of a downloadable dir
try:
max_download_dir_size_mb = int(get_fileserver_option('max_download_dir_size', 0))
if max_download_dir_size_mb > 0:
MAX_DOWNLOAD_DIR_SIZE = max_download_dir_size_mb * (2 ** 20)
except ValueError:
pass
FILE_SERVER_PORT = get_fileserver_option('port', '8082')
if CCNET_SERVER_ADDR:
FILE_SERVER_ROOT = 'http://' + CCNET_SERVER_ADDR + ':' + FILE_SERVER_PORT
else:
FILE_SERVER_ROOT = None
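# For example, with SERVICE_URL = 'https://demo.example.com/' (hypothetical)
# and the default port, FILE_SERVER_ROOT becomes 'http://demo.example.com:8082'.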
CALC_SHARE_USAGE = False
if config.has_option('quota', 'calc_share_usage'):
CALC_SHARE_USAGE = config.getboolean('quota', 'calc_share_usage')
# Get an instance of a logger
logger = logging.getLogger(__name__)
#### Basic ccnet API ####
def get_emailusers(source, start, limit, is_active=None):
if is_active is True:
status = "active" # list active users
elif is_active is False:
status = "inactive" # list inactive users
else:
status = "" # list all users
return ccnet_threaded_rpc.get_emailusers(source, start, limit, status)
def count_emailusers():
try:
ret = ccnet_threaded_rpc.count_emailusers()
except SearpcError:
ret = -1
return 0 if ret < 0 else ret
def get_emailuser_with_import(email):
return ccnet_threaded_rpc.get_emailuser_with_import(email)
def get_session_info():
return ccnet_rpc.get_session_info()
# group
def get_group(group_id):
group_id_int = int(group_id)
try:
group = ccnet_threaded_rpc.get_group(group_id_int)
except SearpcError:
group = None
return group
def get_personal_groups(start, limit):
try:
groups_all = ccnet_threaded_rpc.get_all_groups(start, limit)
except SearpcError:
return []
return [ x for x in groups_all if not is_org_group(x.id) ]
def get_personal_groups_by_user(email):
try:
groups_all = ccnet_threaded_rpc.get_groups(email)
except SearpcError:
return []
return [ x for x in groups_all if not is_org_group(x.id) ]
# group user
def is_group_user(group_id, user):
try:
ret = ccnet_threaded_rpc.is_group_user(group_id, user)
except SearpcError:
ret = 0
return ret
def check_group_staff(group_id, username):
"""Check where user is group staff"""
group_id = int(group_id)
try:
ret = ccnet_threaded_rpc.check_group_staff(group_id, username)
except SearpcError, e:
logger.error(e)
ret = 0
return True if ret == 1 else False
def remove_group_user(user):
"""
Remove group user relationship.
"""
return ccnet_threaded_rpc.remove_group_user(user)
def get_group_members(group_id, start=-1, limit=-1):
group_id_int = int(group_id)
try:
members = ccnet_threaded_rpc.get_group_members(group_id_int)
except SearpcError:
members = []
return members
# org group
def is_org_group(group_id):
try:
ret = ccnet_threaded_rpc.is_org_group(group_id)
except SearpcError:
ret = -1
return True if ret == 1 else False
def get_org_id_by_group(group_id):
try:
org_id = ccnet_threaded_rpc.get_org_id_by_group(group_id)
except SearpcError:
org_id = -1
return org_id
def get_org_groups(org_id, start, limit):
try:
groups = ccnet_threaded_rpc.get_org_groups(org_id, start, limit)
except SearpcError:
groups = []
return groups
def get_org_groups_by_user(org_id, user):
"""
Get user's groups in org.
"""
try:
groups_all = ccnet_threaded_rpc.get_groups(user)
except SearpcError:
return []
return [ x for x in groups_all if org_id == get_org_id_by_group(x.id) ]
# org
def create_org(org_name, url_prefix, username):
ccnet_threaded_rpc.create_org(org_name, url_prefix, username)
def get_org_by_url_prefix(url_prefix):
try:
org = ccnet_threaded_rpc.get_org_by_url_prefix(url_prefix)
except SearpcError:
org = None
return org
def get_org_by_id(org_id):
try:
org = ccnet_threaded_rpc.get_org_by_id(org_id)
except SearpcError:
org = None
return org
# org user
def add_org_user(org_id, email, is_staff):
try:
ccnet_threaded_rpc.add_org_user(org_id, email, is_staff)
except SearpcError:
pass
def remove_org_user(org_id, email):
try:
ccnet_threaded_rpc.remove_org_user(org_id, email)
except SearpcError:
pass
def org_user_exists(org_id, user):
try:
ret = ccnet_threaded_rpc.org_user_exists(org_id, user)
except SearpcError:
ret = -1
return True if ret == 1 else False
def get_org_users_by_url_prefix(url_prefix, start, limit):
"""
List org users.
"""
try:
users = ccnet_threaded_rpc.get_org_emailusers(url_prefix, start, limit)
except:
users = []
return users
def get_orgs_by_user(user):
try:
orgs = ccnet_threaded_rpc.get_orgs_by_user(user)
except SearpcError:
orgs = []
return orgs
def is_org_staff(org_id, user):
"""
Check whether user is staff of a org.
"""
try:
ret = ccnet_threaded_rpc.is_org_staff(org_id, user)
except SearpcError:
ret = -1
return True if ret == 1 else False
def get_user_current_org(user, url_prefix):
orgs = get_orgs_by_user(user)
for org in orgs:
if org.url_prefix == url_prefix:
return org
return None
def send_command(command):
client = pool.get_client()
client.send_cmd(command)
ret = client.response[2]
pool.return_client(client)
return ret
def send_message(msg_type, content):
client = pool.get_client()
client.send_message(msg_type, content)
pool.return_client(client)
def get_binding_peerids(email):
"""Get peer ids of a given email"""
try:
peer_ids = ccnet_threaded_rpc.get_binding_peerids(email)
except SearpcError:
return []
if not peer_ids:
return []
peerid_list = []
for peer_id in peer_ids.split("\n"):
if peer_id == '':
continue
peerid_list.append(peer_id)
return peerid_list
######## seafserv API ####
# repo
def get_repos():
"""
Return repository list.
"""
return seafserv_threaded_rpc.get_repo_list("", 100)
def get_repo(repo_id):
return seafserv_threaded_rpc.get_repo(repo_id)
def edit_repo(repo_id, name, desc, user):
try:
ret = seafserv_threaded_rpc.edit_repo(repo_id, name, desc, user)
except SearpcError, e:
ret = -1
return True if ret == 0 else False
def create_repo(name, desc, user, passwd):
"""
Return repo id if successfully created a repo, otherwise None.
"""
try:
ret = seafserv_threaded_rpc.create_repo(name, desc, user, passwd)
except SearpcError, e:
logger.error(e)
ret = None
return ret
def remove_repo(repo_id):
"""
Return true if successfully removed a repo, otherwise false.
"""
try:
ret = seafserv_threaded_rpc.remove_repo(repo_id)
except SearpcError, e:
logger.error(e)
ret = -1
return True if ret == 0 else False
def list_personal_repos_by_owner(owner):
"""
    List a user's owned repos in personal context.
"""
try:
repos = seafserv_threaded_rpc.list_owned_repos(owner)
except SearpcError:
repos = []
return repos
def get_repo_token_nonnull(repo_id, username):
return seafserv_threaded_rpc.get_repo_token_nonnull (repo_id, username)
def get_repo_owner(repo_id):
"""
Get owner of a repo.
"""
try:
ret = seafserv_threaded_rpc.get_repo_owner(repo_id)
except SearpcError:
ret = ''
return ret
def is_repo_owner(user, repo_id):
"""
Check whether user is repo owner.
"""
try:
ret = seafserv_threaded_rpc.is_repo_owner(user, repo_id)
except SearpcError:
ret = 0
return ret
def server_repo_size(repo_id):
try:
size = seafserv_threaded_rpc.server_repo_size(repo_id)
except SearpcError:
size = 0
return size
# org repo
def create_org_repo(repo_name, repo_desc, user, passwd, org_id):
"""
Create org repo, return valid repo id if success.
"""
try:
repo_id = seafserv_threaded_rpc.create_org_repo(repo_name, repo_desc,
user, passwd, org_id)
except SearpcError:
repo_id = None
return repo_id
def is_org_repo(repo_id):
org_id = get_org_id_by_repo_id(repo_id)
return True if org_id > 0 else False
def list_org_repos_by_owner(org_id, user):
try:
repos = seafserv_threaded_rpc.list_org_repos_by_owner(org_id, user)
except SearpcError:
repos = []
return repos
def get_org_repos(org_id, start, limit):
"""
List repos created in org.
"""
try:
repos = seafserv_threaded_rpc.get_org_repo_list(org_id, start, limit)
except SearpcError:
repos = []
if repos:
for r in repos:
r.owner = get_org_repo_owner(r.id)
return repos
def get_org_id_by_repo_id(repo_id):
"""
    Get org id according to repo id.
"""
try:
org_id = seafserv_threaded_rpc.get_org_id_by_repo_id(repo_id)
except SearpcError:
org_id = -1
return org_id
def is_org_repo_owner(org_id, repo_id, user):
"""
Check whether user is org repo owner.
NOTE:
    `org_id` may be used in the future.
"""
owner = get_org_repo_owner(repo_id)
if not owner:
return False
return True if owner == user else False
def get_org_repo_owner(repo_id):
"""
Get owner of org repo.
"""
try:
owner = seafserv_threaded_rpc.get_org_repo_owner(repo_id)
except SearpcError:
owner = None
return owner
# commit
def get_commit(repo_id, repo_version, cmt_id):
""" Get a commit. """
try:
ret = seafserv_threaded_rpc.get_commit(repo_id, repo_version, cmt_id)
except SearpcError:
ret = None
return ret
def get_commits(repo_id, offset, limit):
"""Get commit lists."""
try:
ret = seafserv_threaded_rpc.get_commit_list(repo_id, offset, limit)
except SearpcError:
ret = None
return ret
# branch
def get_branches(repo_id):
"""Get branches of a given repo"""
return seafserv_threaded_rpc.branch_gets(repo_id)
# group repo
def get_group_repos_by_owner(user):
"""
    List user's repos that are shared to groups
"""
try:
ret = seafserv_threaded_rpc.get_group_repos_by_owner(user)
except SearpcError:
ret = []
return ret
def get_shared_groups_by_repo(repo_id):
try:
group_ids = seafserv_threaded_rpc.get_shared_groups_by_repo(repo_id)
except SearpcError:
group_ids = ''
if not group_ids:
return []
groups = []
for group_id in group_ids.split('\n'):
if not group_id:
continue
group = get_group(group_id)
if group:
groups.append(group)
return groups
def conv_repoids_to_list(repo_ids):
"""
    Convert repo ids separated by "\n" to a list.
"""
if not repo_ids:
return []
repoid_list = []
for repo_id in repo_ids.split("\n"):
if repo_id == '':
continue
repoid_list.append(repo_id)
return repoid_list
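# Example (hypothetical ids): conv_repoids_to_list("repo-1\nrepo-2\n") returns
# ['repo-1', 'repo-2'].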
def get_group_repoids(group_id):
"""Get repo ids of a given group id."""
try:
repo_ids = seafserv_threaded_rpc.get_group_repoids(group_id)
except SearpcError:
return []
return conv_repoids_to_list(repo_ids)
def get_group_repos(group_id, user):
"""Get repos of a given group id."""
repoid_list = get_group_repoids(group_id)
repos = []
for repo_id in repoid_list:
if not repo_id:
continue
repo = get_repo(repo_id)
if not repo:
continue
repo.owner = seafserv_threaded_rpc.get_group_repo_owner(repo_id)
repo.share_from_me = True if user == repo.owner else False
last_commit = get_commits(repo.id, 0, 1)[0]
repo.latest_modify = last_commit.ctime if last_commit else None
repos.append(repo)
repos.sort(lambda x, y: cmp(y.latest_modify, x.latest_modify))
return repos
# org group repo
def del_org_group_repo(repo_id, org_id, group_id):
seafserv_threaded_rpc.del_org_group_repo(repo_id, org_id, group_id)
def get_org_group_repoids(org_id, group_id):
try:
repo_ids = seafserv_threaded_rpc.get_org_group_repoids(org_id, group_id)
except SearpcError:
repo_ids = ''
return conv_repoids_to_list(repo_ids)
def get_org_group_repos(org_id, group_id, user):
"""Get org repos of a given group id."""
repoid_list = get_org_group_repoids(org_id, group_id)
if not repoid_list:
return []
repos = []
for repo_id in repoid_list:
if not repo_id:
continue
repo = get_repo(repo_id)
if not repo:
continue
repo.owner = seafserv_threaded_rpc.get_org_group_repo_owner(org_id,
group_id,
repo_id)
        repo.share_from_me = True if user == repo.owner else False
last_commit = get_commits(repo.id, 0, 1)[0]
repo.latest_modify = last_commit.ctime if last_commit else None
repos.append(repo)
repos.sort(lambda x, y: cmp(y.latest_modify, x.latest_modify))
return repos
def get_org_groups_by_repo(org_id, repo_id):
try:
group_ids = seafserv_threaded_rpc.get_org_groups_by_repo(org_id,
repo_id)
except SearpcError:
group_ids = ''
if not group_ids:
return []
groups = []
for group_id in group_ids.split('\n'):
if not group_id:
continue
group = get_group(group_id)
if group:
groups.append(group)
return groups
# inner pub repo
def list_inner_pub_repos_by_owner(user):
"""
List a user's inner pub repos.
"""
try:
ret = seafserv_threaded_rpc.list_inner_pub_repos_by_owner(user)
except SearpcError:
ret = []
return ret
def list_inner_pub_repos(username):
"""
    List inner pub repos, which can be accessed by everyone.
"""
try:
shared_repos = seafserv_threaded_rpc.list_inner_pub_repos()
except:
shared_repos = []
for repo in shared_repos:
repo.user_perm = check_permission(repo.props.repo_id, username)
shared_repos.sort(lambda x, y: cmp(y.props.last_modified, x.props.last_modified))
return shared_repos
def count_inner_pub_repos():
try:
ret = seafserv_threaded_rpc.count_inner_pub_repos()
except SearpcError:
ret = -1
return 0 if ret < 0 else ret
def is_inner_pub_repo(repo_id):
"""
Check whether a repo is public.
Return 0 if repo is not inner public, otherwise non-zero.
"""
try:
ret = seafserv_threaded_rpc.is_inner_pub_repo(repo_id)
except SearpcError:
ret = 0
return ret
def unset_inner_pub_repo(repo_id):
seafserv_threaded_rpc.unset_inner_pub_repo(repo_id)
# org inner pub repo
def list_org_inner_pub_repos(org_id, username, start=None, limit=None):
"""
    List org inner pub repos, which can be accessed by all org members.
"""
try:
shared_repos = seafserv_threaded_rpc.list_org_inner_pub_repos(org_id)
except SearpcError:
shared_repos = []
for repo in shared_repos:
repo.user_perm = check_permission(repo.props.repo_id, username)
# sort repos by last modify time
shared_repos.sort(lambda x, y: cmp(y.props.last_modified, x.props.last_modified))
return shared_repos
# repo permission
def check_permission(repo_id, user):
"""
Check whether user has permission to access repo.
Return values can be 'rw' or 'r' or None.
"""
try:
ret = seafserv_threaded_rpc.check_permission(repo_id, user)
except SearpcError:
ret = None
return ret
def is_personal_repo(repo_id):
"""
Check whether repo is personal repo.
"""
try:
owner = seafserv_threaded_rpc.get_repo_owner(repo_id)
except SearpcError:
owner = ''
return True if owner else False
# shared repo
def list_share_repos(user, share_type, start, limit):
try:
ret = seafserv_threaded_rpc.list_share_repos(user, share_type,
start, limit)
except SearpcError:
ret = []
return ret
def remove_share(repo_id, from_user, to_user):
seafserv_threaded_rpc.remove_share(repo_id, from_user, to_user)
def unshare_group_repo(repo_id, group_id, from_user):
return seafserv_threaded_rpc.group_unshare_repo(repo_id, int(group_id),
from_user)
def list_personal_shared_repos(user, user_type, start, limit):
"""
    List personal repos that the user shares with others.
If `user_type` is 'from_email', list repos user shares to others;
If `user_type` is 'to_email', list repos others share to user.
"""
share_repos = list_share_repos(user, user_type, start, limit)
for repo in share_repos:
repo.user_perm = check_permission(repo.props.repo_id, user)
share_repos.sort(lambda x, y: cmp(y.last_modified, x.last_modified))
return share_repos
def list_org_shared_repos(org_id, user, user_type, start, limit):
"""
    List org repos that the user shares with others.
    If `user_type` is 'from_email', list repos the user shares to others;
    If `user_type` is 'to_email', list repos others share to the user.
"""
try:
share_repos = seafserv_threaded_rpc.list_org_share_repos(org_id,
user, user_type,
start, limit)
except SearpcError:
share_repos = []
for repo in share_repos:
repo.user_perm = check_permission(repo.props.repo_id, user)
share_repos.sort(lambda x, y: cmp(y.last_modified, x.last_modified))
return share_repos
# dir
def list_dir_by_path(repo_id, commit_id, path):
try:
ret = seafserv_threaded_rpc.list_dir_by_path(repo_id, commit_id, path)
except SearpcError:
ret = None
return ret
# file
def post_empty_file(repo_id, parent_dir, file_name, user):
"""
    Return true if a new file was successfully created, otherwise false.
"""
try:
ret = seafserv_threaded_rpc.post_empty_file(repo_id, parent_dir,
file_name, user)
except SearpcError, e:
logger.error(e)
ret = -1
return True if ret == 0 else False
def del_file(repo_id, parent_dir, file_name, user):
"""
    Return true if the file was successfully deleted, otherwise false.
"""
try:
ret = seafserv_threaded_rpc.del_file(repo_id, parent_dir,
file_name, user)
except SearpcError, e:
logger.error(e)
ret = -1
return True if ret == 0 else False
# misc functions
def is_valid_filename(file_or_dir):
"""
Check whether file name or directory name is valid.
"""
try:
ret = seafserv_threaded_rpc.is_valid_filename('', file_or_dir)
except SearpcError:
ret = 0
return ret
def get_file_size(store_id, version, file_id):
try:
fs = seafserv_threaded_rpc.get_file_size(store_id, version, file_id)
except SearpcError, e:
fs = 0
return fs
def get_file_id_by_path(repo_id, path):
try:
ret = seafserv_threaded_rpc.get_file_id_by_path(repo_id, path)
except SearpcError, e:
ret = ''
return ret
def get_related_users_by_repo(repo_id):
"""Give a repo id, returns a list of users of:
- the repo owner
- members of groups to which the repo is shared
- users to which the repo is shared
"""
owner = seafserv_threaded_rpc.get_repo_owner(repo_id)
if not owner:
# Can't happen
return []
users = [owner]
groups = get_shared_groups_by_repo(repo_id)
for group in groups:
members = get_group_members(group.id)
for member in members:
if member.user_name not in users:
users.append(member.user_name)
share_repos = list_share_repos(owner, 'from_email', -1, -1)
for repo in share_repos:
if repo.repo_id == repo_id:
if repo.user not in users:
users.append(repo.user)
return users
def get_related_users_by_org_repo(org_id, repo_id):
"""Org version of get_related_users_by_repo
"""
owner = get_org_repo_owner(repo_id)
if not owner:
# Can't happen
return []
users = [owner]
groups = get_org_groups_by_repo(org_id, repo_id)
for group in groups:
members = get_group_members(group.id)
for member in members:
if member.user_name not in users:
users.append(member.user_name)
share_repos = seafserv_threaded_rpc.list_org_share_repos(org_id, \
owner, 'from_email', -1, -1)
for repo in share_repos:
if repo.repo_id == repo_id:
if repo.user not in users:
users.append(repo.user)
return users
# quota
def check_quota(repo_id):
try:
ret = seafserv_threaded_rpc.check_quota(repo_id)
except SearpcError, e:
logger.error(e)
ret = -1
return ret
def get_user_quota(user):
try:
ret = seafserv_threaded_rpc.get_user_quota(user)
except SearpcError, e:
logger.error(e)
ret = 0
return ret
def get_user_quota_usage(user):
try:
ret = seafserv_threaded_rpc.get_user_quota_usage(user)
except SearpcError, e:
logger.error(e)
ret = 0
return ret
def get_user_share_usage(user):
try:
ret = seafserv_threaded_rpc.get_user_share_usage(user)
except SearpcError, e:
logger.error(e)
ret = 0
return ret
# access token
def web_get_access_token(repo_id, obj_id, op, username, use_onetime=1):
try:
ret = seafserv_rpc.web_get_access_token(repo_id, obj_id, op, username, use_onetime)
except SearpcError, e:
ret = ''
return ret
# password management
def unset_repo_passwd(repo_id, user):
"""
    Remove the user's password for an encrypted repo.
Arguments:
- `repo_id`: encrypt repo id
- `user`: username
"""
try:
ret = seafserv_threaded_rpc.unset_passwd(repo_id, user)
except SearpcError, e:
ret = -1
return ret
def is_passwd_set(repo_id, user):
try:
ret = seafserv_rpc.is_passwd_set(repo_id, user)
except SearpcError, e:
ret = -1
return True if ret == 1 else False
# repo history limit
def get_repo_history_limit(repo_id):
try:
ret = seafserv_threaded_rpc.get_repo_history_limit(repo_id)
except SearpcError, e:
ret = -1
return ret
def set_repo_history_limit(repo_id, days):
try:
ret = seafserv_threaded_rpc.set_repo_history_limit(repo_id, days)
except SearpcError, e:
ret = -1
return ret

View File

@ -1,396 +0,0 @@
#!/usr/bin/env python
# coding: UTF-8
'''This script builds the seafile debian source tarball. In this tarball,
libsearpc and ccnet are also included.
'''
import sys
####################
### Requires Python 2.6+
####################
if sys.version_info[0] == 3:
print 'Python 3 not supported yet. Quit now.'
sys.exit(1)
if sys.version_info[1] < 6:
print 'Python 2.6 or above is required. Quit now.'
sys.exit(1)
import os
import tempfile
import glob
import shutil
import re
import subprocess
import optparse
import atexit
####################
### Global variables
####################
# command line configuration
conf = {}
# key names in the conf dictionary.
CONF_VERSION = 'version'
CONF_LIBSEARPC_VERSION = 'libsearpc_version'
CONF_CCNET_VERSION = 'ccnet_version'
CONF_SEAFILE_VERSION = 'seafile_version'
CONF_SRCDIR = 'srcdir'
CONF_KEEP = 'keep'
CONF_BUILDDIR = 'builddir'
CONF_OUTPUTDIR = 'outputdir'
####################
### Common helper functions
####################
def highlight(content, is_error=False):
'''Add ANSI color to content to get it highlighted on terminal'''
if is_error:
return '\x1b[1;31m%s\x1b[m' % content
else:
return '\x1b[1;32m%s\x1b[m' % content
def info(msg):
print highlight('[INFO] ') + msg
def exist_in_path(prog):
'''Test whether prog exists in system path'''
dirs = os.environ['PATH'].split(':')
for d in dirs:
if d == '':
continue
path = os.path.join(d, prog)
if os.path.exists(path):
return True
return False
def error(msg=None, usage=None):
if msg:
print highlight('[ERROR] ') + msg
if usage:
print usage
sys.exit(1)
def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
    '''Run a program and wait for it to finish, and return its exit code. The
    standard output of the program is suppressed when suppress_stdout is set.
'''
info('running %s, cwd=%s' % (' '.join(argv), cwd if cwd else os.getcwd()))
with open(os.devnull, 'w') as devnull:
if suppress_stdout:
stdout = devnull
else:
stdout = sys.stdout
if suppress_stderr:
stderr = devnull
else:
stderr = sys.stderr
proc = subprocess.Popen(argv,
cwd=cwd,
stdout=stdout,
stderr=stderr,
env=env)
return proc.wait()
def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
'''Like run_argv but specify a command line string instead of argv'''
with open(os.devnull, 'w') as devnull:
if suppress_stdout:
stdout = devnull
else:
stdout = sys.stdout
if suppress_stderr:
stderr = devnull
else:
stderr = sys.stderr
proc = subprocess.Popen(cmdline,
cwd=cwd,
stdout=stdout,
stderr=stderr,
env=env,
shell=True)
return proc.wait()
def must_mkdir(path):
'''Create a directory, exit on failure'''
try:
os.mkdir(path)
except OSError, e:
error('failed to create directory %s:%s' % (path, e))
def must_copy(src, dst):
'''Copy src to dst, exit on failure'''
try:
shutil.copy(src, dst)
except Exception, e:
error('failed to copy %s to %s: %s' % (src, dst, e))
def check_targz_src(proj, version, srcdir):
src_tarball = os.path.join(srcdir, '%s-%s.tar.gz' % (proj, version))
if not os.path.exists(src_tarball):
error('%s not exists' % src_tarball)
def remove_unused_files():
srcdir = os.path.join(conf[CONF_BUILDDIR], 'seafile-%s' % conf[CONF_VERSION])
web_sh_files = glob.glob(os.path.join(srcdir, 'web', '*.sh'))
files = [
os.path.join(srcdir, 'web', 'pygettext.py'),
]
files.extend(web_sh_files)
for f in files:
run('rm -f %s' % f)
def gen_tarball():
output = os.path.join(conf[CONF_OUTPUTDIR], 'seafile-server-latest.tar.gz')
dirname = 'seafile-%s' % conf[CONF_VERSION]
ignored_patterns = [
# windows msvc dlls
os.path.join(dirname, 'msi', 'bin*'),
]
excludes_list = [ '--exclude=%s' % pattern for pattern in ignored_patterns ]
argv = [
'tar',
'czvf',
output,
dirname,
]
    # extend(), not append(): append() only takes a single argument
    argv.extend(excludes_list)
if run_argv(argv) != 0:
error('failed to gen %s' % output)
print '---------------------------------------------'
    print 'The build succeeded. Output is:\t%s' % output
print '---------------------------------------------'
def uncompress_seafile():
src = os.path.join(conf[CONF_BUILDDIR], 'seafile-%s' % conf[CONF_SEAFILE_VERSION])
dst = os.path.join(conf[CONF_BUILDDIR], 'seafile-%s' % conf[CONF_VERSION])
if os.path.exists(src):
error('dir %s already exists' % src)
if os.path.exists(dst):
error('dir %s already exists' % dst)
tarball = os.path.join(conf[CONF_SRCDIR], 'seafile-%s.tar.gz' % conf[CONF_SEAFILE_VERSION])
argv = [ 'tar', 'xf',
tarball,
'-C', conf[CONF_BUILDDIR],
]
if run_argv(argv) != 0:
error('failed to uncompress seafile')
if conf[CONF_VERSION] != conf[CONF_SEAFILE_VERSION]:
shutil.move(src, dst)
def uncompress_libsearpc():
tarball = os.path.join(conf[CONF_SRCDIR], 'libsearpc-%s.tar.gz' % conf[CONF_LIBSEARPC_VERSION])
dst_dir = os.path.join(conf[CONF_BUILDDIR], 'seafile-%s' % conf[CONF_VERSION], 'libsearpc')
must_mkdir(dst_dir)
argv = [ 'tar', 'xf',
tarball,
'--strip-components=1',
'-C', dst_dir,
]
if run_argv(argv) != 0:
error('failed to uncompress libsearpc')
def uncompress_ccnet():
tarball = os.path.join(conf[CONF_SRCDIR], 'ccnet-%s.tar.gz' % conf[CONF_CCNET_VERSION])
dst_dir = os.path.join(conf[CONF_BUILDDIR], 'seafile-%s' % conf[CONF_VERSION], 'ccnet')
must_mkdir(dst_dir)
argv = [ 'tar', 'xf',
tarball,
'--strip-components=1',
'-C', dst_dir,
]
if run_argv(argv) != 0:
error('failed to uncompress ccnet')
def remove_debian_subdir():
debian_subdir = os.path.join(conf[CONF_BUILDDIR], 'seafile-%s' % conf[CONF_VERSION], 'debian')
argv = [ 'rm', '-rf', debian_subdir ]
if run_argv(argv) != 0:
        error('failed to remove the debian subdir')
def parse_args():
parser = optparse.OptionParser()
def long_opt(opt):
return '--' + opt
parser.add_option(long_opt(CONF_VERSION),
dest=CONF_VERSION,
nargs=1,
help='the version of seafile source. Must be digits delimited by dots, like 1.3.0')
parser.add_option(long_opt(CONF_SEAFILE_VERSION),
dest=CONF_SEAFILE_VERSION,
nargs=1,
help='the version of seafile. Must be digits delimited by dots, like 1.3.0')
parser.add_option(long_opt(CONF_LIBSEARPC_VERSION),
dest=CONF_LIBSEARPC_VERSION,
nargs=1,
help='the version of libsearpc as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')
parser.add_option(long_opt(CONF_CCNET_VERSION),
dest=CONF_CCNET_VERSION,
nargs=1,
help='the version of ccnet as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')
parser.add_option(long_opt(CONF_BUILDDIR),
dest=CONF_BUILDDIR,
nargs=1,
help='the directory to build the source. Defaults to /tmp',
default=tempfile.gettempdir())
parser.add_option(long_opt(CONF_OUTPUTDIR),
dest=CONF_OUTPUTDIR,
nargs=1,
help='the output directory to put the generated server tarball. Defaults to the current directory.',
default=os.getcwd())
parser.add_option(long_opt(CONF_SRCDIR),
dest=CONF_SRCDIR,
nargs=1,
help='''Source tarballs must be placed in this directory.''')
parser.add_option(long_opt(CONF_KEEP),
dest=CONF_KEEP,
action='store_true',
help='''keep the build directory after the script exits. By default, the script would delete the build directory at exit.''')
usage = parser.format_help()
options, remain = parser.parse_args()
if remain:
error(usage=usage)
validate_args(usage, options)
def validate_args(usage, options):
required_args = [
CONF_VERSION,
CONF_SEAFILE_VERSION,
CONF_LIBSEARPC_VERSION,
CONF_CCNET_VERSION,
CONF_SRCDIR,
]
    # first check required args
for optname in required_args:
if getattr(options, optname, None) == None:
error('%s must be specified' % optname, usage=usage)
def get_option(optname):
return getattr(options, optname)
# [ version ]
def check_project_version(version):
'''A valid version must be like 1.2.2, 1.3'''
if not re.match('^[0-9](\.[0-9])+$', version):
error('%s is not a valid version' % version, usage=usage)
version = get_option(CONF_VERSION)
libsearpc_version = get_option(CONF_LIBSEARPC_VERSION)
ccnet_version = get_option(CONF_CCNET_VERSION)
seafile_version = get_option(CONF_SEAFILE_VERSION)
check_project_version(version)
check_project_version(libsearpc_version)
check_project_version(ccnet_version)
check_project_version(seafile_version)
# [ srcdir ]
srcdir = get_option(CONF_SRCDIR)
check_targz_src('libsearpc', libsearpc_version, srcdir)
check_targz_src('ccnet', ccnet_version, srcdir)
check_targz_src('seafile', seafile_version, srcdir)
# [ builddir ]
builddir = get_option(CONF_BUILDDIR)
if not os.path.exists(builddir):
error('%s does not exist' % builddir, usage=usage)
builddir = os.path.join(builddir, 'seafile-deb-src')
# [ outputdir ]
outputdir = get_option(CONF_OUTPUTDIR)
if not os.path.exists(outputdir):
error('outputdir %s does not exist' % outputdir, usage=usage)
# [ keep ]
keep = get_option(CONF_KEEP)
conf[CONF_VERSION] = version
conf[CONF_LIBSEARPC_VERSION] = libsearpc_version
conf[CONF_CCNET_VERSION] = ccnet_version
conf[CONF_SEAFILE_VERSION] = seafile_version
conf[CONF_BUILDDIR] = builddir
conf[CONF_SRCDIR] = srcdir
conf[CONF_OUTPUTDIR] = outputdir
conf[CONF_KEEP] = keep
prepare_builddir(builddir)
show_build_info()
def prepare_builddir(builddir):
must_mkdir(builddir)
if not conf[CONF_KEEP]:
def remove_builddir():
'''Remove the builddir when exit'''
info('remove builddir before exit')
shutil.rmtree(builddir, ignore_errors=True)
atexit.register(remove_builddir)
os.chdir(builddir)
def show_build_info():
    '''Print all conf information. Confirm before continuing.'''
info('------------------------------------------')
info('Seafile debian source tarball %s:' % conf[CONF_VERSION])
info('------------------------------------------')
info('seafile: %s' % conf[CONF_SEAFILE_VERSION])
info('ccnet: %s' % conf[CONF_CCNET_VERSION])
info('libsearpc: %s' % conf[CONF_LIBSEARPC_VERSION])
info('builddir: %s' % conf[CONF_BUILDDIR])
info('outputdir: %s' % conf[CONF_OUTPUTDIR])
info('source dir: %s' % conf[CONF_SRCDIR])
info('clean on exit: %s' % (not conf[CONF_KEEP]))
info('------------------------------------------')
info('press any key to continue ')
info('------------------------------------------')
dummy = raw_input()
def main():
parse_args()
uncompress_seafile()
uncompress_libsearpc()
uncompress_ccnet()
remove_debian_subdir()
remove_unused_files()
gen_tarball()
if __name__ == '__main__':
main()
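# Illustrative invocation (hypothetical script name, versions and paths),
# using only the options defined in parse_args() above:
#   python build-deb-src.py --version=1.3.0 --seafile_version=1.3.0 \
#       --libsearpc_version=1.3.0 --ccnet_version=1.3.0 \
#       --srcdir=/path/to/source-tarballs --outputdir=/tmp/out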

View File

@ -1,889 +0,0 @@
#!/usr/bin/env python
# coding: UTF-8
'''This script builds the seafile server tarball.
Some notes:
1. The working directory is always the 'builddir'. 'os.chdir' is only called
to change to the 'builddir'. We make use of the 'cwd' argument in
'subprocess.Popen' to run a command in a specific directory.
2. django/djangorestframework/djblets/gunicorn/flup must be easy_install-ed to
a directory before running this script. That directory is passed in as the
'--thirdpartdir' arguments.
'''
import sys
####################
### Requires Python 2.6+
####################
if sys.version_info[0] == 3:
print 'Python 3 not supported yet. Quit now.'
sys.exit(1)
if sys.version_info[1] < 6:
print 'Python 2.6 or above is required. Quit now.'
sys.exit(1)
import os
import glob
import commands
import tempfile
import shutil
import re
import subprocess
import optparse
import atexit
import platform
####################
### Global variables
####################
# command line configuration
conf = {}
# key names in the conf dictionary.
CONF_VERSION = 'version'
CONF_SEAFILE_VERSION = 'seafile_version'
CONF_LIBSEARPC_VERSION = 'libsearpc_version'
CONF_CCNET_VERSION = 'ccnet_version'
CONF_SRCDIR = 'srcdir'
CONF_KEEP = 'keep'
CONF_BUILDDIR = 'builddir'
CONF_OUTPUTDIR = 'outputdir'
CONF_THIRDPARTDIR = 'thirdpartdir'
CONF_NO_STRIP = 'nostrip'
CONF_ENABLE_S3 = 's3'
CONF_YES = 'yes'
CONF_JOBS = 'jobs'
####################
### Common helper functions
####################
def highlight(content, is_error=False):
'''Add ANSI color to content to get it highlighted on terminal'''
if is_error:
return '\x1b[1;31m%s\x1b[m' % content
else:
return '\x1b[1;32m%s\x1b[m' % content
def info(msg):
print highlight('[INFO] ') + msg
def find_in_path(prog):
'''Find a file in system path'''
dirs = os.environ['PATH'].split(':')
for d in dirs:
if d == '':
continue
path = os.path.join(d, prog)
if os.path.exists(path):
return path
return None
def error(msg=None, usage=None):
if msg:
print highlight('[ERROR] ') + msg
if usage:
print usage
sys.exit(1)
def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
'''Run a program and wait for it to finish, and return its exit code. The
    standard output of the program is suppressed when suppress_stdout is set.
'''
with open(os.devnull, 'w') as devnull:
if suppress_stdout:
stdout = devnull
else:
stdout = sys.stdout
if suppress_stderr:
stderr = devnull
else:
stderr = sys.stderr
proc = subprocess.Popen(argv,
cwd=cwd,
stdout=stdout,
stderr=stderr,
env=env)
return proc.wait()
def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
'''Like run_argv but specify a command line string instead of argv'''
with open(os.devnull, 'w') as devnull:
if suppress_stdout:
stdout = devnull
else:
stdout = sys.stdout
if suppress_stderr:
stderr = devnull
else:
stderr = sys.stderr
proc = subprocess.Popen(cmdline,
cwd=cwd,
stdout=stdout,
stderr=stderr,
env=env,
shell=True)
return proc.wait()
def must_mkdir(path):
'''Create a directory, exit on failure'''
try:
os.mkdir(path)
except OSError, e:
error('failed to create directory %s:%s' % (path, e))
def must_copy(src, dst):
'''Copy src to dst, exit on failure'''
try:
shutil.copy(src, dst)
except Exception, e:
error('failed to copy %s to %s: %s' % (src, dst, e))
class Project(object):
'''Base class for a project'''
    # Project name, i.e. libsearpc/ccnet/seafile/seahub
name = ''
# A list of shell commands to configure/build the project
build_commands = []
def __init__(self):
# the path to pass to --prefix=/<prefix>
self.prefix = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seafile')
self.version = self.get_version()
self.src_tarball = os.path.join(conf[CONF_SRCDIR],
'%s-%s.tar.gz' % (self.name, self.version))
# project dir, like <builddir>/seafile-1.2.2/
self.projdir = os.path.join(conf[CONF_BUILDDIR], '%s-%s' % (self.name, self.version))
def get_version(self):
# libsearpc and ccnet can have different versions from seafile.
raise NotImplementedError
def uncompress(self):
'''Uncompress the source from the tarball'''
info('Uncompressing %s' % self.name)
        if run('tar xf %s' % self.src_tarball) != 0:
error('failed to uncompress source of %s' % self.name)
def build(self):
'''Build the source'''
info('Building %s' % self.name)
for cmd in self.build_commands:
if run(cmd, cwd=self.projdir) != 0:
error('error when running command:\n\t%s\n' % cmd)
class Libsearpc(Project):
name = 'libsearpc'
def __init__(self):
Project.__init__(self)
self.build_commands = [
'./configure --prefix=%s' % self.prefix,
'make -j%s' % conf[CONF_JOBS],
'make install'
]
def get_version(self):
return conf[CONF_LIBSEARPC_VERSION]
class Ccnet(Project):
name = 'ccnet'
def __init__(self):
Project.__init__(self)
self.build_commands = [
'./configure --prefix=%s --disable-client --enable-server --enable-pgsql --enable-ldap' % self.prefix,
'make -j%s' % conf[CONF_JOBS],
'make install'
]
def get_version(self):
return conf[CONF_CCNET_VERSION]
class Seafile(Project):
name = 'seafile'
def __init__(self):
Project.__init__(self)
s3_support = ''
if conf[CONF_ENABLE_S3]:
s3_support = '--enable-s3'
self.build_commands = [
'./configure --prefix=%s --disable-client --enable-server --enable-pgsql %s' \
% (self.prefix, s3_support),
'make -j%s' % conf[CONF_JOBS],
'make install'
]
def get_version(self):
return conf[CONF_SEAFILE_VERSION]
class Seahub(Project):
name = 'seahub'
def __init__(self):
Project.__init__(self)
# nothing to do for seahub
self.build_commands = [
]
def get_version(self):
return conf[CONF_SEAFILE_VERSION]
def build(self):
self.write_version_to_settings_py()
Project.build(self)
def write_version_to_settings_py(self):
'''Write the version of current seafile server to seahub'''
settings_py = os.path.join(self.projdir, 'seahub', 'settings.py')
line = '\nSEAFILE_VERSION = "%s"\n' % conf[CONF_VERSION]
with open(settings_py, 'a+') as fp:
fp.write(line)
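# Illustrative sketch (not part of the original script): how one of the Project
# subclasses above is typically driven -- uncompress the source tarball into
# the builddir, then run its configure/make commands. The real call site in
# this script may differ.
def _example_build_libsearpc():
    proj = Libsearpc()   # picks up versions and paths from the global conf
    proj.uncompress()    # extracts <srcdir>/libsearpc-<version>.tar.gz
    proj.build()         # runs ./configure --prefix=... && make && make install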
def check_seahub_thirdpart(thirdpartdir):
    '''The ${thirdpartdir} must have django/djblets/gunicorn pre-installed, so
    that they can be copied to seahub/thirdpart.
'''
thirdpart_libs = [
'Django',
# 'Djblets',
'gunicorn',
'flup',
'chardet',
'python_dateutil',
'django_picklefield',
'django_constance',
# 'SQLAlchemy',
# 'python_daemon',
# 'lockfile',
# 'six',
]
def check_thirdpart_lib(name):
name += '*'
if not glob.glob(os.path.join(thirdpartdir, name)):
error('%s not found in %s' % (name, thirdpartdir))
for lib in thirdpart_libs:
check_thirdpart_lib(lib)
def check_targz_src(proj, version, srcdir):
src_tarball = os.path.join(srcdir, '%s-%s.tar.gz' % (proj, version))
if not os.path.exists(src_tarball):
error('%s not exists' % src_tarball)
def check_targz_src_no_version(proj, srcdir):
src_tarball = os.path.join(srcdir, '%s.tar.gz' % proj)
if not os.path.exists(src_tarball):
error('%s not exists' % src_tarball)
def check_pdf2htmlEX():
pdf2htmlEX_executable = find_in_path('pdf2htmlEX')
if pdf2htmlEX_executable is None:
error('pdf2htmlEX not found')
def validate_args(usage, options):
required_args = [
CONF_VERSION,
CONF_LIBSEARPC_VERSION,
CONF_CCNET_VERSION,
CONF_SEAFILE_VERSION,
CONF_SRCDIR,
CONF_THIRDPARTDIR,
]
    # first check required args
for optname in required_args:
if getattr(options, optname, None) == None:
error('%s must be specified' % optname, usage=usage)
def get_option(optname):
return getattr(options, optname)
# [ version ]
def check_project_version(version):
'''A valid version must be like 1.2.2, 1.3'''
if not re.match('^[0-9]+(\.([0-9])+)+$', version):
error('%s is not a valid version' % version, usage=usage)
version = get_option(CONF_VERSION)
seafile_version = get_option(CONF_SEAFILE_VERSION)
libsearpc_version = get_option(CONF_LIBSEARPC_VERSION)
ccnet_version = get_option(CONF_CCNET_VERSION)
check_project_version(version)
check_project_version(libsearpc_version)
check_project_version(ccnet_version)
check_project_version(seafile_version)
# [ srcdir ]
srcdir = get_option(CONF_SRCDIR)
check_targz_src('libsearpc', libsearpc_version, srcdir)
check_targz_src('ccnet', ccnet_version, srcdir)
check_targz_src('seafile', seafile_version, srcdir)
check_targz_src('seahub', seafile_version, srcdir)
check_targz_src_no_version('seafdav', srcdir)
check_targz_src_no_version('seafobj', srcdir)
# check_pdf2htmlEX()
# [ builddir ]
builddir = get_option(CONF_BUILDDIR)
if not os.path.exists(builddir):
error('%s does not exist' % builddir, usage=usage)
builddir = os.path.join(builddir, 'seafile-server-build')
# [ thirdpartdir ]
thirdpartdir = get_option(CONF_THIRDPARTDIR)
check_seahub_thirdpart(thirdpartdir)
# [ outputdir ]
outputdir = get_option(CONF_OUTPUTDIR)
if outputdir:
if not os.path.exists(outputdir):
error('outputdir %s does not exist' % outputdir, usage=usage)
else:
outputdir = os.getcwd()
# [ yes ]
yes = get_option(CONF_YES)
# [ jobs ]
jobs = get_option(CONF_JOBS)
# [ keep ]
keep = get_option(CONF_KEEP)
# [ no strip]
nostrip = get_option(CONF_NO_STRIP)
# [ s3 ]
s3 = get_option(CONF_ENABLE_S3)
conf[CONF_VERSION] = version
conf[CONF_LIBSEARPC_VERSION] = libsearpc_version
conf[CONF_SEAFILE_VERSION] = seafile_version
conf[CONF_CCNET_VERSION] = ccnet_version
conf[CONF_BUILDDIR] = builddir
conf[CONF_SRCDIR] = srcdir
conf[CONF_OUTPUTDIR] = outputdir
conf[CONF_KEEP] = keep
conf[CONF_THIRDPARTDIR] = thirdpartdir
conf[CONF_NO_STRIP] = nostrip
conf[CONF_ENABLE_S3] = s3
conf[CONF_YES] = yes
conf[CONF_JOBS] = jobs
prepare_builddir(builddir)
show_build_info()
def show_build_info():
    '''Print all conf information. Confirm before continuing.'''
info('------------------------------------------')
info('Seafile server %s: BUILD INFO' % conf[CONF_VERSION])
info('------------------------------------------')
info('seafile: %s' % conf[CONF_SEAFILE_VERSION])
info('ccnet: %s' % conf[CONF_CCNET_VERSION])
info('libsearpc: %s' % conf[CONF_LIBSEARPC_VERSION])
info('builddir: %s' % conf[CONF_BUILDDIR])
info('outputdir: %s' % conf[CONF_OUTPUTDIR])
info('source dir: %s' % conf[CONF_SRCDIR])
info('strip symbols: %s' % (not conf[CONF_NO_STRIP]))
info('s3 support: %s' % (conf[CONF_ENABLE_S3]))
info('clean on exit: %s' % (not conf[CONF_KEEP]))
if conf[CONF_YES]:
return
info('------------------------------------------')
info('press any key to continue ')
info('------------------------------------------')
raw_input()
def prepare_builddir(builddir):
must_mkdir(builddir)
if not conf[CONF_KEEP]:
def remove_builddir():
'''Remove the builddir when exit'''
info('remove builddir before exit')
shutil.rmtree(builddir, ignore_errors=True)
atexit.register(remove_builddir)
os.chdir(builddir)
must_mkdir(os.path.join(builddir, 'seafile-server'))
must_mkdir(os.path.join(builddir, 'seafile-server', 'seafile'))
def parse_args():
parser = optparse.OptionParser()
def long_opt(opt):
return '--' + opt
parser.add_option(long_opt(CONF_YES),
dest=CONF_YES,
action='store_true')
parser.add_option(long_opt(CONF_JOBS),
dest=CONF_JOBS,
default=2,
type=int)
parser.add_option(long_opt(CONF_THIRDPARTDIR),
dest=CONF_THIRDPARTDIR,
nargs=1,
help='where to find the thirdpart libs for seahub')
parser.add_option(long_opt(CONF_VERSION),
dest=CONF_VERSION,
nargs=1,
help='the version to build. Must be digits delimited by dots, like 1.3.0')
parser.add_option(long_opt(CONF_SEAFILE_VERSION),
dest=CONF_SEAFILE_VERSION,
nargs=1,
help='the version of seafile as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')
parser.add_option(long_opt(CONF_LIBSEARPC_VERSION),
dest=CONF_LIBSEARPC_VERSION,
nargs=1,
help='the version of libsearpc as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')
parser.add_option(long_opt(CONF_CCNET_VERSION),
dest=CONF_CCNET_VERSION,
nargs=1,
help='the version of ccnet as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')
parser.add_option(long_opt(CONF_BUILDDIR),
dest=CONF_BUILDDIR,
nargs=1,
help='the directory to build the source. Defaults to /tmp',
default=tempfile.gettempdir())
parser.add_option(long_opt(CONF_OUTPUTDIR),
dest=CONF_OUTPUTDIR,
nargs=1,
help='the output directory to put the generated server tarball. Defaults to the current directory.',
default=os.getcwd())
parser.add_option(long_opt(CONF_SRCDIR),
dest=CONF_SRCDIR,
nargs=1,
help='''Source tarballs must be placed in this directory.''')
parser.add_option(long_opt(CONF_KEEP),
dest=CONF_KEEP,
action='store_true',
help='''keep the build directory after the script exits. By default, the script would delete the build directory at exit.''')
parser.add_option(long_opt(CONF_NO_STRIP),
dest=CONF_NO_STRIP,
action='store_true',
help='''do not strip debug symbols''')
parser.add_option(long_opt(CONF_ENABLE_S3),
dest=CONF_ENABLE_S3,
action='store_true',
help='''enable amazon s3 support''')
usage = parser.format_help()
options, remain = parser.parse_args()
if remain:
error(usage=usage)
validate_args(usage, options)
def setup_build_env():
'''Set up environment variables, such as export PATH=$BUILDDIR/bin:$PATH'''
prefix = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seafile')
def prepend_env_value(name, value, seperator=':'):
'''prepend a new value to an environment variable'''
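# e.g. prepend_env_value('PATH', '/opt/bin') turns PATH='/usr/bin' into '/opt/bin:/usr/bin'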
try:
current_value = os.environ[name]
except KeyError:
current_value = ''
new_value = value
if current_value:
new_value += seperator + current_value
os.environ[name] = new_value
prepend_env_value('CPPFLAGS',
'-I%s' % os.path.join(prefix, 'include'),
seperator=' ')
prepend_env_value('CPPFLAGS',
'-DLIBICONV_PLUG',
seperator=' ')
if conf[CONF_NO_STRIP]:
prepend_env_value('CPPFLAGS',
'-g -O0',
seperator=' ')
prepend_env_value('CFLAGS',
'-g -O0',
seperator=' ')
prepend_env_value('LDFLAGS',
'-L%s' % os.path.join(prefix, 'lib'),
seperator=' ')
prepend_env_value('LDFLAGS',
'-L%s' % os.path.join(prefix, 'lib64'),
seperator=' ')
prepend_env_value('PATH', os.path.join(prefix, 'bin'))
prepend_env_value('PKG_CONFIG_PATH', os.path.join(prefix, 'lib', 'pkgconfig'))
prepend_env_value('PKG_CONFIG_PATH', os.path.join(prefix, 'lib64', 'pkgconfig'))
def copy_user_manuals():
builddir = conf[CONF_BUILDDIR]
# src_pattern = os.path.join(builddir, Seafile().projdir, 'doc', '*.doc')
src_pattern = os.path.join(builddir, Seafile().projdir, 'doc', 'seafile-tutorial.doc')
dst_dir = os.path.join(builddir, 'seafile-server', 'seafile', 'docs')
must_mkdir(dst_dir)
for path in glob.glob(src_pattern):
must_copy(path, dst_dir)
def copy_seafdav():
dst_dir = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seahub', 'thirdpart')
tarball = os.path.join(conf[CONF_SRCDIR], 'seafdav.tar.gz')
if run('tar xf %s -C %s' % (tarball, dst_dir)) != 0:
error('failed to uncompress %s' % tarball)
dst_dir = os.path.join(conf[CONF_BUILDDIR], 'seafile-server', 'seahub', 'thirdpart')
tarball = os.path.join(conf[CONF_SRCDIR], 'seafobj.tar.gz')
if run('tar xf %s -C %s' % (tarball, dst_dir)) != 0:
error('failed to uncompress %s' % tarball)
def copy_scripts_and_libs():
'''Copy server release scripts and shared libs, as well as seahub
thirdpart libs
'''
builddir = conf[CONF_BUILDDIR]
scripts_srcdir = os.path.join(builddir, Seafile().projdir, 'scripts')
serverdir = os.path.join(builddir, 'seafile-server')
must_copy(os.path.join(scripts_srcdir, 'setup-seafile.sh'),
serverdir)
must_copy(os.path.join(scripts_srcdir, 'setup-seafile-mysql.sh'),
serverdir)
must_copy(os.path.join(scripts_srcdir, 'setup-seafile-mysql.py'),
serverdir)
must_copy(os.path.join(scripts_srcdir, 'seafile.sh'),
serverdir)
must_copy(os.path.join(scripts_srcdir, 'seahub.sh'),
serverdir)
must_copy(os.path.join(scripts_srcdir, 'reset-admin.sh'),
serverdir)
must_copy(os.path.join(scripts_srcdir, 'seaf-fuse.sh'),
serverdir)
must_copy(os.path.join(scripts_srcdir, 'check_init_admin.py'),
serverdir)
must_copy(os.path.join(scripts_srcdir, 'seaf-gc.sh'),
serverdir)
must_copy(os.path.join(scripts_srcdir, 'seaf-fsck.sh'),
serverdir)
# copy update scripts
update_scriptsdir = os.path.join(scripts_srcdir, 'upgrade')
dst_update_scriptsdir = os.path.join(serverdir, 'upgrade')
try:
shutil.copytree(update_scriptsdir, dst_update_scriptsdir)
except Exception, e:
error('failed to copy upgrade scripts: %s' % e)
# copy runtime/seahub.conf
runtimedir = os.path.join(serverdir, 'runtime')
must_mkdir(runtimedir)
must_copy(os.path.join(scripts_srcdir, 'seahub.conf'),
runtimedir)
# move seahub to seafile-server/seahub
src_seahubdir = Seahub().projdir
dst_seahubdir = os.path.join(serverdir, 'seahub')
try:
shutil.move(src_seahubdir, dst_seahubdir)
except Exception, e:
error('failed to move seahub to seafile-server/seahub: %s' % e)
# copy seahub thirdpart libs
seahub_thirdpart = os.path.join(dst_seahubdir, 'thirdpart')
copy_seahub_thirdpart_libs(seahub_thirdpart)
copy_seafdav()
# copy_pdf2htmlex()
# copy shared c libs
copy_shared_libs()
copy_user_manuals()
def copy_pdf2htmlex():
'''Copy pdf2htmlEX executable and its dependent libs'''
pdf2htmlEX_executable = find_in_path('pdf2htmlEX')
libs = get_dependent_libs(pdf2htmlEX_executable)
builddir = conf[CONF_BUILDDIR]
dst_lib_dir = os.path.join(builddir,
'seafile-server',
'seafile',
'lib')
dst_bin_dir = os.path.join(builddir,
'seafile-server',
'seafile',
'bin')
for lib in libs:
dst_file = os.path.join(dst_lib_dir, os.path.basename(lib))
if os.path.exists(dst_file):
continue
info('Copying %s' % lib)
must_copy(lib, dst_lib_dir)
must_copy(pdf2htmlEX_executable, dst_bin_dir)
def get_dependent_libs(executable):
syslibs = ['libsearpc', 'libccnet', 'libseafile', 'libpthread.so', 'libc.so', 'libm.so', 'librt.so', 'libdl.so', 'libselinux.so', 'libresolv.so' ]
def is_syslib(lib):
for syslib in syslibs:
if syslib in lib:
return True
return False
ldd_output = commands.getoutput('ldd %s' % executable)
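# a relevant ldd line looks like "libglib-2.0.so.0 => /usr/lib/libglib-2.0.so.0 (0x...)",
# i.e. four tokens; we keep the resolved path (tokens[2]) and skip system libraries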
ret = set()
for line in ldd_output.splitlines():
tokens = line.split()
if len(tokens) != 4:
continue
if is_syslib(tokens[0]):
continue
ret.add(tokens[2])
return ret
def copy_shared_libs():
'''copy shared c libs, such as libevent, glib, libmysqlclient'''
builddir = conf[CONF_BUILDDIR]
dst_dir = os.path.join(builddir,
'seafile-server',
'seafile',
'lib')
seafile_path = os.path.join(builddir,
'seafile-server',
'seafile',
'bin',
'seaf-server')
ccnet_server_path = os.path.join(builddir,
'seafile-server',
'seafile',
'bin',
'ccnet-server')
seaf_fuse_path = os.path.join(builddir,
'seafile-server',
'seafile',
'bin',
'seaf-fuse')
libs = set()
libs.update(get_dependent_libs(ccnet_server_path))
libs.update(get_dependent_libs(seafile_path))
libs.update(get_dependent_libs(seaf_fuse_path))
for lib in libs:
dst_file = os.path.join(dst_dir, os.path.basename(lib))
if os.path.exists(dst_file):
continue
info('Copying %s' % lib)
shutil.copy(lib, dst_dir)
def copy_seahub_thirdpart_libs(seahub_thirdpart):
'''copy django/djblets/gunicorn from ${thirdpartdir} to
seahub/thirdpart
'''
src = conf[CONF_THIRDPARTDIR]
dst = seahub_thirdpart
pattern = os.path.join(src, '*')
try:
for path in glob.glob(pattern):
target_path = os.path.join(dst, os.path.basename(path))
if os.path.isdir(path):
shutil.copytree(path, target_path)
else:
shutil.copy(path, target_path)
except Exception, e:
error('failed to copy seahub thirdpart libs: %s' % e)
def strip_symbols():
def do_strip(fn):
run('chmod u+w %s' % fn)
info('stripping: %s' % fn)
run('strip "%s"' % fn)
def remove_static_lib(fn):
info('removing: %s' % fn)
os.remove(fn)
for parent, dnames, fnames in os.walk('seafile-server/seafile'):
dummy = dnames # avoid pylint 'unused' warning
for fname in fnames:
fn = os.path.join(parent, fname)
if os.path.isdir(fn):
continue
if fn.endswith(".a") or fn.endswith(".la"):
remove_static_lib(fn)
continue
if os.path.islink(fn):
continue
finfo = commands.getoutput('file "%s"' % fn)
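# file(1) reports e.g. "ELF 64-bit LSB executable ... not stripped" for binaries
# that still carry symbols, so only those are passed to strip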
if 'not stripped' in finfo:
do_strip(fn)
def create_tarball(tarball_name):
'''call tar command to generate a tarball'''
version = conf[CONF_VERSION]
serverdir = 'seafile-server'
versioned_serverdir = 'seafile-server-' + version
# move seafile-server to seafile-server-${version}
try:
shutil.move(serverdir, versioned_serverdir)
except Exception, e:
error('failed to move %s to %s: %s' % (serverdir, versioned_serverdir, e))
ignored_patterns = [
# common ignored files
'*.pyc',
'*~',
'*#',
# seahub
os.path.join(versioned_serverdir, 'seahub', '.git*'),
os.path.join(versioned_serverdir, 'seahub', 'media', 'flexpaper*'),
os.path.join(versioned_serverdir, 'seahub', 'avatar', 'testdata*'),
# seafile
os.path.join(versioned_serverdir, 'seafile', 'share*'),
os.path.join(versioned_serverdir, 'seafile', 'include*'),
os.path.join(versioned_serverdir, 'seafile', 'lib', 'pkgconfig*'),
os.path.join(versioned_serverdir, 'seafile', 'lib64', 'pkgconfig*'),
os.path.join(versioned_serverdir, 'seafile', 'bin', 'ccnet-demo*'),
os.path.join(versioned_serverdir, 'seafile', 'bin', 'ccnet-tool'),
os.path.join(versioned_serverdir, 'seafile', 'bin', 'ccnet-servtool'),
os.path.join(versioned_serverdir, 'seafile', 'bin', 'searpc-codegen.py'),
os.path.join(versioned_serverdir, 'seafile', 'bin', 'seafile-admin'),
os.path.join(versioned_serverdir, 'seafile', 'bin', 'seafile'),
]
excludes_list = [ '--exclude=%s' % pattern for pattern in ignored_patterns ]
excludes = ' '.join(excludes_list)
tar_cmd = 'tar czf %(tarball_name)s %(versioned_serverdir)s %(excludes)s' \
% dict(tarball_name=tarball_name,
versioned_serverdir=versioned_serverdir,
excludes=excludes)
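# note: this assumes GNU tar, which permutes its command line, so the --exclude
# flags appended after the directory operand are still honored (BSD tar or
# POSIXLY_CORRECT would behave differently)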
if run(tar_cmd) != 0:
error('failed to generate the tarball')
def gen_tarball():
# strip symbols of libraries to reduce size
if not conf[CONF_NO_STRIP]:
try:
strip_symbols()
except Exception, e:
error('failed to strip symbols: %s' % e)
# determine the output name
# 64-bit: seafile-server_1.2.2_x86-64.tar.gz
# 32-bit: seafile-server_1.2.2_i386.tar.gz
version = conf[CONF_VERSION]
arch = os.uname()[-1].replace('_', '-')
if 'arm' in platform.machine():
arch = 'pi'
elif arch != 'x86-64':
arch = 'i386'
dbg = ''
if conf[CONF_NO_STRIP]:
dbg = '.dbg'
tarball_name = 'seafile-server_%(version)s_%(arch)s%(dbg)s.tar.gz' \
% dict(version=version, arch=arch, dbg=dbg)
dst_tarball = os.path.join(conf[CONF_OUTPUTDIR], tarball_name)
# generate the tarball
try:
create_tarball(tarball_name)
except Exception, e:
error('failed to generate tarball: %s' % e)
# move tarball to outputdir
try:
shutil.copy(tarball_name, dst_tarball)
except Exception, e:
error('failed to copy %s to %s: %s' % (tarball_name, dst_tarball, e))
print '---------------------------------------------'
print 'The build is successful. Output is:\t%s' % dst_tarball
print '---------------------------------------------'
def main():
parse_args()
setup_build_env()
libsearpc = Libsearpc()
ccnet = Ccnet()
seafile = Seafile()
seahub = Seahub()
libsearpc.uncompress()
libsearpc.build()
ccnet.uncompress()
ccnet.build()
seafile.uncompress()
seafile.build()
seahub.uncompress()
seahub.build()
copy_scripts_and_libs()
gen_tarball()
if __name__ == '__main__':
main()

View File

@ -1,373 +0,0 @@
#coding: UTF-8
'''This script checks whether an admin account exists, and prompts the user to create one if none exists'''
import sys
import os
import time
import re
import shutil
import glob
import subprocess
import hashlib
import getpass
import uuid
import warnings
from ConfigParser import ConfigParser
try:
import readline # pylint: disable=W0611
except ImportError:
pass
SERVER_MANUAL_HTTP = 'https://github.com/haiwen/seafile/wiki'
class Utils(object):
'''Groups all helper functions here'''
@staticmethod
def welcome():
'''Show welcome message'''
welcome_msg = '''\
-----------------------------------------------------------------
This script will guide you to set up your seafile server using MySQL.
Make sure you have read seafile server manual at
%s
Press ENTER to continue
-----------------------------------------------------------------''' % SERVER_MANUAL_HTTP
print welcome_msg
raw_input()
@staticmethod
def highlight(content):
'''Add ANSI color to content to get it highlighted on terminal'''
return '\x1b[33m%s\x1b[m' % content
@staticmethod
def info(msg):
print msg
@staticmethod
def error(msg):
'''Print error and exit'''
print
print 'Error: ' + msg
sys.exit(1)
@staticmethod
def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
'''Run a program, wait for it to finish, and return its exit code. Its
standard output and standard error can optionally be suppressed.
'''
with open(os.devnull, 'w') as devnull:
if suppress_stdout:
stdout = devnull
else:
stdout = sys.stdout
if suppress_stderr:
stderr = devnull
else:
stderr = sys.stderr
proc = subprocess.Popen(argv,
cwd=cwd,
stdout=stdout,
stderr=stderr,
env=env)
return proc.wait()
@staticmethod
def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
'''Like run_argv but specify a command line string instead of argv'''
with open(os.devnull, 'w') as devnull:
if suppress_stdout:
stdout = devnull
else:
stdout = sys.stdout
if suppress_stderr:
stderr = devnull
else:
stderr = sys.stderr
proc = subprocess.Popen(cmdline,
cwd=cwd,
stdout=stdout,
stderr=stderr,
env=env,
shell=True)
return proc.wait()
@staticmethod
def prepend_env_value(name, value, env=None, seperator=':'):
'''prepend a new value to an environment variable'''
if env is None:
env = os.environ
try:
current_value = env[name]
except KeyError:
current_value = ''
new_value = value
if current_value:
new_value += seperator + current_value
env[name] = new_value
@staticmethod
def must_mkdir(path):
'''Create a directory, exit on failure'''
try:
os.mkdir(path)
except OSError, e:
Utils.error('failed to create directory %s:%s' % (path, e))
@staticmethod
def must_copy(src, dst):
'''Copy src to dst, exit on failure'''
try:
shutil.copy(src, dst)
except Exception, e:
Utils.error('failed to copy %s to %s: %s' % (src, dst, e))
@staticmethod
def find_in_path(prog):
if 'win32' in sys.platform:
sep = ';'
else:
sep = ':'
dirs = os.environ['PATH'].split(sep)
for d in dirs:
d = d.strip()
if d == '':
continue
path = os.path.join(d, prog)
if os.path.exists(path):
return path
return None
@staticmethod
def get_python_executable():
'''Return the python executable. This should be the PYTHON environment
variable which is set in setup-seafile-mysql.sh
'''
return os.environ['PYTHON']
@staticmethod
def read_config(fn):
'''Return a case sensitive ConfigParser by reading the file "fn"'''
cp = ConfigParser()
cp.optionxform = str
cp.read(fn)
return cp
@staticmethod
def write_config(cp, fn):
'''Write the ConfigParser object to the file "fn"'''
with open(fn, 'w') as fp:
cp.write(fp)
@staticmethod
def ask_question(desc,
key=None,
note=None,
default=None,
validate=None,
yes_or_no=False,
password=False):
'''Ask a question, return the answer.
@desc description, e.g. "What is the port of ccnet?"
@key a name to represent the target of the question, e.g. "port for
ccnet server"
@note additional information for the question, e.g. "Must be a valid
port number"
@default the default value of the question. If the default value is
not None, when the user enter nothing and press [ENTER], the default
value would be returned
@validate a function that takes the user input as the only parameter
and validate it. It should return a validated value, or throws an
"InvalidAnswer" exception if the input is not valid.
@yes_or_no If true, the user must answer "yes" or "no", and a boolean
value would be returned
@password If true, the user input would not be echoed to the
console
'''
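# Example (hypothetical values):
#   port = Utils.ask_question('What port should ccnet listen on?',
#                              key='ccnet port',
#                              default='10001',
#                              validate=Utils.validate_port)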
assert key or yes_or_no
# Format description
print
if note:
desc += '\n' + note
desc += '\n'
if yes_or_no:
desc += '[ yes or no ]'
else:
if default:
desc += '[ default "%s" ]' % default
else:
desc += '[ %s ]' % key
desc += ' '
while True:
# prompt for user input
if password:
answer = getpass.getpass(desc).strip()
else:
answer = raw_input(desc).strip()
# No user input: use default
if not answer:
if default:
answer = default
else:
continue
# Have user input: validate answer
if yes_or_no:
if answer not in ['yes', 'no']:
print Utils.highlight('\nPlease answer yes or no\n')
continue
else:
return answer == 'yes'
else:
if validate:
try:
return validate(answer)
except InvalidAnswer, e:
print Utils.highlight('\n%s\n' % e)
continue
else:
return answer
@staticmethod
def validate_port(port):
try:
port = int(port)
except ValueError:
raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port))
if port <= 0 or port > 65535:
raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port))
return port
class InvalidAnswer(Exception):
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
def __str__(self):
return self.msg
### END of Utils
####################
class RPC(object):
def __init__(self):
import ccnet
ccnet_dir = os.environ['CCNET_CONF_DIR']
central_config_dir = os.environ['SEAFILE_CENTRAL_CONF_DIR']
self.rpc_client = ccnet.CcnetThreadedRpcClient(
ccnet.ClientPool(ccnet_dir, central_config_dir=central_config_dir))
def get_db_email_users(self):
return self.rpc_client.get_emailusers('DB', 0, 1)
def create_admin(self, email, user):
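# the two trailing 1s correspond to is_staff and is_active in the EmailUser
# table, i.e. the new account is created as an active administrator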
return self.rpc_client.add_emailuser(email, user, 1, 1)
def need_create_admin():
users = rpc.get_db_email_users()
return len(users) == 0
def create_admin(email, passwd):
if rpc.create_admin(email, passwd) < 0:
raise Exception('failed to create admin')
else:
print '\n\n'
print '----------------------------------------'
print 'Successfully created seafile admin'
print '----------------------------------------'
print '\n\n'
def ask_admin_email():
print
print '----------------------------------------'
print 'It\'s the first time you start the seafile server. Now let\'s create the admin account'
print '----------------------------------------'
def validate(email):
# whitespace is not allowed
if re.search(r'[\s]', email):
raise InvalidAnswer('%s is not a valid email address' % Utils.highlight(email))
# must be a valid email address
if not re.match(r'^.+@.*\..+$', email):
raise InvalidAnswer('%s is not a valid email address' % Utils.highlight(email))
return email
key = 'admin email'
question = 'What is the ' + Utils.highlight('email') + ' for the admin account?'
return Utils.ask_question(question,
key=key,
validate=validate)
def ask_admin_password():
def validate(password):
key = 'admin password again'
question = 'Enter the ' + Utils.highlight('password again:')
password_again = Utils.ask_question(question,
key=key,
password=True)
if password_again != password:
raise InvalidAnswer('password mismatch')
return password
key = 'admin password'
question = 'What is the ' + Utils.highlight('password') + ' for the admin account?'
return Utils.ask_question(question,
key=key,
password=True,
validate=validate)
rpc = RPC()
def main():
if not need_create_admin():
return
email = ask_admin_email()
passwd = ask_admin_password()
create_admin(email, passwd)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print '\n\n\n'
print Utils.highlight('Aborted.')
print
sys.exit(1)
except Exception, e:
print
print Utils.highlight('Error occurred while creating the seafile admin.')
print

View File

@ -1,4 +0,0 @@
@echo off
cd /d %~dp0
set PYTHONPATH=%PYTHONPATH%;%~dp0\seahub\thirdpart
start python upgrade/py/gc.py

View File

@ -1,53 +0,0 @@
#!/bin/bash
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
central_config_dir=${TOPDIR}/conf
function check_python_executable() {
if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
return 0
fi
if which python2.7 2>/dev/null 1>&2; then
PYTHON=python2.7
elif which python27 2>/dev/null 1>&2; then
PYTHON=python27
else
echo
echo "Can't find a python executable of version 2.7 or above in PATH"
echo "Install python 2.7+ before continue."
echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
echo
exit 1
fi
}
function read_seafile_data_dir () {
seafile_ini=${default_ccnet_conf_dir}/seafile.ini
if [[ ! -f ${seafile_ini} ]]; then
echo "${seafile_ini} not found. Now quit"
exit 1
fi
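# seafile.ini is written at setup time and holds a single line: the absolute
# path of the seafile data directory, so reading it is just a cat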
seafile_data_dir=$(cat "${seafile_ini}")
if [[ ! -d ${seafile_data_dir} ]]; then
echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits."
echo "Please check it first, or create this directory yourself."
echo ""
exit 1;
fi
}
check_python_executable;
read_seafile_data_dir;
export CCNET_CONF_DIR=${default_ccnet_conf_dir}
export SEAFILE_CONF_DIR=${seafile_data_dir}
export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir}
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH
manage_py=${INSTALLPATH}/seahub/manage.py
exec "$PYTHON" "$manage_py" createsuperuser

View File

@ -1,77 +0,0 @@
#!/bin/bash
echo ""
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_conf_dir=${TOPDIR}/conf
seaf_fsck=${INSTALLPATH}/seafile/bin/seaf-fsck
export PATH=${INSTALLPATH}/seafile/bin:$PATH
export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}
script_name=$0
function usage () {
echo "usage : "
echo "$(basename ${script_name}) [-h/--help] [-r/--repair] [-E/--export path_to_export] [repo_id_1 [repo_id_2 ...]]"
echo ""
}
function validate_ccnet_conf_dir () {
if [[ ! -d ${default_ccnet_conf_dir} ]]; then
echo "Error: there is no ccnet config directory."
echo "Have you run setup-seafile.sh before this?"
echo ""
exit -1;
fi
}
function read_seafile_data_dir () {
seafile_ini=${default_ccnet_conf_dir}/seafile.ini
if [[ ! -f ${seafile_ini} ]]; then
echo "${seafile_ini} not found. Now quit"
exit 1
fi
seafile_data_dir=$(cat "${seafile_ini}")
if [[ ! -d ${seafile_data_dir} ]]; then
echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits."
echo "Please check it first, or create this directory yourself."
echo ""
exit 1;
fi
}
function run_seaf_fsck () {
validate_ccnet_conf_dir;
read_seafile_data_dir;
echo "Starting seaf-fsck, please wait ..."
echo
LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_fsck} \
-c "${default_ccnet_conf_dir}" -d "${seafile_data_dir}" \
-F "${default_conf_dir}" \
${seaf_fsck_opts}
echo "seaf-fsck run done"
echo
}
if [ $# -gt 0 ];
then
for param in $@;
do
if [ ${param} = "-h" -o ${param} = "--help" ];
then
usage;
exit 1;
fi
done
fi
seaf_fsck_opts=$@
run_seaf_fsck;
echo "Done."

View File

@ -1,137 +0,0 @@
#!/bin/bash
echo ""
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_conf_dir=${TOPDIR}/conf
seaf_fuse=${INSTALLPATH}/seafile/bin/seaf-fuse
export PATH=${INSTALLPATH}/seafile/bin:$PATH
export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}
script_name=$0
function usage () {
echo "usage : "
echo "$(basename ${script_name}) { start <mount-point> | stop | restart <mount-point> } "
echo ""
}
# check args
if [[ "$1" != "start" && "$1" != "stop" && "$1" != "restart" ]]; then
usage;
exit 1;
fi
if [[ ($1 == "start" || $1 == "restart" ) && $# -lt 2 ]]; then
usage;
exit 1
fi
if [[ $1 == "stop" && $# != 1 ]]; then
usage;
exit 1
fi
function validate_ccnet_conf_dir () {
if [[ ! -d ${default_ccnet_conf_dir} ]]; then
echo "Error: there is no ccnet config directory."
echo "Have you run setup-seafile.sh before this?"
echo ""
exit -1;
fi
}
function read_seafile_data_dir () {
seafile_ini=${default_ccnet_conf_dir}/seafile.ini
if [[ ! -f ${seafile_ini} ]]; then
echo "${seafile_ini} not found. Now quit"
exit 1
fi
seafile_data_dir=$(cat "${seafile_ini}")
if [[ ! -d ${seafile_data_dir} ]]; then
echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits."
echo "Please check it first, or create this directory yourself."
echo ""
exit 1;
fi
}
function validate_already_running () {
if pid=$(pgrep -f "seaf-fuse -c ${default_ccnet_conf_dir}" 2>/dev/null); then
echo "seaf-fuse is already running, pid $pid"
echo
exit 1;
fi
}
function warning_if_seafile_not_running () {
if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then
echo
echo "Warning: seafile-controller not running. Have you run \"./seafile.sh start\" ?"
echo
fi
}
function start_seaf_fuse () {
validate_already_running;
warning_if_seafile_not_running;
validate_ccnet_conf_dir;
read_seafile_data_dir;
echo "Starting seaf-fuse, please wait ..."
logfile=${TOPDIR}/logs/seaf-fuse.log
LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_fuse} \
-c "${default_ccnet_conf_dir}" \
-d "${seafile_data_dir}" \
-F "${default_conf_dir}" \
-l "${logfile}" \
"$@"
sleep 2
# check if seaf-fuse started successfully
if ! pgrep -f "seaf-fuse -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then
echo "Failed to start seaf-fuse"
exit 1;
fi
echo "seaf-fuse started"
echo
}
function stop_seaf_fuse() {
if ! pgrep -f "seaf-fuse -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then
echo "seaf-fuse not running yet"
return 1;
fi
echo "Stopping seaf-fuse ..."
pkill -SIGTERM -f "seaf-fuse -c ${default_ccnet_conf_dir}"
return 0
}
function restart_seaf_fuse () {
stop_seaf_fuse
sleep 2
start_seaf_fuse $@
}
case $1 in
"start" )
shift
start_seaf_fuse $@;
;;
"stop" )
stop_seaf_fuse;
;;
"restart" )
shift
restart_seaf_fuse $@;
esac
echo "Done."

View File

@ -1,107 +0,0 @@
#!/bin/bash
echo ""
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_conf_dir=${TOPDIR}/conf
seaf_gc=${INSTALLPATH}/seafile/bin/seafserv-gc
seaf_gc_opts=""
export PATH=${INSTALLPATH}/seafile/bin:$PATH
export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}
script_name=$0
function usage () {
echo "usage : "
echo "$(basename ${script_name}) [--dry-run | -D] [--rm-deleted | -r] [repo-id1] [repo-id2]"
echo ""
}
function validate_ccnet_conf_dir () {
if [[ ! -d ${default_ccnet_conf_dir} ]]; then
echo "Error: there is no ccnet config directory."
echo "Have you run setup-seafile.sh before this?"
echo ""
exit -1;
fi
}
function read_seafile_data_dir () {
seafile_ini=${default_ccnet_conf_dir}/seafile.ini
if [[ ! -f ${seafile_ini} ]]; then
echo "${seafile_ini} not found. Now quit"
exit 1
fi
seafile_data_dir=$(cat "${seafile_ini}")
if [[ ! -d ${seafile_data_dir} ]]; then
echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits."
echo "Please check it first, or create this directory yourself."
echo ""
exit 1;
fi
}
function check_component_running() {
name=$1
cmd=$2
if pid=$(pgrep -f "$cmd" 2>/dev/null); then
echo "[$name] is running, pid $pid. You can stop it by: "
echo
echo " kill $pid"
echo
echo "Stop it and try again."
echo
exit
fi
}
function validate_already_running () {
if pid=$(pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null); then
echo "seafile server is still running, stop it by \"seafile.sh stop\""
echo
exit 1;
fi
check_component_running "ccnet-server" "ccnet-server -c ${default_ccnet_conf_dir}"
check_component_running "seaf-server" "seaf-server -c ${default_ccnet_conf_dir}"
check_component_running "fileserver" "fileserver -c ${default_ccnet_conf_dir}"
check_component_running "seafdav" "wsgidav.server.run_server"
}
function run_seaf_gc () {
validate_already_running;
validate_ccnet_conf_dir;
read_seafile_data_dir;
echo "Starting seafserv-gc, please wait ..."
LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_gc} \
-c "${default_ccnet_conf_dir}" \
-d "${seafile_data_dir}" \
-F "${default_conf_dir}" \
${seaf_gc_opts}
echo "seafserv-gc run done"
echo
}
if [ $# -gt 0 ];
then
for param in $@;
do
if [ ${param} = "-h" -o ${param} = "--help" ];
then
usage;
exit 1;
fi
done
fi
seaf_gc_opts=$@
run_seaf_gc;
echo "Done."

View File

@ -1,181 +0,0 @@
#!/bin/bash
### BEGIN INIT INFO
# Provides: seafile
# Required-Start: $local_fs $remote_fs $network
# Required-Stop: $local_fs
# Default-Start: 1 2 3 4 5
# Default-Stop:
# Short-Description: Starts Seafile Server
# Description: starts Seafile Server
### END INIT INFO
echo ""
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
central_config_dir=${TOPDIR}/conf
seaf_controller="${INSTALLPATH}/seafile/bin/seafile-controller"
export PATH=${INSTALLPATH}/seafile/bin:$PATH
export ORIG_LD_LIBRARY_PATH=${LD_LIBRARY_PATH}
export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}
script_name=$0
function usage () {
echo "usage : "
echo "$(basename ${script_name}) { start | stop | restart } "
echo ""
}
# check args
if [[ $# != 1 || ( "$1" != "start" && "$1" != "stop" && "$1" != "restart" ) ]]; then
usage;
exit 1;
fi
function validate_running_user () {
real_data_dir=`readlink -f ${seafile_data_dir}`
running_user=`id -un`
data_dir_owner=`stat -c %U ${real_data_dir}`
if [[ "${running_user}" != "${data_dir_owner}" ]]; then
echo "Error: the user running the script (\"${running_user}\") is not the owner of \"${real_data_dir}\" folder, you should use the user \"${data_dir_owner}\" to run the script."
exit -1;
fi
}
function validate_ccnet_conf_dir () {
if [[ ! -d ${default_ccnet_conf_dir} ]]; then
echo "Error: there is no ccnet config directory."
echo "Have you run setup-seafile.sh before this?"
echo ""
exit -1;
fi
}
function validate_central_conf_dir () {
if [[ ! -d ${central_config_dir} ]]; then
echo "Error: there is no conf/ directory."
echo "Have you run setup-seafile.sh before this?"
echo ""
exit -1;
fi
}
function read_seafile_data_dir () {
seafile_ini=${default_ccnet_conf_dir}/seafile.ini
if [[ ! -f ${seafile_ini} ]]; then
echo "${seafile_ini} not found. Now quit"
exit 1
fi
seafile_data_dir=$(cat "${seafile_ini}")
if [[ ! -d ${seafile_data_dir} ]]; then
echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits."
echo "Please check it first, or create this directory yourself."
echo ""
exit 1;
fi
}
function test_config() {
if ! LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_controller} --test \
-c "${default_ccnet_conf_dir}" \
-d "${seafile_data_dir}" \
-F "${central_config_dir}" ; then
exit 1;
fi
}
function check_component_running() {
name=$1
cmd=$2
if pid=$(pgrep -f "$cmd" 2>/dev/null); then
echo "[$name] is running, pid $pid. You can stop it by: "
echo
echo " kill $pid"
echo
echo "Stop it and try again."
echo
exit
fi
}
function validate_already_running () {
if pid=$(pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null); then
echo "Seafile controller is already running, pid $pid"
echo
exit 1;
fi
check_component_running "ccnet-server" "ccnet-server -c ${default_ccnet_conf_dir}"
check_component_running "seaf-server" "seaf-server -c ${default_ccnet_conf_dir}"
check_component_running "fileserver" "fileserver -c ${default_ccnet_conf_dir}"
check_component_running "seafdav" "wsgidav.server.run_server"
}
function start_seafile_server () {
validate_already_running;
validate_central_conf_dir;
validate_ccnet_conf_dir;
read_seafile_data_dir;
validate_running_user;
test_config;
echo "Starting seafile server, please wait ..."
mkdir -p $TOPDIR/logs
LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_controller} \
-c "${default_ccnet_conf_dir}" \
-d "${seafile_data_dir}" \
-F "${central_config_dir}"
sleep 3
# check if seafile server started successfully
if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then
echo "Failed to start seafile server"
exit 1;
fi
echo "Seafile server started"
echo
}
function stop_seafile_server () {
if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then
echo "seafile server not running yet"
return 1;
fi
echo "Stopping seafile server ..."
pkill -SIGTERM -f "seafile-controller -c ${default_ccnet_conf_dir}"
pkill -f "ccnet-server -c ${default_ccnet_conf_dir}"
pkill -f "seaf-server -c ${default_ccnet_conf_dir}"
pkill -f "fileserver -c ${default_ccnet_conf_dir}"
pkill -f "soffice.*--invisible --nocrashreport"
pkill -f "wsgidav.server.run_server"
return 0
}
function restart_seafile_server () {
stop_seafile_server;
sleep 2
start_seafile_server;
}
case $1 in
"start" )
start_seafile_server;
;;
"stop" )
stop_seafile_server;
;;
"restart" )
restart_seafile_server;
esac
echo "Done."

View File

@ -1,12 +0,0 @@
import os
daemon = True
workers = 3
# Logging
runtime_dir = os.path.dirname(__file__)
pidfile = os.path.join(runtime_dir, 'seahub.pid')
errorlog = os.path.join(runtime_dir, 'error.log')
accesslog = os.path.join(runtime_dir, 'access.log')
# for file upload, we need a longer timeout value (default is only 30s, too short)
timeout = 1200

View File

@ -1,274 +0,0 @@
#!/bin/bash
### BEGIN INIT INFO
# Provides: seahub
# Required-Start: $local_fs $remote_fs $network
# Required-Stop: $local_fs
# Default-Start: 1 2 3 4 5
# Default-Stop:
# Short-Description: Starts Seahub
# Description: starts Seahub
### END INIT INFO
echo ""
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
central_config_dir=${TOPDIR}/conf
manage_py=${INSTALLPATH}/seahub/manage.py
gunicorn_conf=${INSTALLPATH}/runtime/seahub.conf
pidfile=${INSTALLPATH}/runtime/seahub.pid
errorlog=${INSTALLPATH}/runtime/error.log
accesslog=${INSTALLPATH}/runtime/access.log
gunicorn_exe=${INSTALLPATH}/seahub/thirdpart/gunicorn
script_name=$0
function usage () {
echo "Usage: "
echo
echo " $(basename ${script_name}) { start <port> | stop | restart <port> }"
echo
echo "To run seahub in fastcgi:"
echo
echo " $(basename ${script_name}) { start-fastcgi <port> | stop | restart-fastcgi <port> }"
echo
echo "<port> is optional, and defaults to 8000"
echo ""
}
# Check args
if [[ $1 != "start" && $1 != "stop" && $1 != "restart" \
&& $1 != "start-fastcgi" && $1 != "restart-fastcgi" && $1 != "clearsessions" ]]; then
usage;
exit 1;
fi
function check_python_executable() {
if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
return 0
fi
if which python2.7 2>/dev/null 1>&2; then
PYTHON=python2.7
elif which python27 2>/dev/null 1>&2; then
PYTHON=python27
else
echo
echo "Can't find a python executable of version 2.7 or above in PATH"
echo "Install python 2.7+ before continue."
echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
echo
exit 1
fi
}
function validate_ccnet_conf_dir () {
if [[ ! -d ${default_ccnet_conf_dir} ]]; then
echo "Error: there is no ccnet config directory."
echo "Have you run setup-seafile.sh before this?"
echo ""
exit -1;
fi
}
function read_seafile_data_dir () {
seafile_ini=${default_ccnet_conf_dir}/seafile.ini
if [[ ! -f ${seafile_ini} ]]; then
echo "${seafile_ini} not found. Now quit"
exit 1
fi
seafile_data_dir=$(cat "${seafile_ini}")
if [[ ! -d ${seafile_data_dir} ]]; then
echo "Your seafile server data directory \"${seafile_data_dir}\" is invalid or doesn't exits."
echo "Please check it first, or create this directory yourself."
echo ""
exit 1;
fi
}
function validate_seahub_running () {
if pgrep -f "${manage_py}" 2>/dev/null 1>&2; then
echo "Seahub is already running."
exit 1;
elif pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then
echo "Seahub is already running."
exit 1;
fi
}
function validate_port () {
if ! [[ ${port} =~ ^[1-9][0-9]{1,4}$ ]] ; then
printf "\033[033m${port}\033[m is not a valid port number\n\n"
usage;
exit 1
fi
}
if [[ ($1 == "start" || $1 == "restart" || $1 == "start-fastcgi" || $1 == "restart-fastcgi") \
&& ($# == 2 || $# == 1) ]]; then
if [[ $# == 2 ]]; then
port=$2
validate_port
else
port=8000
fi
elif [[ $1 == "stop" && $# == 1 ]]; then
dummy=dummy
elif [[ $1 == "clearsessions" && $# == 1 ]]; then
dummy=dummy
else
usage;
exit 1
fi
function warning_if_seafile_not_running () {
if ! pgrep -f "seafile-controller -c ${default_ccnet_conf_dir}" 2>/dev/null 1>&2; then
echo
echo "Warning: seafile-controller not running. Have you run \"./seafile.sh start\" ?"
echo
exit 1
fi
}
function prepare_seahub_log_dir() {
logdir=${TOPDIR}/logs
if ! [[ -d ${logdir} ]]; then
if ! mkdir -p "${logdir}"; then
echo "ERROR: failed to create logs dir \"${logdir}\""
exit 1
fi
fi
export SEAHUB_LOG_DIR=${logdir}
}
function before_start() {
prepare_env;
warning_if_seafile_not_running;
validate_seahub_running;
prepare_seahub_log_dir;
}
function start_seahub () {
before_start;
echo "Starting seahub at port ${port} ..."
check_init_admin;
$PYTHON $gunicorn_exe seahub.wsgi:application -c "${gunicorn_conf}" -b "0.0.0.0:${port}" --preload
# Ensure seahub is started successfully
sleep 5
if ! pgrep -f "seahub.wsgi:application" 2>/dev/null 1>&2; then
printf "\033[33mError:Seahub failed to start.\033[m\n"
echo "Please try to run \"./seahub.sh start\" again"
exit 1;
fi
echo
echo "Seahub is started"
echo
}
function start_seahub_fastcgi () {
before_start;
# Returns 127.0.0.1 if SEAFILE_FASTCGI_HOST is unset or hasn't got any value,
# otherwise returns value of SEAFILE_FASTCGI_HOST environment variable
address=`(test -z "$SEAFILE_FASTCGI_HOST" && echo "127.0.0.1") || echo $SEAFILE_FASTCGI_HOST`
echo "Starting seahub (fastcgi) at ${address}:${port} ..."
check_init_admin;
$PYTHON "${manage_py}" runfcgi host=$address port=$port pidfile=$pidfile \
outlog=${accesslog} errlog=${errorlog}
# Ensure seahub is started successfully
sleep 5
if ! pgrep -f "${manage_py}" 1>/dev/null; then
printf "\033[33mError:Seahub failed to start.\033[m\n"
exit 1;
fi
echo
echo "Seahub is started"
echo
}
function prepare_env() {
check_python_executable;
validate_ccnet_conf_dir;
read_seafile_data_dir;
if [[ -z "$LANG" ]]; then
echo "LANG is not set in ENV, set to en_US.UTF-8"
export LANG='en_US.UTF-8'
fi
if [[ -z "$LC_ALL" ]]; then
echo "LC_ALL is not set in ENV, set to en_US.UTF-8"
export LC_ALL='en_US.UTF-8'
fi
export CCNET_CONF_DIR=${default_ccnet_conf_dir}
export SEAFILE_CONF_DIR=${seafile_data_dir}
export SEAFILE_CENTRAL_CONF_DIR=${central_config_dir}
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.6/site-packages:${INSTALLPATH}/seafile/lib64/python2.6/site-packages:${INSTALLPATH}/seahub:${INSTALLPATH}/seahub/thirdpart:$PYTHONPATH
export PYTHONPATH=${INSTALLPATH}/seafile/lib/python2.7/site-packages:${INSTALLPATH}/seafile/lib64/python2.7/site-packages:$PYTHONPATH
}
function clear_sessions () {
prepare_env;
echo "Start clear expired session records ..."
$PYTHON "${manage_py}" clearsessions
echo
echo "Done"
echo
}
function stop_seahub () {
if [[ -f ${pidfile} ]]; then
pid=$(cat "${pidfile}")
echo "Stopping seahub ..."
kill ${pid}
rm -f ${pidfile}
return 0
else
echo "Seahub is not running"
fi
}
function check_init_admin() {
check_init_admin_script=${INSTALLPATH}/check_init_admin.py
if ! $PYTHON $check_init_admin_script; then
exit 1
fi
}
case $1 in
"start" )
start_seahub;
;;
"start-fastcgi" )
start_seahub_fastcgi;
;;
"stop" )
stop_seahub;
;;
"restart" )
stop_seahub
sleep 2
start_seahub
;;
"restart-fastcgi" )
stop_seahub
sleep 2
start_seahub_fastcgi
;;
"clearsessions" )
clear_sessions
;;
esac
echo "Done."
echo ""

View File

@ -1,31 +0,0 @@
# Server Release Package
1. Libsearpc
cd libsearpc;
CFLAGS="-O2" configure --prefix=$dest
make install
2. Ccnet
cd ccnet;
CFLAGS="-O2" ./configure --enable-server-pkg --prefix=$dest
make install
3. Seafile
cd seafile;
CFLAGS="-O2" configure --enable-server-pkg --prefix=$dest
make install
4. copy shared libraries
scripts/cp-shared-lib.py $dest/lib
5. strip libs/executables
python do-strip.py
6. Update seahub
cd seahub
git fetch origin
git checkout release
git rebase origin/master
7. Pack
./pack-server.sh 1.0.0
DONE!

File diff suppressed because it is too large

View File

@ -1,89 +0,0 @@
#!/bin/bash
########
### This script is a wrapper for setup-seafile-mysql.py
########
set -e
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
cd "$INSTALLPATH"
python_script=setup-seafile-mysql.py
function err_and_quit () {
printf "\n\n\033[33mError occured during setup. \nPlease fix possible problems and run the script again.\033[m\n\n"
exit 1;
}
function check_python_executable() {
if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
return 0
fi
if which python2.7 2>/dev/null 1>&2; then
PYTHON=python2.7
elif which python27 2>/dev/null 1>&2; then
PYTHON=python27
else
echo
echo "Can't find a python executable of version 2.7 or above in PATH"
echo "Install python 2.7+ before continue."
echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
echo
exit 1
fi
}
function check_python_module () {
module=$1
name=$2
hint=$3
printf " Checking python module: ${name} ... "
if ! $PYTHON -c "import ${module}" 2>/dev/null 1>&2; then
echo
printf "\033[33m ${name} \033[m is not installed, Please install it first.\n"
if [[ "${hint}" != "" ]]; then
printf "${hint}"
echo
fi
err_and_quit;
fi
echo -e "Done."
}
function check_python () {
echo "Checking python on this machine ..."
check_python_executable
if ! which $PYTHON 2>/dev/null 1>&2; then
echo "No $PYTHON found on this machine. Please install it first."
err_and_quit;
else
if ($PYTHON --version 2>&1 | grep "3\\.[0-9]\\.[0-9]") 2>/dev/null 1>&2 ; then
printf "\033[33m Python version 3.x \033[m detected\n"
echo "Python 3.x is not supported. Please use python 2.x. Now quit."
err_and_quit;
fi
if [[ $PYTHON == "python2.6" ]]; then
py26="2.6"
fi
hint="\nOn Debian/Ubntu: apt-get install python-setuptools\nOn CentOS/RHEL: yum install python${py26}-distribute"
check_python_module pkg_resources setuptools "${hint}"
hint="\nOn Debian/Ubntu: apt-get install python-imaging\nOn CentOS/RHEL: yum install python${py26}-imaging"
check_python_module PIL python-imaging "${hint}"
hint='\nOn Debian/Ubuntu:\n\nsudo apt-get install python-mysqldb\n\nOn CentOS/RHEL:\n\nsudo yum install MySQL-python'
check_python_module MySQLdb python-mysqldb "${hint}"
fi
echo
}
check_python;
export PYTHON=$PYTHON
exec $PYTHON "$python_script" "$@"

View File

@ -1,704 +0,0 @@
#!/bin/bash
SCRIPT=$(readlink -f "$0")
INSTALLPATH=$(dirname "${SCRIPT}")
TOPDIR=$(dirname "${INSTALLPATH}")
default_ccnet_conf_dir=${TOPDIR}/ccnet
default_seafile_data_dir=${TOPDIR}/seafile-data
default_seahub_db=${TOPDIR}/seahub.db
default_conf_dir=${TOPDIR}/conf
export SEAFILE_LD_LIBRARY_PATH=${INSTALLPATH}/seafile/lib/:${INSTALLPATH}/seafile/lib64:${LD_LIBRARY_PATH}
use_existing_ccnet="false"
use_existing_seafile="false"
server_manual_http="https://github.com/haiwen/seafile/wiki"
function welcome () {
echo "-----------------------------------------------------------------"
echo "This script will guide you to config and setup your seafile server."
echo -e "\nMake sure you have read seafile server manual at \n\n\t${server_manual_http}\n"
echo -e "Note: This script will guide your to setup seafile server using sqlite3,"
echo "which may have problems if your disk is on a NFS/CIFS/USB."
echo "In these cases, we sugguest you setup seafile server using MySQL."
echo
echo "Press [ENTER] to continue"
echo "-----------------------------------------------------------------"
read dummy
echo
}
function err_and_quit () {
printf "\n\n\033[33mError occured during setup. \nPlease fix possible issues and run the script again.\033[m\n\n"
exit 1;
}
function on_ctrl_c_pressed () {
printf "\n\n\033[33mYou have pressed Ctrl-C. Setup is interrupted.\033[m\n\n"
exit 1;
}
# clean newly created ccnet/seafile configs when exit on SIGINT
trap on_ctrl_c_pressed 2
function check_sanity () {
if ! [[ -d ${INSTALLPATH}/seahub && -d ${INSTALLPATH}/seafile \
&& -d ${INSTALLPATH}/runtime ]]; then
echo
echo "The seafile-server diretory doesn't contain all needed files."
echo "Please make sure you have extracted all files and folders from tarball."
err_and_quit;
fi
}
function read_yes_no () {
printf "[yes|no] "
read yesno;
while [[ "${yesno}" != "yes" && "${yesno}" != "no" ]]
do
printf "please answer [yes|no] "
read yesno;
done
if [[ "${yesno}" == "no" ]]; then
return 1;
else
return 0;
fi
}
function check_existing_ccnet () {
if [[ -d ${default_ccnet_conf_dir} ]]; then
echo "It seems that you have created a ccnet configuration before. "
echo "Would you like to use the existing configuration?"
if ! read_yes_no; then
echo
echo "Please remove the existing configuration before continuing."
echo "You can do it by running \"rm -rf ${default_ccnet_conf_dir}\""
echo
exit 1;
else
echo
echo "Existing ccnet configuration is being used."
use_existing_ccnet=true
fi
fi
echo
}
function check_python_executable() {
if [[ "$PYTHON" != "" && -x $PYTHON ]]; then
return 0
fi
if which python2.7 2>/dev/null 1>&2; then
PYTHON=python2.7
elif which python27 2>/dev/null 1>&2; then
PYTHON=python27
else
echo
echo "Can't find a python executable of version 2.7 or above in PATH"
echo "Install python 2.7+ before continue."
echo "Or if you installed it in a non-standard PATH, set the PYTHON enviroment varirable to it"
echo
exit 1
fi
echo "Find python: $PYTHON"
echo
}
function check_python_module () {
module=$1
name=$2
hint=$3
printf " Checking python module: ${name} ... "
if ! $PYTHON -c "import ${module}" 2>/dev/null 1>&2; then
echo
printf "\033[33m ${name} \033[m is not installed, Please install it first.\n"
if [[ "${hint}" != "" ]]; then
printf "${hint}"
echo
fi
err_and_quit;
fi
echo -e "Done."
}
function check_python () {
echo "Checking python on this machine ..."
check_python_executable
if ! which $PYTHON 2>/dev/null 1>&2; then
echo "No $PYTHON found on this machine. Please install it first."
err_and_quit;
else
if ($PYTHON --version 2>&1 | grep "3\\.[0-9]\\.[0-9]") 2>/dev/null 1>&2 ; then
printf "\033[33m Python version 3.x \033[m detected\n"
echo "Python 3.x is not supported. Please use python 2.x."
err_and_quit;
fi
if [[ $PYTHON == "python2.6" ]]; then
py26="2.6"
fi
hint="\nOn Debian/Ubntu: apt-get install python-setuptools\nOn CentOS/RHEL: yum install python${py26}-distribute"
check_python_module pkg_resources setuptools "${hint}"
hint="\nOn Debian/Ubntu: apt-get install python-imaging\nOn CentOS/RHEL: yum install python${py26}-imaging"
check_python_module PIL python-imaging "${hint}"
check_python_module sqlite3 python-sqlite3
fi
echo
}
function check_sqlite3 () {
echo -n "Checking for sqlite3 ..."
if ! which sqlite3 2>/dev/null 1>&2; then
echo -e "\nSqlite3 is not found. install it first.\n"
echo "On Debian/Ubuntu: apt-get install sqlite3"
echo "On CentOS/RHEL: yum install sqlite"
err_and_quit;
fi
printf "Done.\n\n"
}
function check_system_dependency () {
printf "Checking packages needed by seafile ...\n\n"
check_python;
check_sqlite3;
printf "Checking Done.\n\n"
}
function ask_question () {
question=$1
default=$2
key=$3
printf "${question}"
printf "\n"
if [[ "${default}" != "" && "${default}" != "nodefault" ]] ; then
printf "[default: ${default} ] "
elif [[ "${key}" != "" ]]; then
printf "[${key}]: "
fi
}
function get_server_name () {
question="What would you like to use as the name of this seafile server?\nYour seafile users will be able to see the name in their seafile client."
hint="You can use a-z, A-Z, 0-9, _ and -, and the length should be 3 ~ 15"
ask_question "${question}\n${hint}" "nodefault" "server name"
read server_name
if [[ "${server_name}" == "" ]]; then
echo
echo "server name cannot be empty"
get_server_name
elif [[ ! ${server_name} =~ ^[a-zA-Z0-9_-]{3,14}$ ]]; then
printf "\n\033[33m${server_name}\033[m is not a valid name.\n"
get_server_name;
fi
echo
}
function get_server_ip_or_domain () {
question="What is the ip or domain of this server?\nFor example, www.mycompany.com, or, 192.168.1.101"
ask_question "${question}\n" "nodefault" "This server's ip or domain"
read ip_or_domain
if [[ "${ip_or_domain}" == "" ]]; then
echo
echo "ip or domain cannot be empty"
get_server_ip_or_domain
fi
echo
}
function get_ccnet_server_port () {
question="What tcp port do you want to use for ccnet server?"
hint="10001 is the recommended port."
default="10001"
ask_question "${question}\n${hint}" "${default}"
read server_port
if [[ "${server_port}" == "" ]]; then
server_port="${default}"
fi
if [[ ! ${server_port} =~ ^[0-9]+$ ]]; then
echo "\"${server_port}\" is not a valid port number. "
get_ccnet_server_port
fi
echo
}
function get_seafile_server_port () {
question="What tcp port would you like to use for seafile server?"
hint="12001 is the recommended port."
default="12001"
ask_question "${question}\n${hint}" "${default}"
read seafile_server_port
if [[ "${seafile_server_port}" == "" ]]; then
seafile_server_port="${default}"
fi
if [[ ! ${seafile_server_port} =~ ^[0-9]+$ ]]; then
echo "\"${seafile_server_port}\" is not a valid port number. "
get_seafile_server_port
fi
echo
}
function get_fileserver_port () {
question="What tcp port do you want to use for seafile fileserver?"
hint="8082 is the recommended port."
default="8082"
ask_question "${question}\n${hint}" "${default}"
read fileserver_port
if [[ "${fileserver_port}" == "" ]]; then
fileserver_port="${default}"
fi
if [[ ! ${fileserver_port} =~ ^[0-9]+$ ]]; then
echo "\"${fileserver_port}\" is not a valid port number. "
get_fileserver_port
fi
echo
}
function get_seafile_data_dir () {
question="Where would you like to store your seafile data?"
note="Please use a volume with enough free space."
default=${default_seafile_data_dir}
ask_question "${question} \n\033[33mNote: \033[m${note}" "${default}"
read seafile_data_dir
if [[ "${seafile_data_dir}" == "" ]]; then
seafile_data_dir=${default}
fi
if [[ -d ${seafile_data_dir} && -f ${seafile_data_dir}/seafile.conf ]]; then
echo
echo "It seems that you have already existing seafile data in ${seafile_data_dir}."
echo "Would you like to use the existing seafile data?"
if ! read_yes_no; then
echo "You have chosen not to use existing seafile data in ${seafile_data_dir}"
echo "You need to specify a different seafile data directory or remove ${seafile_data_dir} before continuing."
get_seafile_data_dir
else
use_existing_seafile="true"
fi
elif [[ -d ${seafile_data_dir} && $(ls -A ${seafile_data_dir}) != "" ]]; then
echo
echo "${seafile_data_dir} is an existing non-empty directory. Please specify a different directory"
echo
get_seafile_data_dir
elif [[ ! ${seafile_data_dir} =~ ^/ ]]; then
echo
echo "\"${seafile_data_dir}\" is not an absolute path. Please specify an absolute path."
echo
get_seafile_data_dir
elif [[ ! -d $(dirname ${seafile_data_dir}) ]]; then
echo
echo "The path $(dirname ${seafile_data_dir}) does not exist."
echo
get_seafile_data_dir
fi
echo
}
function gen_seafdav_conf () {
mkdir -p ${default_conf_dir}
seafdav_conf=${default_conf_dir}/seafdav.conf
if ! $(cat > ${seafdav_conf} <<EOF
[WEBDAV]
enabled = false
port = 8080
fastcgi = false
host = 0.0.0.0
share_name = /
EOF
); then
echo "failed to generate seafdav.conf";
err_and_quit
fi
}
function copy_user_manuals() {
src_docs_dir=${INSTALLPATH}/seafile/docs/
library_template_dir=${seafile_data_dir}/library-template
mkdir -p ${library_template_dir}
cp -f ${src_docs_dir}/*.doc ${library_template_dir}
}
function parse_params() {
while getopts n:i:p:d: arg; do
case $arg in
n)
server_name=${OPTARG}
;;
i)
ip_or_domain=${OPTARG}
;;
p)
fileserver_port=${OPTARG}
;;
d)
seafile_data_dir=${OPTARG}
;;
esac
done
}
function validate_params() {
# server_name default hostname -s
if [[ "$server_name" == "" ]]; then
server_name=${SERVER_NAME:-`hostname -s`}
fi
if [[ ! ${server_name} =~ ^[a-zA-Z0-9_-]{3,14}$ ]]; then
echo "Invalid server name param"
err_and_quit;
fi
# ip_or_domain default hostname -i
if [[ "$ip_or_domain" == "" ]]; then
ip_or_domain=${SERVER_IP:-`hostname -i`}
fi
if [[ "$ip_or_domain" != "" && ! ${ip_or_domain} =~ ^[^.].+\..+[^.]$ ]]; then
echo "Invalid ip or domain param"
err_and_quit;
fi
# fileserver_port default 8082
if [[ "${fileserver_port}" == "" ]]; then
fileserver_port=${FILESERVER_PORT:-8082}
fi
if [[ ! ${fileserver_port} =~ ^[0-9]+$ ]]; then
echo "Invalid fileserver port param"
err_and_quit;
fi
if [[ "${seafile_data_dir}" == "" ]]; then
seafile_data_dir=${SEAFILE_DIR:-${default_seafile_data_dir}}
fi
if [[ -d ${seafile_data_dir} && $(ls -A ${seafile_data_dir}) != "" ]]; then
echo "${seafile_data_dir} is an existing non-empty directory. Please specify a different directory"
err_and_quit
elif [[ ! ${seafile_data_dir} =~ ^/ ]]; then
echo "\"${seafile_data_dir}\" is not an absolute path. Please specify an absolute path."
err_and_quit
elif [[ ! -d $(dirname ${seafile_data_dir}) ]]; then
echo "The path $(dirname ${seafile_data_dir}) does not exist."
err_and_quit
fi
}
function usage() {
echo "auto mode:"
echo -e "$0 auto\n" \
"-n server name\n" \
"-i ip or domain\n" \
"-p fileserver port\n" \
"-d seafile dir to store seafile data"
echo ""
echo "interactive mode:"
echo "$0"
}
# -------------------------------------------
# Main workflow of this script
# -------------------------------------------
for param in $@; do
if [[ "$param" == "-h" || "$param" == "--help" ]]; then
usage;
exit 0
fi
done
need_pause=1
if [[ $# -ge 1 && "$1" == "auto" ]]; then
# auto mode, no pause
shift
parse_params $@;
validate_params;
need_pause=0
fi
check_sanity;
if [[ "${need_pause}" == "1" ]]; then
welcome;
fi
sleep .5
check_system_dependency;
sleep .5
check_existing_ccnet;
if [[ ${use_existing_ccnet} != "true" ]]; then
if [[ "${server_name}" == "" ]]; then
get_server_name;
fi
if [[ "${ip_or_domain}" == "" ]]; then
get_server_ip_or_domain;
fi
# get_ccnet_server_port;
fi
if [[ "$seafile_data_dir" == "" ]]; then
get_seafile_data_dir;
fi
if [[ ${use_existing_seafile} != "true" ]]; then
# get_seafile_server_port
if [[ "$fileserver_port" == "" ]]; then
get_fileserver_port
fi
fi
sleep .5
printf "\nThis is your config information:\n\n"
if [[ ${use_existing_ccnet} != "true" ]]; then
printf "server name: \033[33m${server_name}\033[m\n"
printf "server ip/domain: \033[33m${ip_or_domain}\033[m\n"
else
printf "ccnet config: use existing config in \033[33m${default_ccnet_conf_dir}\033[m\n"
fi
if [[ ${use_existing_seafile} != "true" ]]; then
printf "seafile data dir: \033[33m${seafile_data_dir}\033[m\n"
printf "fileserver port: \033[33m${fileserver_port}\033[m\n"
else
printf "seafile data dir: use existing data in \033[33m${seafile_data_dir}\033[m\n"
fi
if [[ "${need_pause}" == "1" ]]; then
echo
echo "If you are OK with the configuration, press [ENTER] to continue."
read dummy
fi
ccnet_init=${INSTALLPATH}/seafile/bin/ccnet-init
seaf_server_init=${INSTALLPATH}/seafile/bin/seaf-server-init
# -------------------------------------------
# Create ccnet conf
# -------------------------------------------
if [[ "${use_existing_ccnet}" != "true" ]]; then
echo "Generating ccnet configuration in ${default_ccnet_conf_dir}..."
echo
if ! LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH "${ccnet_init}" \
-F "${default_conf_dir}" \
-c "${default_ccnet_conf_dir}" \
--name "${server_name}" \
--host "${ip_or_domain}"; then
err_and_quit;
fi
echo
fi
sleep 0.5
# -------------------------------------------
# Create seafile conf
# -------------------------------------------
if [[ "${use_existing_seafile}" != "true" ]]; then
echo "Generating seafile configuration in ${seafile_data_dir} ..."
echo
if ! LD_LIBRARY_PATH=$SEAFILE_LD_LIBRARY_PATH ${seaf_server_init} \
--central-config-dir "${default_conf_dir}" \
--seafile-dir "${seafile_data_dir}" \
--fileserver-port ${fileserver_port}; then
echo "Failed to generate seafile configuration"
err_and_quit;
fi
echo
fi
# -------------------------------------------
# Write seafile.ini
# -------------------------------------------
echo "${seafile_data_dir}" > "${default_ccnet_conf_dir}/seafile.ini"
# -------------------------------------------
# Generate seafdav.conf
# -------------------------------------------
gen_seafdav_conf;
# -------------------------------------------
# generate seahub/settings.py
# -------------------------------------------
dest_settings_py=${TOPDIR}/conf/seahub_settings.py
seahub_secret_keygen=${INSTALLPATH}/seahub/tools/secret_key_generator.py
if [[ ! -f ${dest_settings_py} ]]; then
echo -n "SECRET_KEY = " >> "${dest_settings_py}"
key=$($PYTHON "${seahub_secret_keygen}")
echo "\"${key}\"" >> "${dest_settings_py}"
fi
# -------------------------------------------
# Seahub related config
# -------------------------------------------
if [[ "${need_pause}" == "1" ]]; then
echo "-----------------------------------------------------------------"
echo "Seahub is the web interface for seafile server."
echo "Now let's setup seahub configuration. Press [ENTER] to continue"
echo "-----------------------------------------------------------------"
echo
read dummy
fi
# echo "Please specify the email address and password for the seahub administrator."
# echo "You can use them to login as admin on your seahub website."
# echo
function get_seahub_admin_email () {
question="Please specify the email address for the seahub administrator:"
ask_question "${question}" "nodefault" "seahub admin email"
read seahub_admin_email
if [[ "${seahub_admin_email}" == "" ]]; then
echo "Seahub admin user name cannot be empty."
get_seahub_admin_email;
elif [[ ! ${seahub_admin_email} =~ ^.+@.*\..+$ ]]; then
echo "${seahub_admin_email} is not a valid email address"
get_seahub_admin_email;
fi
}
function get_seahub_admin_passwd () {
echo
question="Please specify the password you would like to use for seahub administrator:"
ask_question "${question}" "nodefault" "seahub admin password"
read -s seahub_admin_passwd
echo
question="Please enter the password again:"
ask_question "${question}" "nodefault" "seahub admin password again"
read -s seahub_admin_passwd_again
echo
if [[ "${seahub_admin_passwd}" != "${seahub_admin_passwd_again}" ]]; then
printf "\033[33mThe passwords didn't match.\033[m"
get_seahub_admin_passwd;
elif [[ "${seahub_admin_passwd}" == "" ]]; then
echo "Password cannot be empty."
get_seahub_admin_passwd;
fi
}
# get_seahub_admin_email;
# sleep .5;
# get_seahub_admin_passwd;
# seahub_admin_passwd_enc=$(echo -n ${seahub_admin_passwd} | sha1sum | grep -o "[0-9a-f]*")
# sleep .5;
# printf "\n\n"
# echo "This is your seahub admin username/password"
# echo
# printf "admin username: \033[33m${seahub_admin_email}\033[m\n"
# printf "admin password: \033[33m**************\033[m\n\n"
# echo
# echo "If you are OK with the configuration, press [ENTER] to continue."
# read dummy
# usermgr_db_dir=${default_ccnet_conf_dir}/PeerMgr/
# usermgr_db=${usermgr_db_dir}/usermgr.db
# if [[ "${use_existing_ccnet}" != "true" ]]; then
# # create admin user/passwd entry in ccnet db
# if ! mkdir -p "${usermgr_db_dir}"; then
# echo "Failed to create seahub admin."
# err_and_quit;
# fi
# sql="CREATE TABLE IF NOT EXISTS EmailUser (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, email TEXT, passwd TEXT, is_staff bool NOT NULL, is_active bool NOT NULL, ctime INTEGER)";
# if ! sqlite3 "${usermgr_db}" "${sql}" ; then
# rm -f "${usermgr_db}"
# echo "Failed to create seahub admin."
# err_and_quit;
# fi
# sql="INSERT INTO EmailUser(email, passwd, is_staff, is_active, ctime) VALUES (\"${seahub_admin_email}\", \"${seahub_admin_passwd_enc}\", 1, 1, 0);"
# if ! sqlite3 "${usermgr_db}" "${sql}" ; then
# rm -f "${usermgr_db}"
# echo "Failed to create seahub admin."
# err_and_quit;
# fi
# fi
echo "Creating seahub database now, it may take one minute, please wait... "
echo
seahub_db=${TOPDIR}/seahub.db
seahub_sqls=${INSTALLPATH}/seahub/sql/sqlite3.sql
if ! sqlite3 ${seahub_db} ".read ${seahub_sqls}" 2>/dev/null 1>&2; then
echo "Failed to sync seahub database."
err_and_quit;
fi
echo
echo "Done."
# prepare avatar folder
media_dir=${INSTALLPATH}/seahub/media
orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars
dest_avatar_dir=${TOPDIR}/seahub-data/avatars
if [[ ! -d ${dest_avatar_dir} ]]; then
mkdir -p "${TOPDIR}/seahub-data"
mv "${orig_avatar_dir}" "${dest_avatar_dir}"
ln -s ../../../seahub-data/avatars ${media_dir}
fi
# Make a seafile-server symlink, like this:
# /data/haiwen/
# -- seafile-server-2.0.4
# -- seafile-server-latest # symlink to 2.0.4
seafile_server_symlink=${TOPDIR}/seafile-server-latest
echo
echo -n "creating seafile-server-latest symbolic link ... "
if ! ln -s $(basename ${INSTALLPATH}) ${seafile_server_symlink}; then
echo
echo
echo "Failed to create symbolic link ${seafile_server_symlink}"
err_and_quit;
fi
echo "done"
echo
chmod 0600 "$dest_settings_py"
chmod 0700 "$default_ccnet_conf_dir"
chmod 0700 "$seafile_data_dir"
chmod 0700 "$default_conf_dir"
# -------------------------------------------
# copy user manuals to library template
# -------------------------------------------
copy_user_manuals;
# -------------------------------------------
# final message
# -------------------------------------------
sleep 1
echo
echo "-----------------------------------------------------------------"
echo "Your seafile server configuration has been completed successfully."
echo "-----------------------------------------------------------------"
echo
echo "run seafile server: ./seafile.sh { start | stop | restart }"
echo "run seahub server: ./seahub.sh { start <port> | stop | restart <port> }"
echo
echo "-----------------------------------------------------------------"
echo "If the server is behind a firewall, remember to open these tcp ports:"
echo "-----------------------------------------------------------------"
echo
echo "port of seafile fileserver: ${fileserver_port}"
echo "port of seahub: 8000"
echo
echo -e "When problems occur, refer to\n"
echo -e " ${server_manual_http}\n"
echo "for more information."
echo

@ -1,83 +0,0 @@
#!/usr/bin/env python
"""Lifted from:
http://stackoverflow.com/questions/18671/quick-easy-way-to-migrate-sqlite3-to-mysql
Run like so:
sqlite3 <your db>.db .dump | python sqlite2mysql.py > <your db>.sql
Then you can import the .sql file into MySQL
Note - you need to add foreign key constraints manually since sqlite doesn't actually support them
"""
import re
import fileinput
def this_line_is_useless(line):
useless_es = [
'BEGIN TRANSACTION',
'COMMIT',
'sqlite_sequence',
'CREATE UNIQUE INDEX',
'PRAGMA',
]
for useless in useless_es:
if re.search(useless, line):
return True
def has_primary_key(line):
return bool(re.search(r'PRIMARY KEY', line))
for line in fileinput.input():
searching_for_end = False
if this_line_is_useless(line): continue
# this line was necessary because ''); was getting
# converted (inappropriately) to \');
if re.match(r".*, ''\);", line):
line = re.sub(r"''\);", r'``);', line)
if re.match(r'^CREATE TABLE.*', line):
searching_for_end = True
m = re.search('CREATE TABLE [`"]?(\w*)[`"]?(.*)', line)
if m:
name, sub = m.groups()
sub = sub.replace('"','`')
line = "DROP TABLE IF EXISTS `%(name)s`;\nCREATE TABLE IF NOT EXISTS `%(name)s`%(sub)s\n"
line = line % dict(name=name, sub=sub)
else:
m = re.search('INSERT INTO "(\w*)"(.*)', line)
if m:
name, sub = m.groups()
line = 'INSERT INTO `%s`%s\n' % m.groups()
line = line.replace('"', r'\"')
line = line.replace('"', "'")
# line = re.sub(r"([^'])'t'(.)", r"\1THIS_IS_TRUE\2", line)
# line = line.replace('THIS_IS_TRUE', '1')
# line = re.sub(r"([^'])'f'(.)", r"\1THIS_IS_FALSE\2", line)
# line = line.replace('THIS_IS_FALSE', '0')
# Add auto_increment if it's not there since sqlite auto_increments ALL
# primary keys
if searching_for_end:
if re.search(r"integer(?:\s+\w+)*\s*PRIMARY KEY(?:\s+\w+)*\s*,", line, re.I):
line = line.replace("PRIMARY KEY", "PRIMARY KEY AUTO_INCREMENT")
# replace " and ' with ` because mysql doesn't like quotes in CREATE commands
line = line.replace('"', '`').replace("'", '`')
# And now we convert it back (see above)
if re.match(r".*, ``\);", line):
line = re.sub(r'``\);', r"'');", line)
if searching_for_end and re.match(r'.*\);', line):
searching_for_end = False
if re.match(r"CREATE INDEX", line):
line = re.sub('"', '`', line)
line = line.replace('"', '`')
line = line.replace('AUTOINCREMENT', 'AUTO_INCREMENT')
print line,

@ -1,118 +0,0 @@
#!/bin/sh
#
# This shell script and corresponding sqlite2mysql.py are used to
# migrate Seafile data from SQLite to MySQL.
#
# Setup:
#
# 1. Move this file and sqlite2mysql.py to the top directory of your Seafile
# installation path (e.g. /data/haiwen).
# 2. Run: ./sqlite2mysql.sh
# 3. Three files (ccnet-db.sql, seafile-db.sql, seahub-db.sql) are created.
# 4. Load these files into MySQL
# (mysql> source ccnet-db.sql)
#
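# Example of step 4 from the shell (user and database names are placeholders,
# assuming the three target MySQL databases have already been created):
#   mysql -u seafile -p ccnet_db   < ccnet-db.sql
#   mysql -u seafile -p seafile_db < seafile-db.sql
#   mysql -u seafile -p seahub_db  < seahub-db.sql
#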
CCNET_DB='ccnet-db.sql'
SEAFILE_DB='seafile-db.sql'
SEAHUB_DB='seahub-db.sql'
########## ccnet
seafile_path=$(pwd)
if [ -f "${seafile_path}/ccnet/ccnet.conf" ]; then
USER_MGR_DB=${seafile_path}/ccnet/PeerMgr/usermgr.db
GRP_MGR_DB=${seafile_path}/ccnet/GroupMgr/groupmgr.db
else
echo "${seafile_path}/ccnet/ccnet.conf does not exists."
read -p "Please provide your ccnet.conf path(e.g. /data/haiwen/ccnet/ccnet.conf): " ccnet_conf_path
if [ -f ${ccnet_conf_path} ]; then
USER_MGR_DB=$(dirname "${ccnet_conf_path}")/PeerMgr/usermgr.db
GRP_MGR_DB=$(dirname "${ccnet_conf_path}")/GroupMgr/groupmgr.db
else
echo "${ccnet_conf_path} does not exists, quit."
exit 1
fi
fi
rm -rf ${CCNET_DB}
echo "sqlite3 ${USER_MGR_DB} .dump | python sqlite2mysql.py > ${CCNET_DB}"
sqlite3 ${USER_MGR_DB} .dump | python sqlite2mysql.py > ${CCNET_DB}
echo "sqlite3 ${GRP_MGR_DB} .dump | python sqlite2mysql.py >> ${CCNET_DB}"
sqlite3 ${GRP_MGR_DB} .dump | python sqlite2mysql.py >> ${CCNET_DB}
# change ctime from INTEGER to BIGINT in EmailUser table
sed 's/ctime INTEGER/ctime BIGINT/g' ${CCNET_DB} > ${CCNET_DB}.tmp && mv ${CCNET_DB}.tmp ${CCNET_DB}
# change email in UserRole from TEXT to VARCHAR(255)
sed 's/email TEXT, role TEXT/email VARCHAR(255), role TEXT/g' ${CCNET_DB} > ${CCNET_DB}.tmp && mv ${CCNET_DB}.tmp ${CCNET_DB}
########## seafile
rm -rf ${SEAFILE_DB}
if [ -f "${seafile_path}/seafile-data/seafile.db" ]; then
echo "sqlite3 ${seafile_path}/seafile-data/seafile.db .dump | python sqlite2mysql.py > ${SEAFILE_DB}"
sqlite3 ${seafile_path}/seafile-data/seafile.db .dump | python sqlite2mysql.py > ${SEAFILE_DB}
else
echo "${seafile_path}/seafile-data/seafile.db does not exists."
read -p "Please provide your seafile.db path(e.g. /data/haiwen/seafile-data/seafile.db): " seafile_db_path
if [ -f ${seafile_db_path} ];then
echo "sqlite3 ${seafile_db_path} .dump | python sqlite2mysql.py > ${SEAFILE_DB}"
sqlite3 ${seafile_db_path} .dump | python sqlite2mysql.py > ${SEAFILE_DB}
else
echo "${seafile_db_path} does not exists, quit."
exit 1
fi
fi
# change owner_id in RepoOwner from TEXT to VARCHAR(255)
sed 's/owner_id TEXT/owner_id VARCHAR(255)/g' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB}
# change user_name in RepoGroup from TEXT to VARCHAR(255)
sed 's/user_name TEXT/user_name VARCHAR(255)/g' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB}
########## seahub
rm -rf ${SEAHUB_DB}
if [ -f "${seafile_path}/seahub.db" ]; then
echo "sqlite3 ${seafile_path}/seahub.db .dump | tr -d '\n' | sed 's/;/;\n/g' | python sqlite2mysql.py > ${SEAHUB_DB}"
sqlite3 ${seafile_path}/seahub.db .dump | tr -d '\n' | sed 's/;/;\n/g' | python sqlite2mysql.py > ${SEAHUB_DB}
else
echo "${seafile_path}/seahub.db does not exists."
read -p "Please prove your seahub.db path(e.g. /data/haiwen/seahub.db): " seahub_db_path
if [ -f ${seahub_db_path} ]; then
echo "sqlite3 ${seahub_db_path} .dump | tr -d '\n' | sed 's/;/;\n/g' | python sqlite2mysql.py > ${SEAHUB_DB}"
sqlite3 ${seahub_db_path} .dump | tr -d '\n' | sed 's/;/;\n/g' | python sqlite2mysql.py > ${SEAHUB_DB}
else
echo "${seahub_db_path} does not exists, quit."
exit 1
fi
fi
# change username from VARCHAR(256) to VARCHAR(255) in wiki_personalwiki
sed 's/varchar(256) NOT NULL UNIQUE/varchar(255) NOT NULL UNIQUE/g' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
# remove unique from contacts_contact
sed 's/, UNIQUE (`user_email`, `contact_email`)//g' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
# remove base_dirfileslastmodifiedinfo records to avoid json string parsing issue between sqlite and mysql
sed '/INSERT INTO `base_dirfileslastmodifiedinfo`/d' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
# remove notifications_usernotification records to avoid json string parsing issue between sqlite and mysql
sed '/INSERT INTO `notifications_usernotification`/d' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
########## common logic
# add ENGINE=INNODB to CREATE TABLE statements
for sql_file in $CCNET_DB $SEAFILE_DB $SEAHUB_DB
do
sed -r 's/(CREATE TABLE.*);/\1 ENGINE=INNODB;/g' $sql_file > $sql_file.tmp && mv $sql_file.tmp $sql_file
done
# remove COLLATE NOCASE if possible
for sql_file in $CCNET_DB $SEAFILE_DB $SEAHUB_DB
do
sed 's/COLLATE NOCASE//g' $sql_file > $sql_file.tmp && mv $sql_file.tmp $sql_file
done

@ -1,75 +0,0 @@
#!/bin/sh
#
# This shell script is used to add COLLATE NOCASE to email fields to avoid
# case-sensitivity issues in sqlite.
#
# 1. ./add-collate.sh <ccnet_dir> <seafile_dir> <seahub_db>
#
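# Example invocation (paths are placeholders for a typical /data/haiwen layout):
#   ./add-collate.sh /data/haiwen/ccnet /data/haiwen/seafile-data /data/haiwen/seahub.db
#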
USER_DB='/tmp/user-db.sql'
GROUP_DB='/tmp/group-db.sql'
SEAFILE_DB='/tmp/seafile-db.sql'
SEAHUB_DB='/tmp/seahub-db.sql'
ccnet_dir=$1
########## ccnet
USER_MGR_DB=${ccnet_dir}/PeerMgr/usermgr.db
GRP_MGR_DB=${ccnet_dir}/GroupMgr/groupmgr.db
rm -rf ${USER_DB}
rm -rf ${GROUP_DB}
echo "sqlite3 ${USER_MGR_DB} .dump > ${USER_DB}"
sqlite3 ${USER_MGR_DB} .dump > ${USER_DB}
echo "sqlite3 ${GRP_MGR_DB} .dump > ${GROUP_DB}"
sqlite3 ${GRP_MGR_DB} .dump > ${GROUP_DB}
sed -r 's/(CREATE TABLE EmailUser.*)email TEXT,(.*)/\1email TEXT COLLATE NOCASE,\2/I' ${USER_DB} > ${USER_DB}.tmp && mv ${USER_DB}.tmp ${USER_DB}
sed -r 's/(CREATE TABLE Binding.*)email TEXT,(.*)/\1email TEXT COLLATE NOCASE,\2/I' ${USER_DB} > ${USER_DB}.tmp && mv ${USER_DB}.tmp ${USER_DB}
sed -r 's/(CREATE TABLE `Group`.*)`creator_name` VARCHAR\(255\),(.*)/\1`creator_name` VARCHAR\(255\) COLLATE NOCASE,\2/I' ${GROUP_DB} > ${GROUP_DB}.tmp && mv ${GROUP_DB}.tmp ${GROUP_DB}
sed -r 's/(CREATE TABLE `GroupUser`.*)`user_name` VARCHAR\(255\),(.*)/\1`user_name` VARCHAR\(255\) COLLATE NOCASE,\2/I' ${GROUP_DB} > ${GROUP_DB}.tmp && mv ${GROUP_DB}.tmp ${GROUP_DB}
# backup & restore
mv ${USER_MGR_DB} ${USER_MGR_DB}.`date +"%Y%m%d%H%M%S"`
mv ${GRP_MGR_DB} ${GRP_MGR_DB}.`date +"%Y%m%d%H%M%S"`
sqlite3 ${USER_MGR_DB} < ${USER_DB}
sqlite3 ${GRP_MGR_DB} < ${GROUP_DB}
########## seafile
rm -rf ${SEAFILE_DB}
SEAFILE_DB_FILE=$2/seafile.db
echo "sqlite3 ${SEAFILE_DB_FILE} .dump > ${SEAFILE_DB}"
sqlite3 ${SEAFILE_DB_FILE} .dump > ${SEAFILE_DB}
sed -r 's/(CREATE TABLE RepoOwner.*)owner_id TEXT(.*)/\1owner_id TEXT COLLATE NOCASE\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB}
sed -r 's/(CREATE TABLE RepoGroup.*)user_name TEXT,(.*)/\1user_name TEXT COLLATE NOCASE,\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB}
sed -r 's/(CREATE TABLE RepoUserToken.*)email VARCHAR\(255\),(.*)/\1email VARCHAR\(255\) COLLATE NOCASE,\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB}
sed -r 's/(CREATE TABLE UserQuota.*)user VARCHAR\(255\),(.*)/\1user VARCHAR\(255\) COLLATE NOCASE,\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB}
sed -r 's/(CREATE TABLE SharedRepo.*)from_email VARCHAR\(512\), to_email VARCHAR\(512\),(.*)/\1from_email VARCHAR\(512\), to_email VARCHAR\(512\) COLLATE NOCASE,\2/I' ${SEAFILE_DB} > ${SEAFILE_DB}.tmp && mv ${SEAFILE_DB}.tmp ${SEAFILE_DB}
# backup & restore
mv ${SEAFILE_DB_FILE} ${SEAFILE_DB_FILE}.`date +"%Y%m%d%H%M%S"`
sqlite3 ${SEAFILE_DB_FILE} < ${SEAFILE_DB}
########## seahub
rm -rf ${SEAHUB_DB}
SEAHUB_DB_FILE=$3
echo "sqlite3 ${SEAHUB_DB_FILE} .Dump | tr -d '\n' | sed 's/;/;\n/g' > ${SEAHUB_DB}"
sqlite3 ${SEAHUB_DB_FILE} .dump | tr -d '\n' | sed 's/;/;\n/g' > ${SEAHUB_DB}
sed -r 's/(CREATE TABLE "notifications_usernotification".*)"to_user" varchar\(255\) NOT NULL,(.*)/\1"to_user" varchar\(255\) NOT NULL COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
sed -r 's/(CREATE TABLE "profile_profile".*)"user" varchar\(75\) NOT NULL UNIQUE,(.*)/\1"user" varchar\(75\) NOT NULL UNIQUE COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
sed -r 's/(CREATE TABLE "share_fileshare".*)"username" varchar\(255\) NOT NULL,(.*)/\1"username" varchar\(255\) NOT NULL COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
sed -r 's/(CREATE TABLE "api2_token".*)"user" varchar\(255\) NOT NULL UNIQUE,(.*)/\1"user" varchar\(255\) NOT NULL UNIQUE COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
sed -r 's/(CREATE TABLE "wiki_personalwiki".*)"username" varchar\(256\) NOT NULL UNIQUE,(.*)/\1"username" varchar\(256\) NOT NULL UNIQUE COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
sed -r 's/(CREATE TABLE "message_usermessage".*)"from_email" varchar\(75\) NOT NULL,\s*"to_email" varchar\(75\) NOT NULL,(.*)/\1"from_email" varchar\(75\) NOT NULL COLLATE NOCASE, "to_email" varchar\(75\) NOT NULL COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
sed -r 's/(CREATE TABLE "avatar_avatar".*)"emailuser" varchar\(255\) NOT NULL,(.*)/\1"emailuser" varchar\(255\) NOT NULL COLLATE NOCASE,\2/I' ${SEAHUB_DB} > ${SEAHUB_DB}.tmp && mv ${SEAHUB_DB}.tmp ${SEAHUB_DB}
# backup & restore
mv ${SEAHUB_DB_FILE} ${SEAHUB_DB_FILE}.`date +"%Y%m%d%H%M%S"`
sqlite3 ${SEAHUB_DB_FILE} < ${SEAHUB_DB}
rm -rf ${USER_DB} ${GROUP_DB} ${SEAFILE_DB} ${SEAHUB_DB}

@ -1,52 +0,0 @@
#!/usr/bin/env python
import sqlite3
import os
import sys
def usage():
msg = 'usage: %s <seahub db>' % os.path.basename(sys.argv[0])
print msg
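# Example invocation (the script name and database path below are placeholders):
#   python upgrade_seahub_db.py /data/haiwen/seahub.db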
def main():
seahub_db = sys.argv[1]
conn = sqlite3.connect(seahub_db)
c = conn.cursor()
try:
c.execute('SELECT s_type from share_fileshare')
except sqlite3.OperationalError:
# only add this column if not exist yet, so this script is idempotent
c.execute('ALTER table share_fileshare add column "s_type" varchar(2) NOT NULL DEFAULT "f"')
c.execute('CREATE INDEX IF NOT EXISTS "share_fileshare_f775835c" ON "share_fileshare" ("s_type")')
sql = '''CREATE TABLE IF NOT EXISTS "base_dirfileslastmodifiedinfo" (
"id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
"repo_id" varchar(36) NOT NULL,
"parent_dir" text NOT NULL,
"parent_dir_hash" varchar(12) NOT NULL,
"dir_id" varchar(40) NOT NULL,
"last_modified_info" text NOT NULL,
UNIQUE ("repo_id", "parent_dir_hash"))'''
c.execute(sql)
sql = '''CREATE TABLE IF NOT EXISTS "api2_token" (
"key" varchar(40) NOT NULL PRIMARY KEY,
"user" varchar(255) NOT NULL UNIQUE,
"created" datetime NOT NULL)'''
c.execute(sql)
conn.commit()
if __name__ == '__main__':
if len(sys.argv) != 2:
usage()
sys.exit(1)
main()

@ -1,362 +0,0 @@
#coding: UTF-8
import sys
import os
import ConfigParser
import glob
HAS_MYSQLDB = True
try:
import MySQLdb
except ImportError:
HAS_MYSQLDB = False
HAS_SQLITE3 = True
try:
import sqlite3
except ImportError:
HAS_SQLITE3 = False
class EnvManager(object):
def __init__(self):
self.upgrade_dir = os.path.dirname(__file__)
self.install_path = os.path.dirname(self.upgrade_dir)
self.top_dir = os.path.dirname(self.install_path)
self.ccnet_dir = os.environ['CCNET_CONF_DIR']
self.seafile_dir = os.environ['SEAFILE_CONF_DIR']
self.central_config_dir = os.environ.get('SEAFILE_CENTRAL_CONF_DIR')
env_mgr = EnvManager()
class Utils(object):
@staticmethod
def highlight(content, is_error=False):
'''Add ANSI color to content to get it highlighted on terminal'''
if is_error:
return '\x1b[1;31m%s\x1b[m' % content
else:
return '\x1b[1;32m%s\x1b[m' % content
@staticmethod
def info(msg):
print Utils.highlight('[INFO] ') + msg
@staticmethod
def error(msg):
print Utils.highlight('[ERROR] ') + msg
sys.exit(1)
@staticmethod
def read_config(config_path, defaults):
if not os.path.exists(config_path):
Utils.error('Config path %s doesn\'t exist, stop db upgrade' %
config_path)
cp = ConfigParser.ConfigParser(defaults)
cp.read(config_path)
return cp
class MySQLDBInfo(object):
def __init__(self, host, port, username, password, db, unix_socket=None):
self.host = host
self.port = port
self.username = username
self.password = password
self.db = db
self.unix_socket = unix_socket
class DBUpdater(object):
def __init__(self, version, name):
self.sql_dir = os.path.join(env_mgr.upgrade_dir, 'sql', version, name)
@staticmethod
def get_instance(version):
'''Detect whether we are using mysql or sqlite3'''
ccnet_db_info = DBUpdater.get_ccnet_mysql_info(version)
seafile_db_info = DBUpdater.get_seafile_mysql_info(version)
seahub_db_info = DBUpdater.get_seahub_mysql_info()
if ccnet_db_info and seafile_db_info and seahub_db_info:
Utils.info('You are using MySQL')
if not HAS_MYSQLDB:
Utils.error('Python MySQLdb module is not found')
updater = MySQLDBUpdater(version, ccnet_db_info, seafile_db_info, seahub_db_info)
elif (ccnet_db_info is None) and (seafile_db_info is None) and (seahub_db_info is None):
Utils.info('You are using SQLite3')
if not HAS_SQLITE3:
Utils.error('Python sqlite3 module is not found')
updater = SQLiteDBUpdater(version)
else:
def to_db_string(info):
if info is None:
return 'SQLite3'
else:
return 'MySQL'
Utils.error('Error:\n ccnet is using %s\n seafile is using %s\n seahub is using %s\n'
% (to_db_string(ccnet_db_info),
to_db_string(seafile_db_info),
to_db_string(seahub_db_info)))
return updater
def update_db(self):
ccnet_sql = os.path.join(self.sql_dir, 'ccnet.sql')
seafile_sql = os.path.join(self.sql_dir, 'seafile.sql')
seahub_sql = os.path.join(self.sql_dir, 'seahub.sql')
if os.path.exists(ccnet_sql):
Utils.info('updating ccnet database...')
self.update_ccnet_sql(ccnet_sql)
if os.path.exists(seafile_sql):
Utils.info('updating seafile database...')
self.update_seafile_sql(seafile_sql)
if os.path.exists(seahub_sql):
Utils.info('updating seahub database...')
self.update_seahub_sql(seahub_sql)
@staticmethod
def get_ccnet_mysql_info(version):
if version > '5.0.0':
config_path = env_mgr.central_config_dir
else:
config_path = env_mgr.ccnet_dir
ccnet_conf = os.path.join(config_path, 'ccnet.conf')
defaults = {
'HOST': '127.0.0.1',
'PORT': '3306',
'UNIX_SOCKET': '',
}
config = Utils.read_config(ccnet_conf, defaults)
db_section = 'Database'
if not config.has_section(db_section):
return None
type = config.get(db_section, 'ENGINE')
if type != 'mysql':
return None
try:
host = config.get(db_section, 'HOST')
port = config.getint(db_section, 'PORT')
username = config.get(db_section, 'USER')
password = config.get(db_section, 'PASSWD')
db = config.get(db_section, 'DB')
unix_socket = config.get(db_section, 'UNIX_SOCKET')
except ConfigParser.NoOptionError, e:
Utils.error('Database config in ccnet.conf is invalid: %s' % e)
info = MySQLDBInfo(host, port, username, password, db, unix_socket)
return info
@staticmethod
def get_seafile_mysql_info(version):
if version > '5.0.0':
config_path = env_mgr.central_config_dir
else:
config_path = env_mgr.seafile_dir
seafile_conf = os.path.join(config_path, 'seafile.conf')
defaults = {
'HOST': '127.0.0.1',
'PORT': '3306',
'UNIX_SOCKET': '',
}
config = Utils.read_config(seafile_conf, defaults)
db_section = 'database'
if not config.has_section(db_section):
return None
type = config.get(db_section, 'type')
if type != 'mysql':
return None
try:
host = config.get(db_section, 'host')
port = config.getint(db_section, 'port')
username = config.get(db_section, 'user')
password = config.get(db_section, 'password')
db = config.get(db_section, 'db_name')
unix_socket = config.get(db_section, 'unix_socket')
except ConfigParser.NoOptionError, e:
Utils.error('Database config in seafile.conf is invalid: %s' % e)
info = MySQLDBInfo(host, port, username, password, db, unix_socket)
return info
@staticmethod
def get_seahub_mysql_info():
sys.path.insert(0, env_mgr.top_dir)
if env_mgr.central_config_dir:
sys.path.insert(0, env_mgr.central_config_dir)
try:
import seahub_settings # pylint: disable=F0401
except ImportError, e:
Utils.error('Failed to import seahub_settings.py: %s' % e)
if not hasattr(seahub_settings, 'DATABASES'):
return None
try:
d = seahub_settings.DATABASES['default']
if d['ENGINE'] != 'django.db.backends.mysql':
return None
host = d.get('HOST', '127.0.0.1')
port = int(d.get('PORT', 3306))
username = d['USER']
password = d['PASSWORD']
db = d['NAME']
unix_socket = host if host.startswith('/') else None
except KeyError, e:
Utils.error('Database config in seahub_settings.py is invalid: %s' % e)
info = MySQLDBInfo(host, port, username, password, db, unix_socket)
return info
def update_ccnet_sql(self, ccnet_sql):
raise NotImplementedError
def update_seafile_sql(self, seafile_sql):
raise NotImplementedError
def update_seahub_sql(self, seahub_sql):
raise NotImplementedError
class CcnetSQLiteDB(object):
def __init__(self, ccnet_dir):
self.ccnet_dir = ccnet_dir
def get_db(self, dbname):
dbs = (
'ccnet.db',
'GroupMgr/groupmgr.db',
'misc/config.db',
'OrgMgr/orgmgr.db',
)
for db in dbs:
if os.path.splitext(os.path.basename(db))[0] == dbname:
return os.path.join(self.ccnet_dir, db)
class SQLiteDBUpdater(DBUpdater):
def __init__(self, version):
DBUpdater.__init__(self, version, 'sqlite3')
self.ccnet_db = CcnetSQLiteDB(env_mgr.ccnet_dir)
self.seafile_db = os.path.join(env_mgr.seafile_dir, 'seafile.db')
self.seahub_db = os.path.join(env_mgr.top_dir, 'seahub.db')
def update_db(self):
super(SQLiteDBUpdater, self).update_db()
for sql_path in glob.glob(os.path.join(self.sql_dir, 'ccnet', '*.sql')):
self.update_ccnet_sql(sql_path)
def apply_sqls(self, db_path, sql_path):
with open(sql_path, 'r') as fp:
lines = fp.read().split(';')
with sqlite3.connect(db_path) as conn:
for line in lines:
line = line.strip()
if not line:
continue
else:
conn.execute(line)
def update_ccnet_sql(self, sql_path):
dbname = os.path.splitext(os.path.basename(sql_path))[0]
self.apply_sqls(self.ccnet_db.get_db(dbname), sql_path)
def update_seafile_sql(self, sql_path):
self.apply_sqls(self.seafile_db, sql_path)
def update_seahub_sql(self, sql_path):
self.apply_sqls(self.seahub_db, sql_path)
class MySQLDBUpdater(DBUpdater):
def __init__(self, version, ccnet_db_info, seafile_db_info, seahub_db_info):
DBUpdater.__init__(self, version, 'mysql')
self.ccnet_db_info = ccnet_db_info
self.seafile_db_info = seafile_db_info
self.seahub_db_info = seahub_db_info
def update_ccnet_sql(self, ccnet_sql):
self.apply_sqls(self.ccnet_db_info, ccnet_sql)
def update_seafile_sql(self, seafile_sql):
self.apply_sqls(self.seafile_db_info, seafile_sql)
def update_seahub_sql(self, seahub_sql):
self.apply_sqls(self.seahub_db_info, seahub_sql)
def get_conn(self, info):
kw = dict(
user=info.username,
passwd=info.password,
db=info.db,
)
if info.unix_socket:
kw['unix_socket'] = info.unix_socket
else:
kw['host'] = info.host
kw['port'] = info.port
try:
conn = MySQLdb.connect(**kw)
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
msg = str(e.args[1])
else:
msg = str(e)
Utils.error('Failed to connect to mysql database %s: %s' % (info.db, msg))
return conn
def execute_sql(self, conn, sql):
cursor = conn.cursor()
try:
cursor.execute(sql)
conn.commit()
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
msg = str(e.args[1])
else:
msg = str(e)
Utils.error('Failed to execute sql: %s' % msg)
def apply_sqls(self, info, sql_path):
with open(sql_path, 'r') as fp:
lines = fp.read().split(';')
conn = self.get_conn(info)
for line in lines:
line = line.strip()
if not line:
continue
else:
self.execute_sql(conn, line)
def main():
skipdb = os.environ.get('SEAFILE_SKIP_DB_UPGRADE', '').lower()
if skipdb in ('1', 'true', 'on'):
print 'Database upgrade skipped because SEAFILE_SKIP_DB_UPGRADE=%s' % skipdb
sys.exit()
version = sys.argv[1]
db_updater = DBUpdater.get_instance(version)
db_updater.update_db()
return 0
if __name__ == '__main__':
main()

@ -1,234 +0,0 @@
#!/usr/bin/env python
import os
import sys
import re
import ConfigParser
import getpass
from collections import namedtuple
try:
import MySQLdb
HAS_MYSQLDB = True
except ImportError:
HAS_MYSQLDB = False
MySQLDBInfo = namedtuple('MySQLDBInfo', 'host port username password db')
class EnvManager(object):
def __init__(self):
self.upgrade_dir = os.path.abspath(os.path.dirname(__file__))
self.install_path = os.path.dirname(self.upgrade_dir)
self.top_dir = os.path.dirname(self.install_path)
self.ccnet_dir = os.environ['CCNET_CONF_DIR']
self.seafile_dir = os.environ['SEAFILE_CONF_DIR']
env_mgr = EnvManager()
class Utils(object):
@staticmethod
def highlight(content, is_error=False):
'''Add ANSI color to content to get it highlighted on terminal'''
if is_error:
return '\x1b[1;31m%s\x1b[m' % content
else:
return '\x1b[1;32m%s\x1b[m' % content
@staticmethod
def info(msg):
print Utils.highlight('[INFO] ') + msg
@staticmethod
def error(msg):
print Utils.highlight('[ERROR] ') + msg
sys.exit(1)
@staticmethod
def read_config(config_path, defaults):
cp = ConfigParser.ConfigParser(defaults)
cp.read(config_path)
return cp
def get_ccnet_mysql_info():
ccnet_conf = os.path.join(env_mgr.ccnet_dir, 'ccnet.conf')
defaults = {
'HOST': '127.0.0.1',
'PORT': '3306',
}
config = Utils.read_config(ccnet_conf, defaults)
db_section = 'Database'
if not config.has_section(db_section):
return None
type = config.get(db_section, 'ENGINE')
if type != 'mysql':
return None
try:
host = config.get(db_section, 'HOST')
port = config.getint(db_section, 'PORT')
username = config.get(db_section, 'USER')
password = config.get(db_section, 'PASSWD')
db = config.get(db_section, 'DB')
except ConfigParser.NoOptionError, e:
Utils.error('Database config in ccnet.conf is invalid: %s' % e)
info = MySQLDBInfo(host, port, username, password, db)
return info
def get_seafile_mysql_info():
seafile_conf = os.path.join(env_mgr.seafile_dir, 'seafile.conf')
defaults = {
'HOST': '127.0.0.1',
'PORT': '3306',
}
config = Utils.read_config(seafile_conf, defaults)
db_section = 'database'
if not config.has_section(db_section):
return None
type = config.get(db_section, 'type')
if type != 'mysql':
return None
try:
host = config.get(db_section, 'host')
port = config.getint(db_section, 'port')
username = config.get(db_section, 'user')
password = config.get(db_section, 'password')
db = config.get(db_section, 'db_name')
except ConfigParser.NoOptionError, e:
Utils.error('Database config in seafile.conf is invalid: %s' % e)
info = MySQLDBInfo(host, port, username, password, db)
return info
def get_seahub_mysql_info():
sys.path.insert(0, env_mgr.top_dir)
try:
import seahub_settings# pylint: disable=F0401
except ImportError, e:
Utils.error('Failed to import seahub_settings.py: %s' % e)
if not hasattr(seahub_settings, 'DATABASES'):
return None
try:
d = seahub_settings.DATABASES['default']
if d['ENGINE'] != 'django.db.backends.mysql':
return None
host = d.get('HOST', '127.0.0.1')
port = int(d.get('PORT', 3306))
username = d['USER']
password = d['PASSWORD']
db = d['NAME']
except KeyError, e:
Utils.error('Database config in seahub_settings.py is invalid: %s' % e)
info = MySQLDBInfo(host, port, username, password, db)
return info
def get_seafile_db_infos():
ccnet_db_info = get_ccnet_mysql_info()
seafile_db_info = get_seafile_mysql_info()
seahub_db_info = get_seahub_mysql_info()
infos = [ccnet_db_info, seafile_db_info, seahub_db_info]
for info in infos:
if info is None:
return None
if info.host not in ('localhost', '127.0.0.1'):
return None
return infos
def ask_root_password(port):
while True:
desc = 'What is the root password for mysql? '
password = getpass.getpass(desc).strip()
if password:
try:
return check_mysql_user('root', password, port)
except InvalidAnswer, e:
print '\n%s\n' % e
continue
class InvalidAnswer(Exception):
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
def __str__(self):
return self.msg
def check_mysql_user(user, password, port):
print '\nverifying password of root user %s ... ' % user,
kwargs = dict(host='localhost',
port=port,
user=user,
passwd=password)
try:
conn = MySQLdb.connect(**kwargs)
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
raise InvalidAnswer('Failed to connect to mysql server using user "%s" and password "***": %s'
% (user, e.args[1]))
else:
raise InvalidAnswer('Failed to connect to mysql server using user "%s" and password "***": %s'
% (user, e))
print 'done'
return conn
def apply_fix(root_conn, user, dbs):
for db in dbs:
grant_db_permission(root_conn, user, db)
cursor = root_conn.cursor()
sql = """
SELECT *
FROM mysql.user
WHERE Host = '%%'
AND password = ''
AND User = '%s'
""" % user
cursor.execute(sql)
if cursor.rowcount > 0:
sql = 'DROP USER `%s`@`%%`' % user
cursor.execute(sql)
def grant_db_permission(conn, user, db):
cursor = conn.cursor()
sql = '''GRANT ALL PRIVILEGES ON `%s`.* to `%s`@localhost ''' \
% (db, user)
try:
cursor.execute(sql)
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
Utils.error('Failed to grant permission of database %s: %s' % (db, e.args[1]))
else:
Utils.error('Failed to grant permission of database %s: %s' % (db, e))
finally:
cursor.close()
def main():
dbinfos = get_seafile_db_infos()
if not dbinfos:
return
if dbinfos[0].username == 'root':
return
if not HAS_MYSQLDB:
Utils.error('Python MySQLdb module is not found')
root_conn = ask_root_password(dbinfos[0].port)
apply_fix(root_conn, dbinfos[0].username, [info.db for info in dbinfos])
if __name__ == '__main__':
main()

@ -1,115 +0,0 @@
#!/bin/bash
SCRIPT=$(readlink -f "$0") # haiwen/seafile-server-1.3.0/upgrade/upgrade_xx_xx.sh
UPGRADE_DIR=$(dirname "$SCRIPT") # haiwen/seafile-server-1.3.0/upgrade/
INSTALLPATH=$(dirname "$UPGRADE_DIR") # haiwen/seafile-server-1.3.0/
TOPDIR=$(dirname "${INSTALLPATH}") # haiwen/
echo
echo "-------------------------------------------------------------"
echo "This script would do the minor upgrade for you."
echo "Press [ENTER] to contiune"
echo "-------------------------------------------------------------"
echo
read dummy
media_dir=${INSTALLPATH}/seahub/media
orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars
dest_avatar_dir=${TOPDIR}/seahub-data/avatars
seafile_server_symlink=${TOPDIR}/seafile-server-latest
seahub_data_dir=${TOPDIR}/seahub-data
function migrate_avatars() {
echo
echo "------------------------------"
echo "migrating avatars ..."
echo
# move "media/avatars" directory outside
if [[ ! -d ${dest_avatar_dir} ]]; then
echo
echo "Error: avatars directory \"${dest_avatar_dir}\" does not exist" 2>&1
echo
exit 1
elif [[ ! -L ${orig_avatar_dir} ]]; then
mv "${orig_avatar_dir}"/* "${dest_avatar_dir}" 2>/dev/null 1>&2
rm -rf "${orig_avatar_dir}"
ln -s ../../../seahub-data/avatars "${media_dir}"
fi
echo
echo "DONE"
echo "------------------------------"
echo
}
function make_media_custom_symlink() {
media_symlink=${INSTALLPATH}/seahub/media/custom
if [[ -L "${media_symlink}" ]]; then
return
elif [[ ! -e "${media_symlink}" ]]; then
ln -s ../../../seahub-data/custom "${media_symlink}"
return
elif [[ -d "${media_symlink}" ]]; then
cp -rf "${media_symlink}" "${seahub_data_dir}/"
rm -rf "${media_symlink}"
ln -s ../../../seahub-data/custom "${media_symlink}"
fi
}
function move_old_customdir_outside() {
# find the path of the latest seafile server folder
if [[ -L ${seafile_server_symlink} ]]; then
latest_server=$(readlink -f "${seafile_server_symlink}")
else
return
fi
old_customdir=${latest_server}/seahub/media/custom
# old customdir is already a symlink, do nothing
if [[ -L "${old_customdir}" ]]; then
return
fi
# old customdir does not exist, do nothing
if [[ ! -e "${old_customdir}" ]]; then
return
fi
# media/custom exist and is not a symlink
cp -rf "${old_customdir}" "${seahub_data_dir}/"
}
function update_latest_symlink() {
# update the symlink seafile-server to the new server version
echo
echo "updating seafile-server-latest symbolic link to ${INSTALLPATH} ..."
echo
if ! rm -f "${seafile_server_symlink}"; then
echo "Failed to remove ${seafile_server_symlink}"
echo
exit 1;
fi
if ! ln -s "$(basename ${INSTALLPATH})" "${seafile_server_symlink}"; then
echo "Failed to update ${seafile_server_symlink} symbolic link."
echo
exit 1;
fi
}
migrate_avatars;
move_old_customdir_outside;
make_media_custom_symlink;
update_latest_symlink;
echo "DONE"
echo "------------------------------"
echo

@ -1,13 +0,0 @@
#!/bin/bash
SCRIPT=$(readlink -f "$0")
UPGRADEDIR=$(dirname "${SCRIPT}")
INSTALLPATH=$(dirname "${UPGRADEDIR}")
TOPDIR=$(dirname "${INSTALLPATH}")
seahub_secret_keygen=${INSTALLPATH}/seahub/tools/secret_key_generator.py
seahub_settings_py=${TOPDIR}/seahub_settings.py
line="SECRET_KEY = \"$(python $seahub_secret_keygen)\""
sed -i -e "/SECRET_KEY/c\\$line" $seahub_settings_py

@ -1,47 +0,0 @@
CREATE TABLE IF NOT EXISTS `wiki_groupwiki` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`group_id` int(11) NOT NULL,
`repo_id` varchar(36) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `group_id` (`group_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `wiki_personalwiki` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`username` varchar(255) NOT NULL,
`repo_id` varchar(36) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `username` (`username`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `group_publicgroup` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`group_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
KEY `group_publicgroup_425ae3c4` (`group_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `base_filediscuss` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`group_message_id` int(11) NOT NULL,
`repo_id` varchar(36) NOT NULL,
`path` longtext NOT NULL,
`path_hash` varchar(12) NOT NULL,
PRIMARY KEY (`id`),
KEY `base_filediscuss_3c1a2584` (`group_message_id`),
KEY `base_filediscuss_6844bd5a` (`path_hash`),
CONSTRAINT `group_message_id_refs_id_2ade200f` FOREIGN KEY (`group_message_id`) REFERENCES `group_groupmessage` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `base_filelastmodifiedinfo` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`repo_id` varchar(36) NOT NULL,
`file_id` varchar(40) NOT NULL,
`file_path` longtext NOT NULL,
`file_path_hash` varchar(12) NOT NULL,
`last_modified` bigint(20) NOT NULL,
`email` varchar(75) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `repo_id` (`repo_id`,`file_path_hash`),
KEY `base_filelastmodifiedinfo_359081cc` (`repo_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ;

@ -1,39 +0,0 @@
CREATE TABLE IF NOT EXISTS "wiki_groupwiki" (
"id" integer NOT NULL PRIMARY KEY,
"group_id" integer NOT NULL UNIQUE,
"repo_id" varchar(36) NOT NULL
);
CREATE TABLE IF NOT EXISTS "wiki_personalwiki" (
"id" integer NOT NULL PRIMARY KEY,
"username" varchar(256) NOT NULL UNIQUE,
"repo_id" varchar(36) NOT NULL
);
CREATE TABLE IF NOT EXISTS "group_publicgroup" (
"id" integer NOT NULL PRIMARY KEY,
"group_id" integer NOT NULL
);
CREATE INDEX IF NOT EXISTS "group_publicgroup_bda51c3c" ON "group_publicgroup" ("group_id");
CREATE TABLE IF NOT EXISTS "base_filediscuss" (
"id" integer NOT NULL PRIMARY KEY,
"group_message_id" integer NOT NULL REFERENCES "group_groupmessage" ("id"),
"repo_id" varchar(40) NOT NULL,
"path" text NOT NULL,
"path_hash" varchar(12) NOT NULL
);
CREATE INDEX IF NOT EXISTS "base_filediscuss_6844bd5a" ON "base_filediscuss" ("path_hash");
CREATE INDEX IF NOT EXISTS "base_filediscuss_c3e5da7c" ON "base_filediscuss" ("group_message_id");
CREATE TABLE IF NOT EXISTS "base_filelastmodifiedinfo" (
"id" integer NOT NULL PRIMARY KEY,
"repo_id" varchar(36) NOT NULL,
"file_id" varchar(40) NOT NULL,
"file_path" text NOT NULL,
"file_path_hash" varchar(12) NOT NULL,
"last_modified" bigint NOT NULL,
"email" varchar(75) NOT NULL,
UNIQUE ("repo_id", "file_path_hash")
);
CREATE INDEX IF NOT EXISTS "base_filelastmodifiedinfo_ca6f7e34" ON "base_filelastmodifiedinfo" ("repo_id");

@ -1 +0,0 @@
CREATE INDEX repousertoken_email on RepoUserToken(email);

@ -1,17 +0,0 @@
CREATE TABLE `message_usermessage` (
`message_id` int(11) NOT NULL AUTO_INCREMENT,
`message` varchar(512) NOT NULL,
`from_email` varchar(75) NOT NULL,
`to_email` varchar(75) NOT NULL,
`timestamp` datetime NOT NULL,
`ifread` tinyint(1) NOT NULL,
PRIMARY KEY (`message_id`),
KEY `message_usermessage_8b1dd4eb` (`from_email`),
KEY `message_usermessage_590d1560` (`to_email`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `message_usermsglastcheck` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`check_time` datetime NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

@ -1 +0,0 @@
CREATE INDEX IF NOT EXISTS repousertoken_email on RepoUserToken(email);

@ -1,16 +0,0 @@
CREATE TABLE IF NOT EXISTS "message_usermessage" (
"message_id" integer NOT NULL PRIMARY KEY,
"message" varchar(512) NOT NULL,
"from_email" varchar(75) NOT NULL,
"to_email" varchar(75) NOT NULL,
"timestamp" datetime NOT NULL,
"ifread" bool NOT NULL
)
;
CREATE TABLE IF NOT EXISTS "message_usermsglastcheck" (
"id" integer NOT NULL PRIMARY KEY,
"check_time" datetime NOT NULL
)
;
CREATE INDEX IF NOT EXISTS "message_usermessage_8b1dd4eb" ON "message_usermessage" ("from_email");
CREATE INDEX IF NOT EXISTS "message_usermessage_590d1560" ON "message_usermessage" ("to_email");

@ -1,2 +0,0 @@
-- ccnet
ALTER TABLE EmailUser MODIFY passwd varchar(64);

@ -1,30 +0,0 @@
-- seahub
ALTER TABLE group_groupmessage MODIFY message varchar(2048);
ALTER TABLE group_messagereply MODIFY message varchar(2048);
CREATE TABLE IF NOT EXISTS `share_privatefiledirshare` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`from_user` varchar(255) NOT NULL,
`to_user` varchar(255) NOT NULL,
`repo_id` varchar(36) NOT NULL,
`path` longtext NOT NULL,
`token` varchar(10) NOT NULL,
`permission` varchar(5) NOT NULL,
`s_type` varchar(5) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `token` (`token`),
KEY `share_privatefiledirshare_0e7efed3` (`from_user`),
KEY `share_privatefiledirshare_bc172800` (`to_user`),
KEY `share_privatefiledirshare_2059abe4` (`repo_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `message_usermsgattachment` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`user_msg_id` int(11) NOT NULL,
`priv_file_dir_share_id` int(11) DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `message_usermsgattachment_72f290f5` (`user_msg_id`),
KEY `message_usermsgattachment_cee41a9a` (`priv_file_dir_share_id`),
CONSTRAINT `priv_file_dir_share_id_refs_id_163f8f83` FOREIGN KEY (`priv_file_dir_share_id`) REFERENCES `share_privatefiledirshare` (`id`),
CONSTRAINT `user_msg_id_refs_message_id_debb82ad` FOREIGN KEY (`user_msg_id`) REFERENCES `message_usermessage` (`message_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

@ -1,20 +0,0 @@
CREATE TABLE IF NOT EXISTS "share_privatefiledirshare" (
"id" integer NOT NULL PRIMARY KEY,
"from_user" varchar(255) NOT NULL,
"to_user" varchar(255) NOT NULL,
"repo_id" varchar(36) NOT NULL,
"path" text NOT NULL,
"token" varchar(10) NOT NULL UNIQUE,
"permission" varchar(5) NOT NULL,
"s_type" varchar(5) NOT NULL
);
CREATE TABLE IF NOT EXISTS "message_usermsgattachment" (
"id" integer NOT NULL PRIMARY KEY,
"user_msg_id" integer NOT NULL REFERENCES "message_usermessage" ("message_id"),
"priv_file_dir_share_id" integer REFERENCES "share_privatefiledirshare" ("id")
);
CREATE INDEX IF NOT EXISTS "share_privatefiledirshare_0e7efed3" ON "share_privatefiledirshare" ("from_user");
CREATE INDEX IF NOT EXISTS "share_privatefiledirshare_2059abe4" ON "share_privatefiledirshare" ("repo_id");
CREATE INDEX IF NOT EXISTS "share_privatefiledirshare_bc172800" ON "share_privatefiledirshare" ("to_user");

@ -1,24 +0,0 @@
-- seahub
CREATE TABLE IF NOT EXISTS `base_groupenabledmodule` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`group_id` varchar(10) NOT NULL,
`module_name` varchar(20) NOT NULL,
PRIMARY KEY (`id`),
KEY `base_groupenabledmodule_dc00373b` (`group_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `base_userenabledmodule` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`username` varchar(255) NOT NULL,
`module_name` varchar(20) NOT NULL,
PRIMARY KEY (`id`),
KEY `base_userenabledmodule_ee0cafa2` (`username`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `base_userlastlogin` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`username` varchar(255) NOT NULL,
`last_login` datetime NOT NULL,
PRIMARY KEY (`id`),
KEY `base_userlastlogin_ee0cafa2` (`username`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

@ -1,20 +0,0 @@
CREATE TABLE IF NOT EXISTS "base_groupenabledmodule" (
"id" integer NOT NULL PRIMARY KEY,
"group_id" varchar(10) NOT NULL,
"module_name" varchar(20) NOT NULL
);
CREATE TABLE IF NOT EXISTS "base_userenabledmodule" (
"id" integer NOT NULL PRIMARY KEY,
"username" varchar(255) NOT NULL,
"module_name" varchar(20) NOT NULL
);
CREATE TABLE IF NOT EXISTS "base_userlastlogin" (
"id" integer NOT NULL PRIMARY KEY,
"username" varchar(255) NOT NULL,
"last_login" datetime NOT NULL
);
CREATE INDEX IF NOT EXISTS "base_groupenabledmodule_dc00373b" ON "base_groupenabledmodule" ("group_id");
CREATE INDEX IF NOT EXISTS "base_userenabledmodule_ee0cafa2" ON "base_userenabledmodule" ("username");

@ -1,53 +0,0 @@
CREATE TABLE IF NOT EXISTS `captcha_captchastore` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`challenge` varchar(32) NOT NULL,
`response` varchar(32) NOT NULL,
`hashkey` varchar(40) NOT NULL,
`expiration` datetime NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `hashkey` (`hashkey`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
DROP TABLE IF EXISTS `notifications_usernotification`;
CREATE TABLE IF NOT EXISTS `notifications_usernotification` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`to_user` varchar(255) NOT NULL,
`msg_type` varchar(30) NOT NULL,
`detail` longtext NOT NULL,
`timestamp` datetime NOT NULL,
`seen` tinyint(1) NOT NULL,
PRIMARY KEY (`id`),
KEY `notifications_usernotification_bc172800` (`to_user`),
KEY `notifications_usernotification_265e5521` (`msg_type`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `options_useroptions` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`email` varchar(255) NOT NULL,
`option_key` varchar(50) NOT NULL,
`option_val` varchar(50) NOT NULL,
PRIMARY KEY (`id`),
KEY `options_useroptions_830a6ccb` (`email`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `profile_detailedprofile` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`user` varchar(255) NOT NULL,
`department` varchar(512) NOT NULL,
`telephone` varchar(100) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `share_uploadlinkshare` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`username` varchar(255) NOT NULL,
`repo_id` varchar(36) NOT NULL,
`path` longtext NOT NULL,
`token` varchar(10) NOT NULL,
`ctime` datetime NOT NULL,
`view_cnt` int(11) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `token` (`token`),
KEY `share_uploadlinkshare_ee0cafa2` (`username`),
KEY `share_uploadlinkshare_2059abe4` (`repo_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

@ -1,48 +0,0 @@
CREATE TABLE IF NOT EXISTS "captcha_captchastore" (
"id" integer NOT NULL PRIMARY KEY,
"challenge" varchar(32) NOT NULL,
"response" varchar(32) NOT NULL,
"hashkey" varchar(40) NOT NULL UNIQUE,
"expiration" datetime NOT NULL
);
DROP TABLE IF EXISTS "notifications_usernotification";
CREATE TABLE IF NOT EXISTS "notifications_usernotification" (
"id" integer NOT NULL PRIMARY KEY,
"to_user" varchar(255) NOT NULL,
"msg_type" varchar(30) NOT NULL,
"detail" text NOT NULL,
"timestamp" datetime NOT NULL,
"seen" bool NOT NULL
);
CREATE INDEX IF NOT EXISTS "notifications_usernotification_265e5521" ON "notifications_usernotification" ("msg_type");
CREATE INDEX IF NOT EXISTS "notifications_usernotification_bc172800" ON "notifications_usernotification" ("to_user");
CREATE TABLE IF NOT EXISTS "options_useroptions" (
"id" integer NOT NULL PRIMARY KEY,
"email" varchar(255) NOT NULL,
"option_key" varchar(50) NOT NULL,
"option_val" varchar(50) NOT NULL
);
CREATE INDEX IF NOT EXISTS "options_useroptions_830a6ccb" ON "options_useroptions" ("email");
CREATE TABLE IF NOT EXISTS "profile_detailedprofile" (
"id" integer NOT NULL PRIMARY KEY,
"user" varchar(255) NOT NULL,
"department" varchar(512) NOT NULL,
"telephone" varchar(100) NOT NULL
);
CREATE INDEX IF NOT EXISTS "profile_detailedprofile_6340c63c" ON "profile_detailedprofile" ("user");
CREATE TABLE IF NOT EXISTS "share_uploadlinkshare" (
"id" integer NOT NULL PRIMARY KEY,
"username" varchar(255) NOT NULL,
"repo_id" varchar(36) NOT NULL,
"path" text NOT NULL,
"token" varchar(10) NOT NULL UNIQUE,
"ctime" datetime NOT NULL,
"view_cnt" integer NOT NULL
);
CREATE INDEX IF NOT EXISTS "share_uploadlinkshare_2059abe4" ON "share_uploadlinkshare" ("repo_id");
CREATE INDEX IF NOT EXISTS "share_uploadlinkshare_ee0cafa2" ON "share_uploadlinkshare" ("username");

@ -1,2 +0,0 @@
ALTER TABLE EmailUser MODIFY passwd varchar(256);

@ -1,13 +0,0 @@
CREATE TABLE IF NOT EXISTS `api2_tokenv2` (
`key` varchar(40) NOT NULL,
`user` varchar(255) NOT NULL,
`platform` varchar(32) NOT NULL,
`device_id` varchar(40) NOT NULL,
`device_name` varchar(40) NOT NULL,
`platform_version` varchar(16) NOT NULL,
`client_version` varchar(16) NOT NULL,
`last_accessed` datetime NOT NULL,
`last_login_ip` char(39) DEFAULT NULL,
PRIMARY KEY (`key`),
UNIQUE KEY `user` (`user`,`platform`,`device_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

@ -1,12 +0,0 @@
CREATE TABLE IF NOT EXISTS "api2_tokenv2" (
"key" varchar(40) NOT NULL PRIMARY KEY,
"user" varchar(255) NOT NULL,
"platform" varchar(32) NOT NULL,
"device_id" varchar(40) NOT NULL,
"device_name" varchar(40) NOT NULL,
"platform_version" varchar(16) NOT NULL,
"client_version" varchar(16) NOT NULL,
"last_accessed" datetime NOT NULL,
"last_login_ip" char(39),
UNIQUE ("user", "platform", "device_id")
);

@ -1,20 +0,0 @@
alter table message_usermessage add column sender_deleted_at datetime DEFAULT NULL;
alter table message_usermessage add column recipient_deleted_at datetime DEFAULT NULL;
alter table share_fileshare add column password varchar(128);
alter table share_fileshare add column expire_date datetime;
alter table share_uploadlinkshare add column password varchar(128);
alter table share_uploadlinkshare add column expire_date datetime;
alter table profile_profile add column lang_code varchar(50) DEFAULT NULL;
CREATE TABLE IF NOT EXISTS `share_orgfileshare` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`org_id` int(11) NOT NULL,
`file_share_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `file_share_id` (`file_share_id`),
KEY `share_orgfileshare_944dadb6` (`org_id`),
CONSTRAINT `file_share_id_refs_id_bd2fd9f8` FOREIGN KEY (`file_share_id`) REFERENCES `share_fileshare` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
ALTER TABLE `base_userstarredfiles` ADD INDEX `base_userstarredfiles_email` (email);

@ -1,16 +0,0 @@
alter table "message_usermessage" add column "sender_deleted_at" datetime;
alter table "message_usermessage" add column "recipient_deleted_at" datetime;
alter table "share_fileshare" add column "password" varchar(128);
alter table "share_fileshare" add column "expire_date" datetime;
alter table "share_uploadlinkshare" add column "password" varchar(128);
alter table "share_uploadlinkshare" add column "expire_date" datetime;
alter table "profile_profile" add column "lang_code" varchar(50);
CREATE TABLE IF NOT EXISTS "share_orgfileshare" (
"id" integer NOT NULL PRIMARY KEY,
"org_id" integer NOT NULL,
"file_share_id" integer NOT NULL UNIQUE REFERENCES "share_fileshare" ("id")
);
CREATE INDEX IF NOT EXISTS "share_orgfileshare_944dadb6" ON "share_orgfileshare" ("org_id");
CREATE INDEX IF NOT EXISTS "base_userstarredfiles_email" on "base_userstarredfiles" ("email");

@ -1 +0,0 @@
ALTER TABLE `Group` ADD type VARCHAR(32);

@ -1,30 +0,0 @@
ALTER TABLE SharedRepo MODIFY from_email VARCHAR(255);
ALTER TABLE SharedRepo MODIFY to_email VARCHAR(255);
ALTER TABLE SharedRepo ADD INDEX (from_email);
ALTER TABLE SharedRepo ADD INDEX (to_email);
CREATE TABLE IF NOT EXISTS OrgSharedRepo (
id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
org_id INT,
repo_id CHAR(37) ,
from_email VARCHAR(255),
to_email VARCHAR(255),
permission CHAR(15),
INDEX (org_id, repo_id),
INDEX(from_email),
INDEX(to_email)
) ENGINE=INNODB;
ALTER TABLE OrgSharedRepo MODIFY from_email VARCHAR(255);
ALTER TABLE OrgSharedRepo MODIFY to_email VARCHAR(255);
CREATE TABLE IF NOT EXISTS RepoTrash (
repo_id CHAR(36) PRIMARY KEY,
repo_name VARCHAR(255),
head_id CHAR(40),
owner_id VARCHAR(255),
size BIGINT(20),
org_id INTEGER,
INDEX(owner_id),
INDEX(org_id)
) ENGINE=INNODB;

@ -1 +0,0 @@
ALTER TABLE `Group` ADD type VARCHAR(32);

@ -1,14 +0,0 @@
CREATE INDEX IF NOT EXISTS FromEmailIndex on SharedRepo (from_email);
CREATE INDEX IF NOT EXISTS ToEmailIndex on SharedRepo (to_email);
CREATE TABLE IF NOT EXISTS RepoTrash (
repo_id CHAR(36) PRIMARY KEY,
repo_name VARCHAR(255),
head_id CHAR(40),
owner_id VARCHAR(255),
size BIGINT UNSIGNED,
org_id INTEGER
);
CREATE INDEX IF NOT EXISTS repotrash_owner_id_idx ON RepoTrash(owner_id);
CREATE INDEX IF NOT EXISTS repotrash_org_id_idx ON RepoTrash(org_id);

@ -1 +0,0 @@
alter table RepoTrash add del_time BIGINT;

@ -1,18 +0,0 @@
CREATE TABLE IF NOT EXISTS `base_clientlogintoken` (
`token` varchar(32) NOT NULL,
`username` varchar(255) NOT NULL,
`timestamp` datetime NOT NULL,
PRIMARY KEY (`token`),
KEY `base_clientlogintoken_ee0cafa2` (`username`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `organizations_orgmemberquota` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`org_id` int(11) NOT NULL,
`quota` int(11) NOT NULL,
PRIMARY KEY (`id`),
KEY `organizations_orgmemberquota_944dadb6` (`org_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
REPLACE INTO django_content_type VALUES(44,'client login token','base','clientlogintoken');
REPLACE INTO django_content_type VALUES(45,'org member quota','organizations','orgmemberquota');

@ -1 +0,0 @@
alter table RepoTrash add del_time BIGINT;

@ -1,18 +0,0 @@
CREATE TABLE IF NOT EXISTS "base_clientlogintoken" (
"token" varchar(32) NOT NULL PRIMARY KEY,
"username" varchar(255) NOT NULL,
"timestamp" datetime NOT NULL
);
CREATE INDEX IF NOT EXISTS "base_clientlogintoken_ee0cafa2" ON "base_clientlogintoken" ("username");
CREATE TABLE IF NOT EXISTS "organizations_orgmemberquota" (
"id" integer NOT NULL PRIMARY KEY,
"org_id" integer NOT NULL,
"quota" integer NOT NULL
);
CREATE INDEX IF NOT EXISTS "organizations_orgmemberquota_944dadb6" ON "organizations_orgmemberquota" ("org_id");
REPLACE INTO "django_content_type" VALUES(44,'client login token','base','clientlogintoken');
REPLACE INTO "django_content_type" VALUES(45,'org member quota','organizations','orgmemberquota');

@ -1,17 +0,0 @@
CREATE TABLE IF NOT EXISTS `constance_config` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`key` varchar(255) NOT NULL,
`value` longtext NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `key` (`key`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
ALTER TABLE `profile_profile` ADD `login_id` varchar(225) DEFAULT NULL;
ALTER TABLE `profile_profile` ADD `contact_email` varchar(225) DEFAULT NULL;
ALTER TABLE `profile_profile` ADD `institution` varchar(225) DEFAULT NULL;
ALTER TABLE `profile_profile` ADD UNIQUE INDEX (`login_id`);
ALTER TABLE `profile_profile` ADD INDEX (`contact_email`);
ALTER TABLE `profile_profile` ADD INDEX (`institution`);

@ -1,13 +0,0 @@
CREATE TABLE IF NOT EXISTS "constance_config" (
"id" integer NOT NULL PRIMARY KEY,
"key" varchar(255) NOT NULL UNIQUE,
"value" text NOT NULL
);
ALTER TABLE "profile_profile" ADD COLUMN "login_id" varchar(225);
ALTER TABLE "profile_profile" ADD COLUMN "contact_email" varchar(225);
ALTER TABLE "profile_profile" ADD COLUMN "institution" varchar(225);
CREATE UNIQUE INDEX "profile_profile_1b43c217" ON "profile_profile" ("login_id");
CREATE INDEX "profile_profile_3b46cb17" ON "profile_profile" ("contact_email");
CREATE INDEX "profile_profile_71bbc151" ON "profile_profile" ("institution");

@ -1 +0,0 @@
alter table RepoTokenPeerInfo add client_ver varchar(20);

@ -1,124 +0,0 @@
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
CREATE TABLE IF NOT EXISTS `post_office_attachment` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`file` varchar(100) NOT NULL,
`name` varchar(255) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `post_office_attachment_emails` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`attachment_id` int(11) NOT NULL,
`email_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `attachment_id` (`attachment_id`,`email_id`),
KEY `post_office_attachment_emails_4be595e7` (`attachment_id`),
KEY `post_office_attachment_emails_830a6ccb` (`email_id`),
CONSTRAINT `attachment_id_refs_id_2d59d8fc` FOREIGN KEY (`attachment_id`) REFERENCES `post_office_attachment` (`id`),
CONSTRAINT `email_id_refs_id_061d81d8` FOREIGN KEY (`email_id`) REFERENCES `post_office_email` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `post_office_emailtemplate` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(255) NOT NULL,
`description` longtext NOT NULL,
`created` datetime NOT NULL,
`last_updated` datetime NOT NULL,
`subject` varchar(255) NOT NULL,
`content` longtext NOT NULL,
`html_content` longtext NOT NULL,
`language` varchar(12) NOT NULL,
`default_template_id` int(11) DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `language` (`language`,`default_template_id`),
KEY `post_office_emailtemplate_84c7951d` (`default_template_id`),
CONSTRAINT `default_template_id_refs_id_a2bc649e` FOREIGN KEY (`default_template_id`) REFERENCES `post_office_emailtemplate` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `post_office_email` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`from_email` varchar(254) NOT NULL,
`to` longtext NOT NULL,
`cc` longtext NOT NULL,
`bcc` longtext NOT NULL,
`subject` varchar(255) NOT NULL,
`message` longtext NOT NULL,
`html_message` longtext NOT NULL,
`status` smallint(5) unsigned DEFAULT NULL,
`priority` smallint(5) unsigned DEFAULT NULL,
`created` datetime NOT NULL,
`last_updated` datetime NOT NULL,
`scheduled_time` datetime DEFAULT NULL,
`headers` longtext,
`template_id` int(11) DEFAULT NULL,
`context` longtext,
`backend_alias` varchar(64) NOT NULL,
PRIMARY KEY (`id`),
KEY `post_office_email_48fb58bb` (`status`),
KEY `post_office_email_63b5ea41` (`created`),
KEY `post_office_email_470d4868` (`last_updated`),
KEY `post_office_email_c83ff05e` (`scheduled_time`),
KEY `post_office_email_43d23afc` (`template_id`),
CONSTRAINT `template_id_refs_id_a5d97662` FOREIGN KEY (`template_id`) REFERENCES `post_office_emailtemplate` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `post_office_log` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`email_id` int(11) NOT NULL,
`date` datetime NOT NULL,
`status` smallint(5) unsigned NOT NULL,
`exception_type` varchar(255) NOT NULL,
`message` longtext NOT NULL,
PRIMARY KEY (`id`),
KEY `post_office_log_830a6ccb` (`email_id`),
CONSTRAINT `email_id_refs_id_3d87f587` FOREIGN KEY (`email_id`) REFERENCES `post_office_email` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `institutions_institution` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(200) NOT NULL,
`create_time` datetime NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `institutions_institutionadmin` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`user` varchar(254) NOT NULL,
`institution_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
KEY `i_institution_id_5f792d6fe9a87ac9_fk_institutions_institution_id` (`institution_id`),
CONSTRAINT `i_institution_id_5f792d6fe9a87ac9_fk_institutions_institution_id` FOREIGN KEY (`institution_id`) REFERENCES `institutions_institution` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `sysadmin_extra_userloginlog` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`username` varchar(255) NOT NULL,
`login_date` datetime NOT NULL,
`login_ip` varchar(128) NOT NULL,
PRIMARY KEY (`id`),
KEY `sysadmin_extra_userloginlog_14c4b06b` (`username`),
KEY `sysadmin_extra_userloginlog_28ed1ef0` (`login_date`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
ALTER TABLE `sysadmin_extra_userloginlog` MODIFY `login_ip` VARCHAR(128);
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;

@@ -1 +0,0 @@
alter table RepoTokenPeerInfo add client_ver varchar(20);

@@ -1,72 +0,0 @@
CREATE TABLE IF NOT EXISTS "post_office_attachment" (
"id" integer NOT NULL PRIMARY KEY,
"file" varchar(100) NOT NULL,
"name" varchar(255) NOT NULL
);
CREATE TABLE IF NOT EXISTS "post_office_attachment_emails" (
"id" integer NOT NULL PRIMARY KEY,
"attachment_id" integer NOT NULL,
"email_id" integer NOT NULL REFERENCES "post_office_email" ("id"),
UNIQUE ("attachment_id", "email_id")
);
CREATE TABLE IF NOT EXISTS "post_office_email" (
"id" integer NOT NULL PRIMARY KEY,
"from_email" varchar(254) NOT NULL,
"to" text NOT NULL,
"cc" text NOT NULL,
"bcc" text NOT NULL,
"subject" varchar(255) NOT NULL,
"message" text NOT NULL,
"html_message" text NOT NULL,
"status" smallint unsigned,
"priority" smallint unsigned,
"created" datetime NOT NULL,
"last_updated" datetime NOT NULL,
"scheduled_time" datetime,
"headers" text,
"template_id" integer,
"context" text,
"backend_alias" varchar(64) NOT NULL
);
CREATE TABLE IF NOT EXISTS "post_office_emailtemplate" (
"id" integer NOT NULL PRIMARY KEY,
"name" varchar(255) NOT NULL,
"description" text NOT NULL,
"created" datetime NOT NULL,
"last_updated" datetime NOT NULL,
"subject" varchar(255) NOT NULL,
"content" text NOT NULL,
"html_content" text NOT NULL,
"language" varchar(12) NOT NULL,
"default_template_id" integer,
UNIQUE ("language", "default_template_id")
);
CREATE TABLE IF NOT EXISTS "post_office_log" (
"id" integer NOT NULL PRIMARY KEY,
"email_id" integer NOT NULL REFERENCES "post_office_email" ("id"),
"date" datetime NOT NULL,
"status" smallint unsigned NOT NULL,
"exception_type" varchar(255) NOT NULL,
"message" text NOT NULL
);
CREATE TABLE IF NOT EXISTS "institutions_institution" (
"id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
"name" varchar(200) NOT NULL,
"create_time" datetime NOT NULL
);
CREATE TABLE IF NOT EXISTS "institutions_institutionadmin" (
"id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
"user" varchar(254) NOT NULL,
"institution_id" integer NOT NULL REFERENCES "institutions_institution" ("id")
);
CREATE INDEX IF NOT EXISTS "post_office_attachment_emails_4be595e7" ON "post_office_attachment_emails" ("attachment_id");
CREATE INDEX IF NOT EXISTS "post_office_attachment_emails_830a6ccb" ON "post_office_attachment_emails" ("email_id");
CREATE INDEX IF NOT EXISTS "post_office_email_43d23afc" ON "post_office_email" ("template_id");
CREATE INDEX IF NOT EXISTS "post_office_email_470d4868" ON "post_office_email" ("last_updated");
CREATE INDEX IF NOT EXISTS "post_office_email_48fb58bb" ON "post_office_email" ("status");
CREATE INDEX IF NOT EXISTS "post_office_email_63b5ea41" ON "post_office_email" ("created");
CREATE INDEX IF NOT EXISTS "post_office_email_c83ff05e" ON "post_office_email" ("scheduled_time");
CREATE INDEX IF NOT EXISTS "post_office_emailtemplate_84c7951d" ON "post_office_emailtemplate" ("default_template_id");
CREATE INDEX IF NOT EXISTS "post_office_log_830a6ccb" ON "post_office_log" ("email_id");
CREATE INDEX "institutions_institutionadmin_a964baeb" ON "institutions_institutionadmin" ("institution_id");

@@ -1,104 +0,0 @@
ALTER TABLE api2_tokenv2 ADD COLUMN wiped_at DATETIME DEFAULT NULL;
ALTER TABLE api2_tokenv2 ADD COLUMN created_at DATETIME NOT NULL DEFAULT '1970-01-01 00:00:00';
CREATE TABLE IF NOT EXISTS `base_filecomment` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`repo_id` varchar(36) NOT NULL,
`parent_path` longtext NOT NULL,
`repo_id_parent_path_md5` varchar(100) NOT NULL,
`item_name` longtext NOT NULL,
`author` varchar(255) NOT NULL,
`comment` longtext NOT NULL,
`created_at` datetime NOT NULL,
`updated_at` datetime NOT NULL,
PRIMARY KEY (`id`),
KEY `base_filecomment_9a8c79bf` (`repo_id`),
KEY `base_filecomment_c5bf47d4` (`repo_id_parent_path_md5`),
KEY `base_filecomment_02bd92fa` (`author`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `termsandconditions_termsandconditions` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`slug` varchar(50) NOT NULL,
`name` longtext NOT NULL,
`version_number` decimal(6,2) NOT NULL,
`text` longtext,
`info` longtext,
`date_active` datetime DEFAULT NULL,
`date_created` datetime NOT NULL,
PRIMARY KEY (`id`),
KEY `termsandconditions_termsandconditions_2dbcba41` (`slug`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `termsandconditions_usertermsandconditions` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`username` varchar(255) NOT NULL,
`ip_address` char(39) DEFAULT NULL,
`date_accepted` datetime NOT NULL,
`terms_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `termsandconditions_usertermsandcon_username_f4ab54cafa29322_uniq` (`username`,`terms_id`),
KEY `e4da106203f3f13ff96409b55de6f515` (`terms_id`),
CONSTRAINT `e4da106203f3f13ff96409b55de6f515` FOREIGN KEY (`terms_id`) REFERENCES `termsandconditions_termsandconditions` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `two_factor_totpdevice` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`user` varchar(255) NOT NULL,
`name` varchar(64) NOT NULL,
`confirmed` tinyint(1) NOT NULL,
`key` varchar(80) NOT NULL,
`step` smallint(5) unsigned NOT NULL,
`t0` bigint(20) NOT NULL,
`digits` smallint(5) unsigned NOT NULL,
`tolerance` smallint(5) unsigned NOT NULL,
`drift` smallint(6) NOT NULL,
`last_t` bigint(20) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `user` (`user`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `two_factor_phonedevice` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`user` varchar(255) NOT NULL,
`name` varchar(64) NOT NULL,
`confirmed` tinyint(1) NOT NULL,
`number` varchar(40) NOT NULL,
`key` varchar(40) NOT NULL,
`method` varchar(4) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `user` (`user`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `two_factor_staticdevice` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`user` varchar(255) NOT NULL,
`name` varchar(64) NOT NULL,
`confirmed` tinyint(1) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `user` (`user`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `two_factor_statictoken` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`token` varchar(16) NOT NULL,
`device_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
KEY `two_fac_device_id_55a7b345293a7c6c_fk_two_factor_staticdevice_id` (`device_id`),
KEY `two_factor_statictoken_94a08da1` (`token`),
CONSTRAINT `two_fac_device_id_55a7b345293a7c6c_fk_two_factor_staticdevice_id` FOREIGN KEY (`device_id`) REFERENCES `two_factor_staticdevice` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE IF NOT EXISTS `invitations_invitation` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`token` varchar(40) NOT NULL,
`inviter` varchar(255) NOT NULL,
`accepter` varchar(255) NOT NULL,
`invite_time` datetime NOT NULL,
`accept_time` datetime DEFAULT NULL,
`invite_type` varchar(20) NOT NULL,
`expire_time` datetime NOT NULL,
PRIMARY KEY (`id`),
KEY `invitations_invitation_d5dd16f8` (`inviter`),
KEY `invitations_invitation_token_1961fbb98c05e5fd_uniq` (`token`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

@@ -1,24 +0,0 @@
CREATE TABLE IF NOT EXISTS "base_filecomment" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "repo_id" varchar(36) NOT NULL, "parent_path" text NOT NULL, "repo_id_parent_path_md5" varchar(100) NOT NULL, "item_name" text NOT NULL, "author" varchar(255) NOT NULL, "comment" text NOT NULL, "created_at" datetime NOT NULL, "updated_at" datetime NOT NULL);
CREATE INDEX IF NOT EXISTS "base_filecomment_02bd92fa" ON "base_filecomment" ("author");
CREATE INDEX IF NOT EXISTS "base_filecomment_9a8c79bf" ON "base_filecomment" ("repo_id");
CREATE INDEX IF NOT EXISTS "base_filecomment_c5bf47d4" ON "base_filecomment" ("repo_id_parent_path_md5");
CREATE TABLE IF NOT EXISTS "termsandconditions_termsandconditions" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "slug" varchar(50) NOT NULL, "name" text NOT NULL, "version_number" decimal NOT NULL, "text" text NULL, "info" text NULL, "date_active" datetime NULL, "date_created" datetime NOT NULL);
CREATE INDEX IF NOT EXISTS "termsandconditions_termsandconditions_2dbcba41" ON "termsandconditions_termsandconditions" ("slug");
CREATE TABLE IF NOT EXISTS "termsandconditions_usertermsandconditions" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL, "ip_address" char(39) NULL, "date_accepted" datetime NOT NULL, "terms_id" integer NOT NULL REFERENCES "termsandconditions_termsandconditions" ("id"), UNIQUE ("username", "terms_id"));
CREATE INDEX IF NOT EXISTS "termsandconditions_usertermsandconditions_2ab34720" ON "termsandconditions_usertermsandconditions" ("terms_id");
CREATE TABLE IF NOT EXISTS "two_factor_phonedevice" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user" varchar(255) NOT NULL UNIQUE, "name" varchar(64) NOT NULL, "confirmed" bool NOT NULL, "number" varchar(40) NOT NULL, "key" varchar(40) NOT NULL, "method" varchar(4) NOT NULL);
CREATE TABLE IF NOT EXISTS "two_factor_staticdevice" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user" varchar(255) NOT NULL UNIQUE, "name" varchar(64) NOT NULL, "confirmed" bool NOT NULL);
CREATE TABLE IF NOT EXISTS "two_factor_statictoken" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "token" varchar(16) NOT NULL, "device_id" integer NOT NULL REFERENCES "two_factor_staticdevice" ("id"));
CREATE TABLE IF NOT EXISTS "two_factor_totpdevice" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "user" varchar(255) NOT NULL UNIQUE, "name" varchar(64) NOT NULL, "confirmed" bool NOT NULL, "key" varchar(80) NOT NULL, "step" smallint unsigned NOT NULL, "t0" bigint NOT NULL, "digits" smallint unsigned NOT NULL, "tolerance" smallint unsigned NOT NULL, "drift" smallint NOT NULL, "last_t" bigint NOT NULL);
CREATE INDEX IF NOT EXISTS "two_factor_statictoken_94a08da1" ON "two_factor_statictoken" ("token");
CREATE INDEX IF NOT EXISTS "two_factor_statictoken_9379346c" ON "two_factor_statictoken" ("device_id");
CREATE TABLE IF NOT EXISTS "invitations_invitation" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "token" varchar(40) NOT NULL, "inviter" varchar(255) NOT NULL, "accepter" varchar(255) NOT NULL, "invite_time" datetime NOT NULL, "accept_time" datetime NULL, "invite_type" varchar(20) NOT NULL, "expire_time" datetime NOT NULL);
CREATE INDEX IF NOT EXISTS "invitations_invitation_94a08da1" ON "invitations_invitation" ("token");
CREATE INDEX IF NOT EXISTS "invitations_invitation_d5dd16f8" ON "invitations_invitation" ("inviter");
ALTER TABLE api2_tokenv2 ADD COLUMN wiped_at datetime DEFAULT NULL;
ALTER TABLE api2_tokenv2 ADD COLUMN created_at datetime NOT NULL DEFAULT '1970-01-01 00:00:00';
