Merge branch 'feature/backup' of github.com:ONLYOFFICE/AppServer into feature/backup

Tatiana Lopaeva 2022-03-18 15:08:28 +03:00
commit c22bc6d809
13 changed files with 285 additions and 122 deletions

View File

@@ -31,14 +31,11 @@ if [ "$DOCUMENT_SERVER_INSTALLED" = "false" ]; then
echo ${package_sysname}-documentserver $DS_COMMON_NAME/ds-port select $DS_PORT | sudo debconf-set-selections
echo ${package_sysname}-documentserver $DS_COMMON_NAME/db-pwd select $DS_DB_PWD | sudo debconf-set-selections
echo ${package_sysname}-documentserver $DS_COMMON_NAME/db-user $DS_DB_USER | sudo debconf-set-selections
echo ${package_sysname}-documentserver $DS_COMMON_NAME/db-name $DS_DB_NAME | sudo debconf-set-selections
echo ${package_sysname}-documentserver-de $DS_COMMON_NAME/jwt-enabled select ${DS_JWT_ENABLED} | sudo debconf-set-selections
echo ${package_sysname}-documentserver-de $DS_COMMON_NAME/jwt-secret select ${DS_JWT_SECRET} | sudo debconf-set-selections
echo ${package_sysname}-documentserver-de $DS_COMMON_NAME/jwt-header select ${DS_JWT_HEADER} | sudo debconf-set-selections
echo ${package_sysname}-documentserver-ee $DS_COMMON_NAME/jwt-enabled select ${DS_JWT_ENABLED} | sudo debconf-set-selections
echo ${package_sysname}-documentserver-ee $DS_COMMON_NAME/jwt-secret select ${DS_JWT_SECRET} | sudo debconf-set-selections
echo ${package_sysname}-documentserver-ee $DS_COMMON_NAME/jwt-header select ${DS_JWT_HEADER} | sudo debconf-set-selections
echo ${package_sysname}-documentserver $DS_COMMON_NAME/db-user select $DS_DB_USER | sudo debconf-set-selections
echo ${package_sysname}-documentserver $DS_COMMON_NAME/db-name select $DS_DB_NAME | sudo debconf-set-selections
echo ${package_sysname}-documentserver $DS_COMMON_NAME/jwt-enabled select ${DS_JWT_ENABLED} | sudo debconf-set-selections
echo ${package_sysname}-documentserver $DS_COMMON_NAME/jwt-secret select ${DS_JWT_SECRET} | sudo debconf-set-selections
echo ${package_sysname}-documentserver $DS_COMMON_NAME/jwt-header select ${DS_JWT_HEADER} | sudo debconf-set-selections
apt-get install -yq ${package_sysname}-documentserver
elif [ "$UPDATE" = "true" ] && [ "$DOCUMENT_SERVER_INSTALLED" = "true" ]; then
@@ -65,55 +62,17 @@ else
systemctl reload nginx
fi
APPSERVER_INSTALLED_VERSION=$(apt-cache policy ${product} | awk 'NR==2{print $2}')
APPSERVER_LATEST_VERSION=$(apt-cache policy ${product} | awk 'NR==3{print $2}')
if [ "$APPSERVER_INSTALLED_VERSION" != "$APPSERVER_LATEST_VERSION" ]; then
APPSERVER_NEED_UPDATE="true"
fi
if [ "$APPSERVER_INSTALLED" = "false" ]; then
echo ${product} ${product}/db-pwd select $MYSQL_SERVER_PASS | sudo debconf-set-selections
echo ${product} ${product}/db-user select $MYSQL_SERVER_USER | sudo debconf-set-selections
echo ${product} ${product}/db-name select $MYSQL_SERVER_DB_NAME | sudo debconf-set-selections
apt-get install -y ${product} || true #Fix error 'Failed to fetch'
apt-get install -y ${product}
elif [ "$APPSERVER_NEED_UPDATE" = "true" ]; then
ENVIRONMENT="$(cat /lib/systemd/system/${product}-api.service | grep -oP 'ENVIRONMENT=\K.*')"
USER_CONNECTIONSTRING=$(json -f /etc/onlyoffice/${product}/appsettings.$ENVIRONMENT.json ConnectionStrings.default.connectionString)
MYSQL_SERVER_HOST=$(echo $USER_CONNECTIONSTRING | grep -oP 'Server=\K.*' | grep -o '^[^;]*')
MYSQL_SERVER_DB_NAME=$(echo $USER_CONNECTIONSTRING | grep -oP 'Database=\K.*' | grep -o '^[^;]*')
MYSQL_SERVER_USER=$(echo $USER_CONNECTIONSTRING | grep -oP 'User ID=\K.*' | grep -o '^[^;]*')
MYSQL_SERVER_PORT=$(echo $USER_CONNECTIONSTRING | grep -oP 'Port=\K.*' | grep -o '^[^;]*')
MYSQL_SERVER_PASS=$(echo $USER_CONNECTIONSTRING | grep -oP 'Password=\K.*' | grep -o '^[^;]*')
elif [ "$UPDATE" = "true" ] && [ "$APPSERVER_INSTALLED" = "true" ]; then
apt-get install -o DPkg::options::="--force-confnew" -y --only-upgrade ${product} elasticsearch=${ELASTIC_VERSION}
fi
if [ "${APPSERVER_INSTALLED}" = "false" ] || [ "${APPSERVER_NEED_UPDATE}" = "true" ]; then
expect << EOF
set timeout -1
log_user 1
if { "${UPDATE}" == "true" } {
spawn ${product}-configuration.sh -e ${ENVIRONMENT}
} else {
spawn ${product}-configuration.sh
}
expect -re "Database host:"
send "\025$MYSQL_SERVER_HOST\r"
expect -re "Database name:"
send "\025$MYSQL_SERVER_DB_NAME\r"
expect -re "Database user:"
send "\025$MYSQL_SERVER_USER\r"
expect -re "Database password:"
send "\025$MYSQL_SERVER_PASS\r"
expect eof
EOF
APPSERVER_INSTALLED="true";
fi
echo ""
echo "$RES_INSTALL_SUCCESS"
echo "$RES_QUESTIONS"

View File

@@ -65,7 +65,7 @@ if [ "$(ls "$PRODUCT_DIR/services/kafka" 2> /dev/null)" == "" ]; then
KAFKA_ARCHIVE=$(curl https://downloads.apache.org/kafka/$KAFKA_VERSION/ | grep -Eo "kafka_2.[0-9][0-9]-$KAFKA_VERSION.tgz" | tail -1)
curl https://downloads.apache.org/kafka/$KAFKA_VERSION/$KAFKA_ARCHIVE -O
tar xzf $KAFKA_ARCHIVE --strip 1 && rm -rf $KAFKA_ARCHIVE
chown -R kafka ${PRODUCT_DIR}/services/kafka
chown -R kafka ${PRODUCT_DIR}/services/kafka/
cd -
fi
@@ -83,6 +83,7 @@ Restart=on-abnormal
[Install]
WantedBy=multi-user.target
END
systemctl start zookeeper
fi
if [ ! -e /lib/systemd/system/kafka.service ]; then
@@ -99,6 +100,7 @@ Restart=on-abnormal
[Install]
WantedBy=multi-user.target
END
systemctl start kafka
fi
if ! dpkg -l | grep -q "mysql-server"; then
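
The two hunks above add systemctl start zookeeper and systemctl start kafka immediately after the heredocs that write the corresponding unit files, so the services come up during installation rather than only after the next boot. A minimal sketch of that write-then-start pattern, using a placeholder unit name and ExecStart path rather than the real Zookeeper/Kafka definitions:

#!/bin/bash
# Sketch only: write a systemd unit via a heredoc, then reload and start it.
# "example-worker" and its ExecStart path are placeholders, not the real unit contents.
cat > /lib/systemd/system/example-worker.service <<END
[Unit]
Description=Example worker service

[Service]
ExecStart=/usr/bin/example-worker
Restart=on-abnormal

[Install]
WantedBy=multi-user.target
END

systemctl daemon-reload
systemctl start example-worker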

View File

@@ -97,6 +97,13 @@ while [ "$1" != "" ]; do
fi
;;
-ess | --elasticsheme )
if [ "$2" != "" ]; then
ELK_SHEME=$2
shift
fi
;;
-esh | --elastichost )
if [ "$2" != "" ]; then
ELK_HOST=$2
@@ -106,7 +113,7 @@ while [ "$1" != "" ]; do
-esp | --elasticport )
if [ "$2" != "" ]; then
ELK_HOST=$2
ELK_PORT=$2
shift
fi
;;
@@ -118,6 +125,34 @@ while [ "$1" != "" ]; do
fi
;;
-mysqlh | --mysqlhost )
if [ "$2" != "" ]; then
DB_HOST=$2
shift
fi
;;
-mysqld | --mysqldatabase )
if [ "$2" != "" ]; then
DB_NAME=$2
shift
fi
;;
-mysqlu | --mysqluser )
if [ "$2" != "" ]; then
DB_USER=$2
shift
fi
;;
-mysqlp | --mysqlpassword )
if [ "$2" != "" ]; then
DB_PWD=$2
shift
fi
;;
-? | -h | --help )
echo " Usage: bash ${PRODUCT}-configuration.sh [PARAMETER] [[PARAMETER], ...]"
echo
@@ -132,6 +167,10 @@ while [ "$1" != "" ]; do
echo " -zkp, --zookeeperport zookeeper port (default 2181)"
echo " -esh, --elastichost elasticsearch ip"
echo " -esp, --elasticport elasticsearch port (default 9200)"
echo " -mysqlh, --mysqlhost mysql server host"
echo " -mysqld, --mysqldatabase ${PRODUCT} database name"
echo " -mysqlu, --mysqluser ${PRODUCT} database user"
echo " -mysqlp, --mysqlpassword ${PRODUCT} database password"
echo " -e, --environment environment (default 'production')"
echo " -?, -h, --help this help"
echo
@@ -203,24 +242,10 @@ input_db_params(){
local def_DB_NAME=$(echo $user_connectionString | grep -oP 'Database=\K.*' | grep -o '^[^;]*')
local def_DB_USER=$(echo $user_connectionString | grep -oP 'User ID=\K.*' | grep -o '^[^;]*')
read -e -p "Database host: " -i "$DB_HOST" DB_HOST
read -e -p "Database name: " -i "$DB_NAME" DB_NAME
read -e -p "Database user: " -i "$DB_USER" DB_USER
read -e -p "Database password: " -s DB_PWD
if [ -z $DB_HOST ]; then
DB_HOST="${def_DB_HOST}";
fi
if [ -z $DB_NAME ]; then
DB_NAME="${def_DB_NAME}";
fi
if [ -z $DB_USER ]; then
DB_USER="${def_DB_USER}";
fi
echo
if [ -z $def_DB_HOST ] && [ -z $DB_HOST ]; then read -e -p "Database host: " -i "$DB_HOST" DB_HOST; fi
if [ -z $def_DB_NAME ] && [ -z $DB_NAME ]; then read -e -p "Database name: " -i "$DB_NAME" DB_NAME; fi
if [ -z $def_DB_USER ] && [ -z $DB_USER ]; then read -e -p "Database user: " -i "$DB_USER" DB_USER; fi
if [ -z $DB_PWD ]; then read -e -p "Database password: " -i "$DB_PWD" DB_PWD; fi
}
establish_mysql_conn(){
@@ -575,10 +600,6 @@ elif command -v apt >/dev/null 2>&1; then
DIST="Debian"
PACKAGE_MANAGER="dpkg -l"
MYSQL_PACKAGE="mysql"
mkdir -p /var/log/onlyoffice/appserver/ /etc/onlyoffice/appserver/.private/
chown -R onlyoffice:onlyoffice /var/www/appserver/ /var/log/onlyoffice/appserver/ /etc/onlyoffice/appserver/
chown -R kafka /var/www/appserver/services/kafka/
systemctl restart kafka zookeeper
fi
install_json

View File

@@ -0,0 +1,2 @@
/var/log/onlyoffice/appserver
/etc/onlyoffice/appserver/.private

View File

@@ -18,3 +18,4 @@ if ! cat /etc/passwd | grep -q "nginx:"; then
fi
usermod -aG onlyoffice,nginx onlyoffice
chown onlyoffice:onlyoffice /var/log/onlyoffice/appserver /var/www/appserver /etc/onlyoffice/appserver

View File

@@ -0,0 +1,24 @@
#!/bin/sh -e
set -e
. /usr/share/debconf/confmodule
db_input medium appserver/environment || true
db_input medium appserver/host || true
db_input medium appserver/port || true
db_input medium appserver/kafka-host || true
db_input medium appserver/kafka-port || true
db_input medium appserver/zookeeper-host || true
db_input medium appserver/zookeeper-port || true
db_input medium appserver/elasticsearch-sheme || true
db_input medium appserver/elasticsearch-host || true
db_input medium appserver/elasticsearch-port || true
db_input medium appserver/db-host || true
db_input medium appserver/db-name || true
db_input medium appserver/db-user || true
db_go
db_input critical appserver/db-pwd || true
db_go

View File

@@ -1,3 +0,0 @@
#!/bin/sh -e
set -e

View File

@@ -0,0 +1,65 @@
#!/bin/sh -e
set -e
. /usr/share/debconf/confmodule
case "$1" in
configure)
db_get appserver/environment || true
ENVIRONMENT="$RET"
db_get appserver/host || true
APP_HOST="$RET"
db_get appserver/port || true
APP_PORT="$RET"
db_get appserver/db-host || true
DB_HOST="$RET"
db_get appserver/db-name || true
DB_NAME="$RET"
db_get appserver/db-user || true
DB_USER="$RET"
db_get appserver/db-pwd || true
DB_PWD="$RET"
db_get appserver/kafka-host || true
KAFKA_HOST="$RET"
db_get appserver/kafka-port || true
KAFKA_PORT="$RET"
db_get appserver/zookeeper-host || true
ZOOKEEPER_HOST="$RET"
db_get appserver/zookeeper-port || true
ZOOKEEPER_PORT="$RET"
db_get appserver/elasticsearch-sheme || true
ELK_SHEME="$RET"
db_get appserver/elasticsearch-host || true
ELK_HOST="$RET"
db_get appserver/elasticsearch-port || true
ELK_PORT="$RET"
db_get onlyoffice/db-host || true
DOCUMENT_SERVER_HOST="$RET"
db_get onlyoffice/ds-port || true
DOCUMENT_SERVER_PORT="$RET"
bash /usr/bin/appserver-configuration.sh -e $ENVIRONMENT -mysqlh $DB_HOST -mysqld $DB_NAME -mysqlu $DB_USER -mysqlp $DB_PWD -ash $APP_HOST -asp $APP_PORT \
-dsh $DOCUMENT_SERVER_HOST -dsp $DOCUMENT_SERVER_PORT -kh $KAFKA_HOST -kp $KAFKA_PORT -zkh $ZOOKEEPER_HOST -zkp $ZOOKEEPER_PORT -ess $ELK_SHEME -esh $ELK_HOST -esp $ELK_PORT
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
#DEBHELPER#
exit 0
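
This postinst is the consumer side of the debconf questions queued by the config script earlier in the commit: config calls db_input/db_go to ask, and postinst calls db_get to read the stored answers back and hand them to /usr/bin/appserver-configuration.sh. A stripped-down sketch of that round trip, using a hypothetical appserver/example-value template:

#!/bin/sh -e
# Sketch only: the debconf handshake between debian/config and debian/postinst.
# "appserver/example-value" is a hypothetical template name for illustration.
. /usr/share/debconf/confmodule

# debian/config: queue the question and flush it to the debconf frontend.
db_input medium appserver/example-value || true
db_go

# debian/postinst (configure): read the stored answer back into a shell variable.
db_get appserver/example-value || true
EXAMPLE_VALUE="$RET"
echo "would configure with: $EXAMPLE_VALUE"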

View File

@@ -38,8 +38,13 @@ override_dh_auto_build:
sed -i "s@var/www@var/www/${PRODUCT}@g" ${SRC_PATH}/config/nginx/*.conf
sed -i "s@var/www@var/www/${PRODUCT}@g" ${SRC_PATH}/config/nginx/includes/*.conf
override_dh_fixperms:
dh_fixperms
override_dh_auto_install:
dh_installinit
dh_systemd_enable
dh_systemd_start --no-start
override_dh_strip:
# dh_strip --exclude=/site-packages/

View File

@@ -1,3 +1,18 @@
Template: appserver/environment
Type: string
Default: production
Description: Select environment for AppServer configuration:
Template: appserver/host
Type: string
Default: localhost
Description: AppServer host:
Template: appserver/port
Type: string
Default: 80
Description: AppServer port:
Template: appserver/db-host
Type: string
Default: localhost
@@ -17,23 +32,39 @@ Type: string
Default: onlyoffice
Description: MySQL database name:
Template: appserver/remove-db
Type: boolean
Default: false
Description: Remove database?
This operation will remove the database which contain all data. It is recommended to take backup before removing the database.
Template: appserver/ds-jwt-enabled
Type: boolean
Default: false
Description: To enabled Document Server JWT?:
Template: appserver/ds-jwt-secret
Template: appserver/kafka-host
Type: string
Default: {{package_sysname}}
Description: Document Server JWT Secret:
Default: localhost
Description: Kafka host:
Template: appserver/ds-jwt-secret-header
Template: appserver/kafka-port
Type: string
Default: AuthorizationJwt
Description: Document Server Secret Header:
Default: 9092
Description: Kafka port:
Template: appserver/zookeeper-host
Type: string
Default: localhost
Description: Zookeeper host:
Template: appserver/zookeeper-port
Type: string
Default: 2181
Description: Zookeeper port:
Template: appserver/elasticsearch-sheme
Type: select
Choices: http, https
Default: http
Description: Elasticsearch sheme:
Template: appserver/elasticsearch-host
Type: string
Default: localhost
Description: Elasticsearch host:
Template: appserver/elasticsearch-port
Type: string
Default: 9200
Description: Elasticsearch port:
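
Because the new kafka/zookeeper/elasticsearch templates are ordinary string or select questions, they can be preseeded the same way the install script preseeds the documentserver package at the top of this commit. A sketch, assuming the template owner and package are both named appserver and reusing the defaults shown above:

#!/bin/bash
# Sketch only: preseed the new templates so installation stays non-interactive.
# The owner name "appserver" and the values are assumptions based on the defaults above.
echo "appserver appserver/kafka-host string localhost" | sudo debconf-set-selections
echo "appserver appserver/kafka-port string 9092" | sudo debconf-set-selections
echo "appserver appserver/zookeeper-host string localhost" | sudo debconf-set-selections
echo "appserver appserver/zookeeper-port string 2181" | sudo debconf-set-selections
echo "appserver appserver/elasticsearch-sheme select http" | sudo debconf-set-selections
echo "appserver appserver/elasticsearch-host string localhost" | sudo debconf-set-selections
echo "appserver appserver/elasticsearch-port string 9200" | sudo debconf-set-selections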

View File

@@ -89,7 +89,7 @@ namespace ASC.ElasticSearch
private bool IsExist { get; set; }
private Client Client { get; }
private ILog Log { get; }
private TenantManager TenantManager { get; }
protected TenantManager TenantManager { get; }
private BaseIndexerHelper BaseIndexerHelper { get; }
private Settings Settings { get; }
private IServiceProvider ServiceProvider { get; }
@@ -116,7 +116,8 @@ namespace ASC.ElasticSearch
internal void Index(T data, bool immediately = true)
{
CreateIfNotExist(data);
if (!BeforeIndex(data)) return;
Client.Instance.Index(data, idx => GetMeta(idx, data, immediately));
}
@@ -124,7 +125,7 @@ namespace ASC.ElasticSearch
{
if (data.Count == 0) return;
CreateIfNotExist(data[0]);
if (!CheckExist(data[0])) return;
if (data[0] is ISearchItemDocument)
{
@@ -135,7 +136,9 @@ namespace ASC.ElasticSearch
for (var i = 0; i < data.Count; i++)
{
var t = data[i];
var runBulk = i == data.Count - 1;
var runBulk = i == data.Count - 1;
BeforeIndex(t);
if (!(t is ISearchItemDocument wwd) || wwd.Document == null || string.IsNullOrEmpty(wwd.Document.Data))
{
@@ -207,14 +210,19 @@ namespace ASC.ElasticSearch
}
}
else
{
{
foreach (var item in data)
{
BeforeIndex(item);
}
Client.Instance.Bulk(r => r.IndexMany(data, GetMeta));
}
}
internal async Task IndexAsync(List<T> data, bool immediately = true)
{
CreateIfNotExist(data[0]);
if (!CheckExist(data[0])) return;
if (data is ISearchItemDocument)
{
@@ -227,6 +235,8 @@ namespace ASC.ElasticSearch
var t = data[i];
var runBulk = i == data.Count - 1;
await BeforeIndexAsync(t);
var wwd = t as ISearchItemDocument;
if (wwd == null || wwd.Document == null || string.IsNullOrEmpty(wwd.Document.Data))
@@ -298,31 +308,36 @@ namespace ASC.ElasticSearch
}
else
{
foreach (var item in data)
{
await BeforeIndexAsync(item);
}
await Client.Instance.BulkAsync(r => r.IndexMany(data, GetMeta));
}
}
internal void Update(T data, bool immediately = true, params Expression<Func<T, object>>[] fields)
{
CreateIfNotExist(data);
if (!CheckExist(data)) return;
Client.Instance.Update(DocumentPath<T>.Id(data), r => GetMetaForUpdate(r, data, immediately, fields));
}
internal void Update(T data, UpdateAction action, Expression<Func<T, IList>> fields, bool immediately = true)
{
CreateIfNotExist(data);
if (!CheckExist(data)) return;
Client.Instance.Update(DocumentPath<T>.Id(data), r => GetMetaForUpdate(r, data, action, fields, immediately));
}
internal void Update(T data, Expression<Func<Selector<T>, Selector<T>>> expression, int tenantId, bool immediately = true, params Expression<Func<T, object>>[] fields)
{
CreateIfNotExist(data);
if (!CheckExist(data)) return;
Client.Instance.UpdateByQuery(GetDescriptorForUpdate(data, expression, tenantId, immediately, fields));
}
internal void Update(T data, Expression<Func<Selector<T>, Selector<T>>> expression, int tenantId, UpdateAction action, Expression<Func<T, IList>> fields, bool immediately = true)
{
CreateIfNotExist(data);
if (!CheckExist(data)) return;
Client.Instance.UpdateByQuery(GetDescriptorForUpdate(data, expression, tenantId, action, fields, immediately));
}
@@ -389,8 +404,7 @@ namespace ASC.ElasticSearch
Log.DebugFormat("Delete {0}", Wrapper.IndexName);
Client.Instance.Indices.Delete(Wrapper.IndexName);
BaseIndexerHelper.Clear(Wrapper);
CreateIfNotExist(Wrapper);
BaseIndexerHelper.Clear(Wrapper);
}
internal IReadOnlyCollection<T> Select(Expression<Func<Selector<T>, Selector<T>>> expression, bool onlyId = false)
@@ -411,6 +425,16 @@ namespace ASC.ElasticSearch
return result.Documents;
}
protected virtual bool BeforeIndex(T data)
{
return CheckExist(data);
}
protected virtual Task<bool> BeforeIndexAsync(T data)
{
return Task.FromResult(CheckExist(data));
}
public void CreateIfNotExist(T data)
{
try

View File

@@ -61,7 +61,7 @@ namespace ASC.Web.CRM.Configuration
}
public override async Task<List<UsageSpaceStatItem>> GetStatDataAsync()
public override async ValueTask<List<UsageSpaceStatItem>> GetStatDataAsync()
{
var spaceUsage = await _filesDbContext.Files.AsQueryable().Join(_filesDbContext.Tree,
x => x.FolderId,

View File

@@ -32,6 +32,8 @@ using ASC.Common;
using ASC.Common.Caching;
using ASC.Common.Logging;
using ASC.Core;
using ASC.Core.Common.EF;
using ASC.Core.Common.EF.Context;
using ASC.Core.Common.EF.Model;
using ASC.ElasticSearch;
using ASC.ElasticSearch.Core;
@@ -42,8 +44,51 @@ using ASC.Files.Core.EF;
using ASC.Files.Core.Resources;
using Microsoft.Extensions.Options;
namespace ASC.Web.Files.Core.Search
{
[Scope]
public class BaseIndexerFile : BaseIndexer<DbFile>
{
private readonly IDaoFactory _daoFactory;
public BaseIndexerFile(
Client client,
IOptionsMonitor<ILog> log,
DbContextManager<WebstudioDbContext> dbContextManager,
TenantManager tenantManager, BaseIndexerHelper baseIndexerHelper,
Settings settings,
IServiceProvider serviceProvider,
IDaoFactory daoFactory)
: base(client, log, dbContextManager, tenantManager, baseIndexerHelper, settings, serviceProvider)
{
_daoFactory = daoFactory;
}
protected override bool BeforeIndex(DbFile data)
{
if (!base.BeforeIndex(data)) return false;
var fileDao = _daoFactory.GetFileDao<int>() as FileDao;
TenantManager.SetCurrentTenant(data.TenantId);
fileDao.InitDocumentAsync(data).Wait();
return true;
}
protected override async Task<bool> BeforeIndexAsync(DbFile data)
{
if (!base.BeforeIndex(data)) return false;
var fileDao = _daoFactory.GetFileDao<int>() as FileDao;
TenantManager.SetCurrentTenant(data.TenantId);
await fileDao.InitDocumentAsync(data);
return true;
}
}
[Scope(Additional = typeof(FactoryIndexerFileExtension))]
public class FactoryIndexerFile : FactoryIndexer<DbFile>
{
@@ -55,7 +100,7 @@ namespace ASC.Web.Files.Core.Search
TenantManager tenantManager,
SearchSettingsHelper searchSettingsHelper,
FactoryIndexer factoryIndexer,
BaseIndexer<DbFile> baseIndexer,
BaseIndexerFile baseIndexer,
IServiceProvider serviceProvider,
IDaoFactory daoFactory,
ICache cache,
@@ -145,22 +190,10 @@ namespace ASC.Web.Files.Core.Search
{
if (Settings.Threads == 1)
{
data.ForEach(r =>
{
TenantManager.SetCurrentTenant(r.TenantId);
fileDao.InitDocumentAsync(r).Wait();
});
Index(data);
}
else
{
//TODO: refactoring
data.ForEach(r =>
{
TenantManager.SetCurrentTenant(r.TenantId);
fileDao.InitDocumentAsync(r).Wait();
});
tasks.Add(IndexAsync(data));
j++;
if (j >= Settings.Threads)
@@ -184,7 +217,6 @@ namespace ASC.Web.Files.Core.Search
}
}
public override string SettingsTitle
{
get { return FilesCommonResource.IndexTitle; }