Compare commits

...

19 Commits

SHA1 | Message | Date
c7d1324502 | Add the ability for fluent-bit to handle https requests | 2024-08-22 17:18:33 +03:00
1b9b1169b0 | fix Bug 67176 (#315) | 2024-08-21 18:25:31 +03:00
d37c21ca13 | fix Bug 67176 | 2024-08-21 17:48:24 +03:00
43168089cb | fix Bug 69344 - Remove microphone restriction header (#313) (Co-authored-by: Alexey Safronov <Alexey.Safronov@onlyoffice.com>) | 2024-08-19 15:27:25 +03:00
523034c088 | Refactoring OCI docker (#311) | 2024-08-15 15:20:30 +03:00
d19abfe15e | Offline self-extracting archive release (#309) | 2024-08-14 12:43:29 +03:00
3a6a9ac574 | Add healthcheck for dependent containers (#308) | 2024-08-14 12:43:06 +03:00
07cce071d2 | fix Bug 69111 - Add an offline installation option for docker supply (#307) (Co-authored-by: Elbakyan Shirak <shirak.elbakyan@onlyoffice.com>) | 2024-08-14 12:42:37 +03:00
1ef4ec7e84 | Add the ability to specify an external hub for OCI docker (#301) | 2024-08-14 12:21:04 +03:00
204af6e047 | fix Bug 69500 - Fix the functionality of services on the custom port (#306) | 2024-08-12 18:45:54 +03:00
b43d9a5cda | docker: WebStudio: support self queue | 2024-08-08 20:10:15 +03:00
a090e14a30 | Add a check to enable the CRB repository (#298) | 2024-08-07 10:31:15 +03:00
84a7d52c1b | Implement a separate OCI test run for debian\redhat (#297) | 2024-08-06 16:52:57 +03:00
2c892e1a6c | Fix OCI installation on AlmaLinux, Rocky Linux and other; use CentOS install flow for default on RedHat-based distributions | 2024-08-02 17:55:46 +03:00
859a3bb009 | Fix rename dashboards for release (#291) (author: Valeria Bagisheva) | 2024-08-01 11:17:34 +03:00
4c7376852a | Merge branch 'release/v2.6.0' into hotfix/v2.6.1 | 2024-07-31 16:59:21 +04:00
ecb6cba47e | refactoring | 2024-07-29 17:03:26 +03:00
dc1f9020fc | refactoring header value | 2024-07-29 16:22:23 +03:00
a376f1f5ed | added Server-Timing header nginx level | 2024-07-29 16:15:19 +03:00
31 changed files with 511 additions and 263 deletions

View File

@ -0,0 +1,120 @@
name: Install OneClickInstall Docker
on:
pull_request:
types: [opened, reopened, synchronize]
paths:
- '.github/workflows/ci-oci-docker-install.yml'
- 'install/OneClickInstall/install-Docker.sh'
workflow_dispatch:
inputs:
offline:
description: 'Publish 4testing offline archive'
required: true
default: false
type: boolean
jobs:
Install-OneClickInstall-Docker:
runs-on: ubuntu-22.04
steps:
- name: Determine Branch Name
id: set-branch-name
run: |
BRANCH_NAME=$([ "${{ github.event_name }}" = "pull_request" ] && echo "${{ github.event.pull_request.head.ref }}" || echo "${GITHUB_REF#refs/heads/}")
echo "BRANCH_NAME=${BRANCH_NAME:-master}" >> $GITHUB_ENV
- name: Test OCI docker scripts
run: |
sudo docker image prune --all --force
wget https://download.onlyoffice.com/docspace/docspace-enterprise-install.sh
sed '/bash install-Docker.sh/i sed -i "1i set -x" install-Docker.sh' -i docspace-enterprise-install.sh
sudo bash docspace-enterprise-install.sh docker -docsi onlyoffice/documentserver-ee -skiphc true -noni true $([ ${{ env.BRANCH_NAME }} != "master" ] && echo "-gb ${{ env.BRANCH_NAME }} -s 4testing-") || exit $?
echo -n "Waiting for all containers to start..."
timeout 300 bash -c 'while docker ps | grep -q "starting"; do sleep 5; done' && echo "OK" || echo "container_status=timeout" >> $GITHUB_ENV
- name: Check container status
run: |
docker ps --all --format "{{.Names}}" | xargs -I {} sh -c '
status=$(docker inspect --format="{{if .State.Health}}{{.State.Health.Status}}{{else}}no healthcheck{{end}}" {});
case "$status" in
healthy) color="\033[0;32m" ;; # green
"no healthcheck") color="\033[0;33m" ;; # yellow
*) color="\033[0;31m"; echo "container_status=red" >> $GITHUB_ENV ;; # red
esac;
printf "%-50s ${color}%s\033[0m\n" "{}:" "$status";
'
- name: Print logs for crashed container
run: |
docker ps --all --format "{{.Names}}" | xargs -I {} sh -c '
status=$(docker inspect --format="{{if .State.Health}}{{.State.Health.Status}}{{else}}no healthcheck{{end}}" {});
case "$status" in
healthy | "no healthcheck") ;;
*)
echo "Logs for container {}:";
docker logs --tail 30 {} | sed "s/^/\t/g";
;;
esac;
'
case "${{ env.container_status }}" in
timeout) echo "Timeout reached. Not all containers are running."; exit 1 ;;
red) echo "One or more containers have status 'red'. Job will fail."; exit 1 ;;
esac
- name: Checkout repository
if: ${{ github.event.inputs.offline == 'true' }}
uses: actions/checkout@v4
with:
ref: ${{ env.BRANCH_NAME }}
- name: Creating 4testing offline self-extracting archive
if: ${{ github.event.inputs.offline == 'true' }}
run: |
INSTALL_PATH=${{ github.workspace }}/install
docker stop $(docker ps -a -q) && docker rm $(docker ps -a -q) && docker volume rm $(docker volume ls -q)
sudo rm -rf /usr/local/lib/android /opt/ghc
docker images --format "{{.Repository}}:{{.Tag}}" | grep "4testing-" | xargs -I{} bash -c '
docker tag "$1" $(echo "${1/4testing-/}" | sed -E "s/([0-9]+\.[0-9]+\.[0-9]+)\.[0-9]+/\1/")
docker rmi "$1"
' _ {}
sed -i 's~\(OFFLINE_INSTALLATION="\|SKIP_HARDWARE_CHECK="\).*"$~\1true"~' "${INSTALL_PATH}/OneClickInstall/install-Docker.sh"
echo "Creating offline self-extracting archive..."
docker save $(docker images --format "{{.Repository}}:{{.Tag}}") | xz --verbose -T0 -z -9e > ${INSTALL_PATH}/docker_images.tar.xz
cd ${INSTALL_PATH}/docker && tar -czvf ${INSTALL_PATH}/docker.tar.gz --exclude='config/supervisor*' *.yml .env config/
tar -cvf ${INSTALL_PATH}/offline-docspace.tar \
-C "${INSTALL_PATH}/OneClickInstall" install-Docker.sh \
-C "${INSTALL_PATH}" docker_images.tar.xz \
-C "${INSTALL_PATH}" docker.tar.gz
rm -rf ${INSTALL_PATH}/docker_images.tar.xz ${INSTALL_PATH}/docker.tar.gz
echo "ARTIFACT_NAME=${ARTIFACT_NAME:=4testing-offline-docspace-installation.sh}" >> $GITHUB_ENV
cat ${INSTALL_PATH}/common/self-extracting.sh ${INSTALL_PATH}/offline-docspace.tar > ${INSTALL_PATH}/${ARTIFACT_NAME}
chmod +x ${INSTALL_PATH}/${ARTIFACT_NAME}
- name: Configure AWS Credentials
if: ${{ github.event.inputs.offline == 'true' }}
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_OCI }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_OCI }}
aws-region: us-east-1
- name: Upload 4testing offline self-extracting archive
if: ${{ github.event.inputs.offline == 'true' }}
run: |
aws s3 cp ${{ github.workspace }}/install/${{ env.ARTIFACT_NAME }} \
${{ secrets.AWS_BUCKET_URL_OCI }}/${{ env.ARTIFACT_NAME }} \
--acl public-read \
--content-type application/x-xz \
--metadata-directive REPLACE
aws cloudfront create-invalidation \
--distribution-id ${{ secrets.AWS_DISTRIBUTION_ID_OCI }} \
--paths "/docspace/${{ env.ARTIFACT_NAME }}"

View File

@ -5,9 +5,10 @@ on:
types: [opened, reopened, synchronize]
paths:
- '.github/workflows/ci-oci-install.yml'
- 'install/OneClickInstall/**'
- '!install/OneClickInstall/install-Docker.sh'
- '!install/OneClickInstall/docspace-install.sh'
- 'install/OneClickInstall/install-Debian/**'
- 'install/OneClickInstall/install-RedHat/**'
- 'install/OneClickInstall/install-Debian.sh'
- 'install/OneClickInstall/install-RedHat.sh'
schedule:
- cron: '00 20 * * 6' # At 23:00 on Saturday.
@ -57,6 +58,19 @@ jobs:
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- name: Checkout code
if: github.event_name == 'pull_request'
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Determine affected distributions
id: determine-distros
if: github.event_name == 'pull_request'
run: |
CHANGED_FILES=$(git diff --name-only ${{ github.event.pull_request.base.sha }} ${{ github.sha }})
echo "debian_changed=$(echo "$CHANGED_FILES" | grep -q 'install-Debian' && echo true || echo false)" >> $GITHUB_ENV
echo "redhat_changed=$(echo "$CHANGED_FILES" | grep -q 'install-RedHat' && echo true || echo false)" >> $GITHUB_ENV
- name: Set matrix names
id: set-matrix
@ -69,11 +83,20 @@ jobs:
{"execute": '${{ github.event.inputs.debian12 || true }}', "name": "Debian12", "os": "debian12", "distr": "generic"},
{"execute": '${{ github.event.inputs.ubuntu2004 || true }}', "name": "Ubuntu20.04", "os": "ubuntu2004", "distr": "generic"},
{"execute": '${{ github.event.inputs.ubuntu2204 || true }}', "name": "Ubuntu22.04", "os": "ubuntu2204", "distr": "generic"},
{"execute": '${{ github.event.inputs.ubuntu2204 || true }}', "name": "Ubuntu24.04", "os": "ubuntu-24.04", "distr": "bento"},
{"execute": '${{ github.event.inputs.ubuntu2404 || true }}', "name": "Ubuntu24.04", "os": "ubuntu-24.04", "distr": "bento"},
{"execute": '${{ github.event.inputs.fedora39 || true }}', "name": "Fedora39", "os": "39-cloud-base", "distr": "fedora"},
{"execute": '${{ github.event.inputs.fedora40 || true }}', "name": "Fedora40", "os": "fedora-40", "distr": "bento"}
]
}' | jq -c '{include: [.include[] | select(.execute == true)]}')
}' | jq -c '.include')
matrix=$(jq -c --arg REDHAT_CHANGED "${{ env.redhat_changed }}" --arg DEBIAN_CHANGED "${{ env.debian_changed }}" '
{ include: [.[] | select(
($REDHAT_CHANGED == "true" and $DEBIAN_CHANGED == "true" and .execute == true) or
($REDHAT_CHANGED == "true" and (.name | test("CentOS|Fedora"))) or
($DEBIAN_CHANGED == "true" and (.name | test("Debian|Ubuntu"))) or
($REDHAT_CHANGED == "false" and $DEBIAN_CHANGED == "false" and .execute == true))]
}' <<< "$matrix")
echo "matrix=${matrix}" >> $GITHUB_OUTPUT
vagrant-up:
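The jq filter above narrows the test matrix to the distribution family actually touched by the pull request. A small sketch of the selection logic, showing only the Debian-only branch of the condition with a made-up matrix:

matrix='[{"execute":true,"name":"Debian12"},{"execute":true,"name":"CentOS9S"}]'
jq -c --arg DEBIAN_CHANGED "true" \
  '{include: [.[] | select($DEBIAN_CHANGED == "true" and (.name | test("Debian|Ubuntu")))]}' <<< "$matrix"
# {"include":[{"execute":true,"name":"Debian12"}]}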

.github/workflows/offline-release.yml (new file, 45 lines added)
View File

@ -0,0 +1,45 @@
name: Upload offline self-extracting archive
on:
workflow_dispatch:
jobs:
release:
name: Upload offline self-extracting archive
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set environment variables
run: |
DOCKER_VERSION=$(curl -s https://hub.docker.com/v2/repositories/onlyoffice/4testing-docspace-api/tags/ | jq -r '.results[].name' | grep -oE '^[0-9]+\.[0-9]+\.[0-9]+' | sort -V | tail -n 1)
echo "ARTIFACT_NAME=offline-docspace-installation.sh" >> $GITHUB_ENV
echo "ARTIFACT_VERSION_NAME=offline-docspace-${DOCKER_VERSION}-installation.sh" >> $GITHUB_ENV
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_OCI }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_OCI }}
aws-region: us-east-1
- name: Upload offline self-extracting archive (latest)
run: |
aws s3 cp ${{ secrets.AWS_BUCKET_URL_OCI }}/4testing-${{ env.ARTIFACT_NAME }} \
${{ secrets.AWS_BUCKET_URL_OCI }}/${{ env.ARTIFACT_NAME }} \
--acl public-read \
--metadata-directive REPLACE
- name: Upload offline self-extracting archive (versioned)
run: |
aws s3 cp ${{ secrets.AWS_BUCKET_URL_OCI }}/4testing-${{ env.ARTIFACT_NAME }} \
${{ secrets.AWS_BUCKET_URL_OCI }}/${{ env.ARTIFACT_VERSION_NAME }} \
--acl public-read \
--metadata-directive REPLACE
- name: Invalidate AWS CloudFront cache
run: |
aws cloudfront create-invalidation \
--distribution-id ${{ secrets.AWS_DISTRIBUTION_ID_OCI }} \
--paths "/docspace/${{ env.ARTIFACT_NAME }}" "/docspace/${{ env.ARTIFACT_VERSION_NAME }}"

View File

@ -529,39 +529,6 @@
"region" : ""
}
}
},
{
"type": "ASC.Core.Common.Configuration.DataStoreConsumer, ASC.Core.Common",
"services": [
{
"type": "ASC.Core.Common.Configuration.Consumer, ASC.Core.Common"
},
{
"type": "ASC.Core.Common.Configuration.DataStoreConsumer, ASC.Core.Common"
},
{
"key": "selectel",
"type": "ASC.Core.Common.Configuration.Consumer, ASC.Core.Common"
},
{
"key": "selectel",
"type": "ASC.Core.Common.Configuration.DataStoreConsumer, ASC.Core.Common"
}
],
"instanceScope": "perlifetimescope",
"parameters": {
"name": "selectel",
"order": "23",
"props": {
"authUser": "",
"authPwd": ""
},
"additional": {
"handlerType" : "ASC.Data.Storage.Selectel.SelectelStorage, ASC.Data.Storage",
"public_container" : "",
"private_container" : ""
}
}
},
{
"type": "ASC.FederatedLogin.LoginProviders.ZoomLoginProvider, ASC.FederatedLogin",

View File

@ -0,0 +1,12 @@
location ^~ /dashboards/ {
auth_basic "Restricted Access";
auth_basic_user_file /etc/nginx/.htpasswd_dashboards;
rewrite ^/dashboards(/.*)$ $1 break;
proxy_pass http://127.0.0.1:5601;
proxy_redirect off;
proxy_buffering off;
proxy_set_header Connection "Keep-Alive";
proxy_set_header Proxy-Connection "Keep-Alive";
}
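Once the router is running, this include keeps the dashboards UI behind basic auth; a quick smoke test through the proxy might look like the following (host and credentials are illustrative, taken from DASHBOARDS_USERNAME/DASHBOARDS_PASSWORD):

curl -u "onlyoffice:${DASHBOARDS_PASSWORD}" http://localhost/dashboards/api/status
# the rewrite strips /dashboards, so this reaches the OpenSearch Dashboards /api/status endpoint on port 5601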

View File

@ -54,6 +54,13 @@ map $request_uri $content_security_policy {
~*\/(ds-vpath)\/ "default-src *; script-src * 'unsafe-inline' 'unsafe-eval'; script-src-elem * 'unsafe-inline'; img-src * data:; style-src * 'unsafe-inline' data:; font-src * data:; frame-src * ascdesktop:; object-src; connect-src * ascdesktop:;";
}
map $request_time $request_time_ms {
~^0\.000$ 0;
~^0\.(?:0*)([^0].*)$ $1;
~^([^0][^.]*)\.(.*)$ $1$2;
}
include /etc/nginx/includes/onlyoffice-*.conf;
server_names_hash_bucket_size 128;
@ -65,7 +72,7 @@ server {
add_header X-Content-Type-Options "nosniff";
add_header X-Frame-Options $header_x_frame_options;
add_header Cache-Control $cache_control;
add_header Permissions-Policy "autoplay=(), geolocation=(), camera=(), microphone=(), interest-cohort=()";
add_header Permissions-Policy "autoplay=(), geolocation=(), camera=(), interest-cohort=()";
root $public_root;
etag on;
@ -158,18 +165,7 @@ server {
}
location ^~ /dashboards/ {
auth_basic "Restricted Access";
auth_basic_user_file /etc/nginx/.htpasswd_dashboards;
rewrite ^/dashboards(/.*)$ $1 break;
proxy_pass http://127.0.0.1:5601;
proxy_redirect off;
proxy_buffering off;
proxy_set_header Connection "Keep-Alive";
proxy_set_header Proxy-Connection "Keep-Alive";
}
include /etc/nginx/includes/server-*.conf;
location / {
proxy_pass http://127.0.0.1:5001;
@ -288,6 +284,9 @@ server {
}
location /api/2.0 {
add_header Trailer Server-Timing;
add_header Server-Timing "proxy-request-time;dur=${request_time_ms}";
location ~* /(files|privacyroom) {
proxy_pass http://127.0.0.1:5007;
}
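A note on the Server-Timing additions above: the new map converts nginx's $request_time (seconds with millisecond resolution) into an integer millisecond value, so under these three rules "0.000" maps to 0, "0.046" to 46 and "1.204" to 1204, and a 46 ms API request would emit Server-Timing: proxy-request-time;dur=46 as a response trailer.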

View File

@ -39,7 +39,7 @@ PROXY_YML="${BASE_DIR}/proxy.yml"
STATUS=""
DOCKER_TAG=""
INSTALLATION_TYPE="ENTERPRISE"
IMAGE_NAME="${PACKAGE_SYSNAME}/${PRODUCT}-api"
IMAGE_NAME="${PACKAGE_SYSNAME}/${STATUS}${PRODUCT}-api"
CONTAINER_NAME="${PACKAGE_SYSNAME}-api"
NETWORK_NAME=${PACKAGE_SYSNAME}
@ -105,8 +105,9 @@ LETS_ENCRYPT_DOMAIN=""
LETS_ENCRYPT_MAIL=""
HELP_TARGET="install-Docker.sh";
OFFLINE_INSTALLATION="false"
SKIP_HARDWARE_CHECK="false";
SKIP_HARDWARE_CHECK="false"
EXTERNAL_PORT="80"
@ -492,6 +493,13 @@ while [ "$1" != "" ]; do
fi
;;
-off | --offline )
if [ "$2" != "" ]; then
OFFLINE_INSTALLATION=$2
shift
fi
;;
-? | -h | --help )
echo " Usage: bash $HELP_TARGET [PARAMETER] [[PARAMETER], ...]"
echo
@ -543,6 +551,7 @@ while [ "$1" != "" ]; do
echo " -lem, --letsencryptmail defines the domain administator mail address for Let's Encrypt certificate"
echo " -cf, --certfile path to the certificate file for the domain"
echo " -ckf, --certkeyfile path to the private key file for the certificate"
echo " -off, --offline set the script for offline installation (true|false)"
echo " -noni, --noninteractive auto confirm all questions (true|false)"
echo " -dbm, --databasemigration database migration (true|false)"
echo " -ms, --makeswap make swap file (true|false)"
@ -583,7 +592,7 @@ root_checking () {
fi
}
command_exists () {
is_command_exists () {
type "$1" &> /dev/null;
}
@ -738,22 +747,20 @@ check_hardware () {
fi
}
install_service () {
install_package () {
if ! is_command_exists $1; then
local COMMAND_NAME=$1
local PACKAGE_NAME=$2
local PACKAGE_NAME=${2:-"$COMMAND_NAME"}
local PACKAGE_NAME_APT=${PACKAGE_NAME%%|*}
local PACKAGE_NAME_YUM=${PACKAGE_NAME##*|}
PACKAGE_NAME=${PACKAGE_NAME:-"$COMMAND_NAME"}
if command_exists apt-get; then
apt-get -y update -qq
apt-get -y -q install $PACKAGE_NAME
elif command_exists yum; then
yum -y install $PACKAGE_NAME
if is_command_exists apt-get; then
apt-get -y -q install ${PACKAGE_NAME_APT:-$PACKAGE_NAME}
elif is_command_exists yum; then
yum -y install ${PACKAGE_NAME_YUM:-$PACKAGE_NAME}
fi
if ! command_exists $COMMAND_NAME; then
echo "Command $COMMAND_NAME not found"
exit 1;
is_command_exists $COMMAND_NAME || { echo "Command $COMMAND_NAME not found"; exit 1; }
fi
}
@ -767,10 +774,6 @@ check_ports () {
ARRAY_PORTS=();
USED_PORTS="";
if ! command_exists netstat; then
install_service netstat net-tools
fi
if [ "${EXTERNAL_PORT//[0-9]}" = "" ]; then
for RESERVED_PORT in "${RESERVED_PORTS[@]}"
do
@ -845,21 +848,11 @@ check_docker_version () {
done
}
install_docker_using_script () {
if ! command_exists curl ; then
install_service curl
fi
curl -fsSL https://get.docker.com -o get-docker.sh
sh get-docker.sh
rm get-docker.sh
}
install_docker () {
if [ "${DIST}" == "Ubuntu" ] || [ "${DIST}" == "Debian" ] || [[ "${DIST}" == CentOS* ]] || [ "${DIST}" == "Fedora" ]; then
install_docker_using_script
curl -fsSL https://get.docker.com | bash
systemctl start docker
systemctl enable docker
@ -905,7 +898,7 @@ install_docker () {
fi
if ! command_exists docker ; then
if ! is_command_exists docker ; then
echo "error while installing docker"
exit 1;
fi
@ -948,30 +941,6 @@ read_continue_installation () {
}
domain_check () {
if ! command_exists dig; then
if command_exists apt-get; then
install_service dig dnsutils
elif command_exists yum; then
install_service dig bind-utils
fi
fi
if ! command_exists ping; then
if command_exists apt-get; then
install_service ping iputils-ping
elif command_exists yum; then
install_service ping iputils
fi
fi
if ! command_exists ip; then
if command_exists apt-get; then
install_service ip iproute2
elif command_exists yum; then
install_service ip iproute
fi
fi
APP_DOMAIN_PORTAL=${LETS_ENCRYPT_DOMAIN:-${APP_URL_PORTAL:-$(get_env_parameter "APP_URL_PORTAL" "${PACKAGE_SYSNAME}-files" | awk -F[/:] '{if ($1 == "https") print $4; else print ""}')}}
while IFS= read -r DOMAIN; do
@ -1023,7 +992,7 @@ get_env_parameter () {
exit 1;
fi
if command_exists docker ; then
if is_command_exists docker ; then
[ -n "$CONTAINER_NAME" ] && CONTAINER_EXIST=$(docker ps -aqf "name=$CONTAINER_NAME");
if [[ -n ${CONTAINER_EXIST} ]]; then
@ -1038,74 +1007,47 @@ get_env_parameter () {
echo ${VALUE//\"}
}
get_available_version () {
if [[ -z "$1" ]]; then
echo "image name is empty";
exit 1;
fi
if ! command_exists curl ; then
install_curl;
fi
CREDENTIALS="";
AUTH_HEADER="";
TAGS_RESP="";
get_tag_from_hub () {
if [[ -n ${HUB} ]]; then
DOCKER_CONFIG="$HOME/.docker/config.json";
if [[ -f "$DOCKER_CONFIG" ]]; then
CREDENTIALS=$(jq -r '.auths."'$HUB'".auth' < "$DOCKER_CONFIG");
if [ "$CREDENTIALS" == "null" ]; then
CREDENTIALS="";
fi
if [[ -n ${USERNAME} && -n ${PASSWORD} ]]; then
CREDENTIALS=$(echo -n "$USERNAME:$PASSWORD" | base64)
elif [[ -f "$HOME/.docker/config.json" ]]; then
CREDENTIALS=$(jq -r --arg hub "${HUB}" '.auths | to_entries[] | select(.key | contains($hub)).value.auth // empty' "$HOME/.docker/config.json")
fi
if [[ -z ${CREDENTIALS} && -n ${USERNAME} && -n ${PASSWORD} ]]; then
CREDENTIALS=$(echo -n "$USERNAME:$PASSWORD" | base64);
fi
[[ -n ${CREDENTIALS} ]] && AUTH_HEADER="Authorization: Basic $CREDENTIALS"
if [[ -n ${CREDENTIALS} ]]; then
AUTH_HEADER="Authorization: Basic $CREDENTIALS";
fi
REPO=$(echo $1 | sed "s/$HUB\///g");
TAGS_RESP=$(curl -s -H "$AUTH_HEADER" -X GET https://$HUB/v2/$REPO/tags/list);
TAGS_RESP=$(echo $TAGS_RESP | jq -r '.tags')
HUB_URL="https://${HUB}/v2/${1/#$HUB\//}/tags/list"
JQ_FILTER='.tags | join("\n")'
else
if [[ -n ${USERNAME} && -n ${PASSWORD} ]]; then
CREDENTIALS="{\"username\":\"$USERNAME\",\"password\":\"$PASSWORD\"}";
fi
if [[ -n ${CREDENTIALS} ]]; then
LOGIN_RESP=$(curl -s -H "Content-Type: application/json" -X POST -d "$CREDENTIALS" https://hub.docker.com/v2/users/login/);
TOKEN=$(echo $LOGIN_RESP | jq -r '.token');
AUTH_HEADER="Authorization: JWT $TOKEN";
CREDENTIALS="{\"username\":\"$USERNAME\",\"password\":\"$PASSWORD\"}"
TOKEN=$(curl -s -H "Content-Type: application/json" -X POST -d "$CREDENTIALS" https://hub.docker.com/v2/users/login/ | jq -r '.token');
AUTH_HEADER="Authorization: JWT $TOKEN"
sleep 1;
fi
TAGS_RESP=$(curl -s -H "$AUTH_HEADER" -X GET https://hub.docker.com/v2/repositories/$1/tags/);
TAGS_RESP=$(echo $TAGS_RESP | jq -r '.results[].name')
HUB_URL="https://hub.docker.com/v2/repositories/${1}/tags/"
JQ_FILTER='.results[].name // empty'
fi
VERSION_REGEX="[0-9]+\.[0-9]+\.[0-9]+(\.[0-9]+)?$"
TAGS_RESP=($(curl -s -H "${AUTH_HEADER}" -X GET "${HUB_URL}" | jq -r "${JQ_FILTER}"))
}
TAG_LIST=""
get_available_version () {
[ "${OFFLINE_INSTALLATION}" = "false" ] && get_tag_from_hub ${1} || TAGS_RESP=$(docker images --format "{{.Tag}}" ${1})
for item in $TAGS_RESP
do
if [[ $item =~ $VERSION_REGEX ]]; then
TAG_LIST="$item,$TAG_LIST"
fi
done
LATEST_TAG=$(echo $TAG_LIST | tr ',' '\n' | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n | awk '/./{line=$0} END{print line}');
VERSION_REGEX='^[0-9]+\.[0-9]+(\.[0-9]+){0,2}$'
[ ${#TAGS_RESP[@]} -eq 1 ] && LATEST_TAG="${TAGS_RESP[0]}" || LATEST_TAG=$(printf "%s\n" "${TAGS_RESP[@]}" | grep -E "$VERSION_REGEX" | sort -V | tail -n 1)
if [ ! -z "${LATEST_TAG}" ]; then
echo "${LATEST_TAG}" | sed "s/\"//g"
else
if [ "${OFFLINE_INSTALLATION}" = "false" ]; then
echo "Unable to retrieve tag from ${1} repository" >&2
else
echo "Error: The image '${1}' is not found in the local Docker registry." >&2
fi
kill -s TERM $PID
fi
}
@ -1151,6 +1093,8 @@ set_mysql_params () {
}
set_docspace_params() {
HUB=${HUB:-$(get_env_parameter "HUB")};
ENV_EXTENSION=${ENV_EXTENSION:-$(get_env_parameter "ENV_EXTENSION" "${CONTAINER_NAME}")};
APP_CORE_BASE_DOMAIN=${APP_CORE_BASE_DOMAIN:-$(get_env_parameter "APP_CORE_BASE_DOMAIN" "${CONTAINER_NAME}")};
EXTERNAL_PORT=${EXTERNAL_PORT:-$(get_env_parameter "EXTERNAL_PORT" "${CONTAINER_NAME}")};
@ -1188,46 +1132,37 @@ set_installation_type_data () {
}
download_files () {
if ! command_exists jq ; then
if command_exists yum; then
rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-$REV.noarch.rpm
fi
install_service jq
fi
if ! command_exists docker-compose; then
install_docker_compose
fi
# Fixes issues with variables when upgrading to v1.1.3
HOSTS=("ELK_HOST" "REDIS_HOST" "RABBIT_HOST" "MYSQL_HOST");
for HOST in "${HOSTS[@]}"; do [[ "${!HOST}" == *CONTAINER_PREFIX* || "${!HOST}" == *$PACKAGE_SYSNAME* ]] && export "$HOST="; done
[[ "${APP_URL_PORTAL}" == *${PACKAGE_SYSNAME}-proxy* ]] && APP_URL_PORTAL=""
echo -n "Downloading configuration files to the ${BASE_DIR} directory..."
if ! command_exists tar; then
install_service tar
fi
[ "${OFFLINE_INSTALLATION}" = "false" ] && echo -n "Downloading configuration files to ${BASE_DIR}..." || echo "Unzip docker.tar.gz to ${BASE_DIR}..."
[ -d "${BASE_DIR}" ] && rm -rf "${BASE_DIR}"
mkdir -p ${BASE_DIR}
if [ "${OFFLINE_INSTALLATION}" = "false" ]; then
if [ -z "${GIT_BRANCH}" ]; then
curl -sL -o docker.tar.gz "https://download.${PACKAGE_SYSNAME}.com/${PRODUCT}/docker.tar.gz"
tar -xf docker.tar.gz -C ${BASE_DIR}
DOWNLOAD_URL="https://download.${PACKAGE_SYSNAME}.com/${PRODUCT}/docker.tar.gz"
else
curl -sL -o docker.tar.gz "https://github.com/${PACKAGE_SYSNAME}/${PRODUCT}-buildtools/archive/${GIT_BRANCH}.tar.gz"
tar -xf docker.tar.gz --strip-components=3 -C ${BASE_DIR} --wildcards '*/install/docker/*'
DOWNLOAD_URL="https://github.com/${PACKAGE_SYSNAME}/${PRODUCT}-buildtools/archive/${GIT_BRANCH}.tar.gz"
STRIP_COMPONENTS="--strip-components=3 --wildcards */install/docker/*"
fi
rm -rf docker.tar.gz
curl -sL "${DOWNLOAD_URL}" | tar -xzf - -C "${BASE_DIR}" ${STRIP_COMPONENTS}
else
if [ -f "$(dirname "$0")/docker.tar.gz" ]; then
tar -xf $(dirname "$0")/docker.tar.gz -C "${BASE_DIR}"
else
echo "Error: docker.tar.gz not found in the same directory as the script."
echo "You need to download the docker.tar.gz file from https://download.${PACKAGE_SYSNAME}.com/${PRODUCT}/docker.tar.gz"
exit 1
fi
fi
echo "OK"
reconfigure STATUS ${STATUS}
reconfigure INSTALLATION_TYPE ${INSTALLATION_TYPE}
reconfigure NETWORK_NAME ${NETWORK_NAME}
}
reconfigure () {
@ -1245,7 +1180,6 @@ install_mysql_server () {
reconfigure MYSQL_USER ${MYSQL_USER}
reconfigure MYSQL_PASSWORD ${MYSQL_PASSWORD}
reconfigure MYSQL_ROOT_PASSWORD ${MYSQL_ROOT_PASSWORD}
reconfigure MYSQL_VERSION ${MYSQL_VERSION}
if [[ -z ${MYSQL_HOST} ]] && [ "$INSTALL_MYSQL_SERVER" == "true" ]; then
docker-compose -f $BASE_DIR/db.yml up -d
@ -1261,7 +1195,6 @@ install_mysql_server () {
install_document_server () {
reconfigure DOCUMENT_SERVER_JWT_HEADER ${DOCUMENT_SERVER_JWT_HEADER}
reconfigure DOCUMENT_SERVER_JWT_SECRET ${DOCUMENT_SERVER_JWT_SECRET}
reconfigure DOCUMENT_SERVER_IMAGE_NAME "${DOCUMENT_SERVER_IMAGE_NAME}:${DOCUMENT_SERVER_VERSION:-$(get_available_version "$DOCUMENT_SERVER_IMAGE_NAME")}"
if [[ -z ${DOCUMENT_SERVER_HOST} ]] && [ "$INSTALL_DOCUMENT_SERVER" == "true" ]; then
docker-compose -f $BASE_DIR/ds.yml up -d
elif [ "$INSTALL_DOCUMENT_SERVER" == "pull" ]; then
@ -1304,7 +1237,6 @@ install_redis () {
}
install_elasticsearch () {
reconfigure ELK_VERSION ${ELK_VERSION}
if [[ -z ${ELK_HOST} ]] && [ "$INSTALL_ELASTICSEARCH" == "true" ]; then
if [ $(free --mega | grep -oP '\d+' | head -n 1) -gt "12000" ]; then #RAM ~12Gb
sed -i 's/Xms[0-9]g/Xms4g/g; s/Xmx[0-9]g/Xmx4g/g' $BASE_DIR/opensearch.yml
@ -1324,14 +1256,6 @@ install_elasticsearch () {
install_fluent_bit () {
if [ "$INSTALL_FLUENT_BIT" == "true" ]; then
if ! command_exists crontab; then
if command_exists apt-get; then
install_service crontab cron
elif command_exists yum; then
install_service crontab cronie
fi
fi
[ ! -z "$ELK_HOST" ] && sed -i "s/ELK_CONTAINER_NAME/ELK_HOST/g" $BASE_DIR/fluent.yml ${BASE_DIR}/dashboards.yml
OPENSEARCH_INDEX="${OPENSEARCH_INDEX:-"${PACKAGE_SYSNAME}-fluent-bit"}"
@ -1343,6 +1267,7 @@ install_fluent_bit () {
sed -i "s/OPENSEARCH_HOST/${ELK_HOST:-"${PACKAGE_SYSNAME}-opensearch"}/g" "${BASE_DIR}/config/fluent-bit.conf"
sed -i "s/OPENSEARCH_PORT/$(get_env_parameter "ELK_PORT")/g" ${BASE_DIR}/config/fluent-bit.conf
sed -i "s/OPENSEARCH_INDEX/${OPENSEARCH_INDEX}/g" ${BASE_DIR}/config/fluent-bit.conf
[ "$ELK_SCHEME" = "https" ] && sed -i '/tls/s/^#//' ${BASE_DIR}/config/fluent-bit.conf
reconfigure DASHBOARDS_USERNAME "${DASHBOARDS_USERNAME:-"${PACKAGE_SYSNAME}"}"
reconfigure DASHBOARDS_PASSWORD "${DASHBOARDS_PASSWORD:-$(get_random_str 20)}"
@ -1354,8 +1279,6 @@ install_fluent_bit () {
}
install_product () {
DOCKER_TAG="${DOCKER_TAG:-$(get_available_version ${IMAGE_NAME})}"
reconfigure DOCKER_TAG ${DOCKER_TAG}
if [ "$INSTALL_PRODUCT" == "true" ]; then
[ "${UPDATE}" = "true" ] && LOCAL_CONTAINER_TAG="$(docker inspect --format='{{index .Config.Image}}' ${CONTAINER_NAME} | awk -F':' '{print $2}')"
@ -1371,13 +1294,17 @@ install_product () {
reconfigure APP_URL_PORTAL "${APP_URL_PORTAL:-"http://${PACKAGE_SYSNAME}-router:8092"}"
reconfigure EXTERNAL_PORT ${EXTERNAL_PORT}
if [[ -z ${MYSQL_HOST} ]] && [ "$INSTALL_MYSQL_SERVER" == "true" ]; then
if [[ -z ${MYSQL_HOST} ]] && [ "$INSTALL_MYSQL_SERVER" == "true" ] && [[ -n $(docker ps -q --filter "name=${PACKAGE_SYSNAME}-mysql-server") ]]; then
echo -n "Waiting for MySQL container to become healthy..."
(timeout 30 bash -c "while ! docker inspect --format '{{json .State.Health.Status }}' ${PACKAGE_SYSNAME}-mysql-server | grep -q 'healthy'; do sleep 1; done") && echo "OK" || (echo "FAILED")
fi
docker-compose -f $BASE_DIR/migration-runner.yml up -d
echo -n "Waiting for database migration to complete..." && docker wait ${PACKAGE_SYSNAME}-migration-runner && echo "OK"
if [[ -n $(docker ps -q --filter "name=${PACKAGE_SYSNAME}-migration-runner") ]]; then
echo -n "Waiting for database migration to complete..."
timeout 30 bash -c "while [ $(docker wait ${PACKAGE_SYSNAME}-migration-runner) -ne 0 ]; do sleep 1; done;" && echo "OK" || echo "FAILED"
fi
docker-compose -f $BASE_DIR/${PRODUCT}.yml up -d
docker-compose -f ${PROXY_YML} up -d
docker-compose -f $BASE_DIR/notify.yml up -d
@ -1427,6 +1354,80 @@ make_swap () {
fi
}
offline_check_docker_image() {
[ ! -f "$1" ] && { echo "Error: File '$1' does not exist."; exit 1; }
docker-compose -f "$1" config | grep -oP 'image:\s*\K\S+' | while IFS= read -r IMAGE_TAG; do
docker images "${IMAGE_TAG}" | grep -q "${IMAGE_TAG%%:*}" || { echo "Error: The image '${IMAGE_TAG}' is not found in the local Docker registry."; kill -s TERM $PID; }
done
}
check_hub_connection() {
get_tag_from_hub ${IMAGE_NAME}
[ -z "$TAGS_RESP" ] && { echo -e "Unable to download tags from ${HUB:-hub.docker.com}.\nTry specifying another dockerhub name using -hub"; exit 1; } || true
}
dependency_installation() {
is_command_exists apt-get && apt-get -y update -qq
install_package tar
install_package curl
install_package netstat net-tools
if [ "${OFFLINE_INSTALLATION}" = "false" ]; then
install_package dig "dnsutils|bind-utils"
install_package ping "iputils-ping|iputils"
install_package ip "iproute2|iproute"
fi
[ "$INSTALL_FLUENT_BIT" = "true" ] && install_package crontab "cron|cronie"
if ! is_command_exists jq ; then
if is_command_exists yum && ! rpm -q epel-release > /dev/null 2>&1; then
[ "${OFFLINE_INSTALLATION}" = "false" ] && rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-${REV}.noarch.rpm
fi
install_package jq
fi
is_command_exists docker && { check_docker_version; service docker start; } || { [ "${OFFLINE_INSTALLATION}" = "false" ] && install_docker || { echo "docker not installed"; exit 1; }; }
if ! is_command_exists docker-compose; then
[ "${OFFLINE_INSTALLATION}" = "false" ] && install_docker_compose || { echo "docker-compose not installed"; exit 1; }
elif [ "$(docker-compose --version | grep -oP '(?<=v)\d+\.\d+'| sed 's/\.//')" -lt "21" ]; then
[ "$OFFLINE_INSTALLATION" = "false" ] && install_docker_compose || { echo "docker-compose version is outdated"; exit 1; }
fi
}
check_docker_image () {
reconfigure HUB "${HUB%/}${HUB:+/}"
reconfigure STATUS ${STATUS}
reconfigure INSTALLATION_TYPE ${INSTALLATION_TYPE}
reconfigure NETWORK_NAME ${NETWORK_NAME}
reconfigure MYSQL_VERSION ${MYSQL_VERSION}
reconfigure ELK_VERSION ${ELK_VERSION}
reconfigure DOCUMENT_SERVER_IMAGE_NAME "${DOCUMENT_SERVER_IMAGE_NAME}:\${DOCUMENT_SERVER_VERSION}"
reconfigure DOCUMENT_SERVER_VERSION ${DOCUMENT_SERVER_VERSION:-$(get_available_version "$DOCUMENT_SERVER_IMAGE_NAME")}
DOCKER_TAG="${DOCKER_TAG:-$(get_available_version ${IMAGE_NAME})}"
reconfigure DOCKER_TAG ${DOCKER_TAG}
if [ "${OFFLINE_INSTALLATION}" != "false" ]; then
[ "$INSTALL_RABBITMQ" == "true" ] && offline_check_docker_image ${BASE_DIR}/db.yml
[ "$INSTALL_RABBITMQ" == "true" ] && offline_check_docker_image ${BASE_DIR}/rabbitmq.yml
[ "$INSTALL_REDIS" == "true" ] && offline_check_docker_image ${BASE_DIR}/redis.yml
[ "$INSTALL_FLUENT_BIT" == "true" ] && offline_check_docker_image ${BASE_DIR}/fluent.yml
[ "$INSTALL_FLUENT_BIT" == "true" ] && offline_check_docker_image ${BASE_DIR}/dashboards.yml
[ "$INSTALL_ELASTICSEARCH" == "true" ] && offline_check_docker_image ${BASE_DIR}/opensearch.yml
[ "$INSTALL_DOCUMENT_SERVER" == "true" ] && offline_check_docker_image ${BASE_DIR}/ds.yml
if [ "$INSTALL_PRODUCT" == "true" ]; then
offline_check_docker_image ${BASE_DIR}/migration-runner.yml
offline_check_docker_image ${BASE_DIR}/${PRODUCT}.yml
offline_check_docker_image ${BASE_DIR}/notify.yml
offline_check_docker_image ${BASE_DIR}/healthchecks.yml
offline_check_docker_image ${PROXY_YML}
fi
fi
}
start_installation () {
root_checking
@ -1437,6 +1438,8 @@ start_installation () {
check_os_info
check_kernel
dependency_installation
if [ "$UPDATE" != "true" ]; then
check_ports
fi
@ -1449,18 +1452,13 @@ start_installation () {
make_swap
fi
if command_exists docker ; then
check_docker_version
service docker start
else
install_docker
fi
docker_login
[ "${OFFLINE_INSTALLATION}" = "false" ] && check_hub_connection
create_network
domain_check
[ "${OFFLINE_INSTALLATION}" = "false" ] && domain_check
if [ "$UPDATE" = "true" ]; then
set_docspace_params
@ -1476,6 +1474,8 @@ start_installation () {
download_files
check_docker_image
install_elasticsearch
install_fluent_bit
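A note on the reconfigure HUB "${HUB%/}${HUB:+/}" line in check_docker_image above: the expansion normalizes the registry prefix so the ${HUB}${IMAGE} references added to the compose files below work whether or not a custom hub is set. Roughly (values are illustrative):

HUB="registry.example.com/"      # user-supplied, trailing slash optional
echo "${HUB%/}${HUB:+/}"         # registry.example.com/
HUB=""
echo "${HUB%/}${HUB:+/}"         # empty, so image names stay unprefixed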

View File

@ -39,7 +39,7 @@ yum localinstall -y --nogpgcheck https://download1.rpmfusion.org/free/$RPMFUSION
[ "$REV" = "9" ] && update-crypto-policies --set DEFAULT:SHA1 && ${package_manager} -y install xorg-x11-font-utils
[ "$DIST" = "centos" ] && TESTING_REPO="--enablerepo=$( [ "$REV" = "9" ] && echo "crb" || echo "powertools" )"
[ "$DIST" = "redhat" ] && /usr/bin/crb enable
[ "$DIST" = "redhat" ] && { /usr/bin/crb enable && yum repolist enabled | grep -qi -e crb -e codeready || echo "Failed to enable or verify CRB repository."; exit 1; }
#add rabbitmq & erlang repo
curl -s https://packagecloud.io/install/repositories/rabbitmq/rabbitmq-server/script.rpm.sh | bash

View File

@ -71,8 +71,9 @@ read_unsupported_installation () {
esac
}
DIST=$(rpm -q --queryformat '%{NAME}' centos-release redhat-release fedora-release | awk -F'[- ]|package' '{print tolower($1)}' | tr -cd '[:alpha:]')
[ -z $DIST ] && DIST=$(cat /etc/redhat-release | awk -F 'Linux|release| ' '{print tolower($1)}')
DIST=$(rpm -qa --queryformat '%{NAME}\n' | grep -E 'centos-release|redhat-release|fedora-release' | awk -F '-' '{print $1}' | head -n 1)
DIST=${DIST:-$(awk -F= '/^ID=/ {gsub(/"/, "", $2); print tolower($2)}' /etc/os-release)};
[[ "$DIST" =~ ^(centos|redhat|fedora)$ ]] || DIST="centos"
REV=$(sed -n 's/.*release\ \([0-9]*\).*/\1/p' /etc/redhat-release)
REV=${REV:-"7"}

View File

@ -0,0 +1,29 @@
#!/bin/bash
set -e
[ "$(id -u)" -ne 0 ] && { echo "To perform this action you must be logged in with root rights"; exit 1; }
TEMP_DIR=$(mktemp -d)
trap 'echo "Cleaning up temporary files..."; rm -rf "${TEMP_DIR}"' EXIT
! type docker &> /dev/null && { echo "docker not installed"; exit 1; }
! type docker-compose &> /dev/null && { echo "docker-compose not installed"; exit 1; }
echo "Extracting docker images to ${TEMP_DIR}..."
tail -n +$(awk '/^__END_OF_SHELL_SCRIPT__$/{print NR + 1; exit 0;}' "$0") "$0" | tar x -C "${TEMP_DIR}"
echo "Loading docker images..."
docker load -i ${TEMP_DIR}/docker_images.tar.xz
echo "Extracting OneClickInstall files to the current directory..."
mv -f ${TEMP_DIR}/docker.tar.gz $(dirname "$0")/docker.tar.gz
mv -f ${TEMP_DIR}/install-Docker.sh $(dirname "$0")/install-Docker.sh
echo "Running the install-Docker.sh script..."
chmod +x $(dirname "$0")/install-Docker.sh
$(dirname "$0")/install-Docker.sh
exit 0
__END_OF_SHELL_SCRIPT__
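This stub pairs with the archive step of the workflow earlier in this diff, which appends a tar payload after the marker, roughly (paths shortened):

cat common/self-extracting.sh offline-docspace.tar > offline-docspace-installation.sh
chmod +x offline-docspace-installation.sh

At run time the awk call prints the line number just past __END_OF_SHELL_SCRIPT__ and tail streams everything from there, i.e. the tar payload, into tar for extraction.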

View File

@ -79,7 +79,7 @@ override_dh_auto_build: check_archives
sed -E 's_(http://)[^:]+(:5601)_\1localhost\2_g' -i ${BUILDTOOLS_PATH}/config/nginx/onlyoffice.conf
sed 's/teamlab.info/onlyoffice.com/g' -i ${BUILDTOOLS_PATH}/config/autofac.consumers.json
json -I -f ${CLENT_PATH}/public/scripts/config.json -e "this.wrongPortalNameUrl=\"\""
sed -e 's/$$router_host/127.0.0.1/g' -e 's/this_host\|proxy_x_forwarded_host/host/g' -e 's/proxy_x_forwarded_proto/scheme/g' -e 's/proxy_x_forwarded_port/server_port/g' -e 's_includes_/etc/openresty/includes_g' -e '/quic\|alt-svc/Id' -i ${BUILDTOOLS_PATH}/install/docker/config/nginx/onlyoffice-proxy*.conf
sed -e 's/$$router_host/127.0.0.1/g' -e 's/this_host\|proxy_x_forwarded_host/host/g' -e 's/proxy_x_forwarded_proto/scheme/g' -e 's_includes_/etc/openresty/includes_g' -e '/quic\|alt-svc/Id' -i ${BUILDTOOLS_PATH}/install/docker/config/nginx/onlyoffice-proxy*.conf
sed "s_\(.*root\).*;_\1 \"/var/www/${PRODUCT}\";_g" -i ${BUILDTOOLS_PATH}/install/docker/config/nginx/letsencrypt.conf
sed -e '/.pid/d' -e '/temp_path/d' -e 's_etc/nginx_etc/openresty_g' -e 's/\.log/-openresty.log/g' -i ${BUILDTOOLS_PATH}/install/docker/config/nginx/templates/nginx.conf.template
mv -f ${BUILDTOOLS_PATH}/install/docker/config/nginx/onlyoffice-proxy-ssl.conf ${BUILDTOOLS_PATH}/install/docker/config/nginx/onlyoffice-proxy-ssl.conf.template

View File

@ -1,30 +1,45 @@
# docker-compose tags #
HUB=""
PRODUCT=onlyoffice
REPO=${PRODUCT}
INSTALLATION_TYPE=COMMUNITY
STATUS=""
DOCKER_IMAGE_PREFIX=${STATUS}docspace
DOCKER_TAG=latest
CONTAINER_PREFIX=${PRODUCT}-
MYSQL_VERSION=8.3.0
MYSQL_IMAGE=mysql:${MYSQL_VERSION}
SERVICE_PORT=5050
DOCUMENT_SERVER_IMAGE_NAME=onlyoffice/4testing-documentserver-ee:latest
DOCKERFILE=Dockerfile.app
APP_DOTNET_ENV=""
EXTERNAL_PORT="80"
# opensearch stack #
# images version #
DOCKER_TAG=latest
MYSQL_VERSION=8.3.0
PROXY_VERSION=latest
REDIS_VERSION=7
RABBITMQ_VERSION=3
ELK_VERSION=2.11.1
FLUENT_BIT_VERSION=3.0.2
DASHBOARDS_VERSION=2.11.1
DOCUMENT_SERVER_VERSION=latest
# images name #
MYSQL_IMAGE=mysql:${MYSQL_VERSION}
PROXY_IMAGE_NAME=nginx:${PROXY_VERSION}
REDIS_IMAGE_NAME=redis:${REDIS_VERSION}
RABBITMQ_IMAGE_NAME=rabbitmq:${RABBITMQ_VERSION}
ELK_IMAGE_NAME=${REPO}/opensearch:${ELK_VERSION}
FLUENT_BIT_IMAGE_NAME=fluent/fluent-bit:${FLUENT_BIT_VERSION}
DASHBOARDS_IMAGE_NAME=opensearchproject/opensearch-dashboards:${DASHBOARDS_VERSION}
DOCUMENT_SERVER_IMAGE_NAME=${REPO}/4testing-documentserver-ee:${DOCUMENT_SERVER_VERSION}
# opensearch stack #
ELK_CONTAINER_NAME=${CONTAINER_PREFIX}opensearch
ELK_SHEME=http
ELK_HOST=""
ELK_PORT=9200
DASHBOARDS_VERSION=2.11.1
DASHBOARDS_CONTAINER_NAME=${CONTAINER_PREFIX}opensearch-dashboards
DASHBOARDS_USERNAME=onlyoffice
DASHBOARDS_PASSWORD=onlyoffice
FLUENT_BIT_VERSION=3.0.2
FLUENT_BIT_CONTAINER_NAME=${CONTAINER_PREFIX}fluent-bit
# app service environment #

View File

@ -52,7 +52,7 @@ RUN cd ${SRC_PATH} && \
cd ${SRC_PATH} && \
cp buildtools/config/*.config /app/onlyoffice/config/ && \
mkdir -p /etc/nginx/conf.d && cp -f buildtools/config/nginx/onlyoffice*.conf /etc/nginx/conf.d/ && \
mkdir -p /etc/nginx/includes/ && cp -f buildtools/config/nginx/includes/onlyoffice*.conf /etc/nginx/includes/ && \
mkdir -p /etc/nginx/includes/ && cp -f buildtools/config/nginx/includes/onlyoffice*.conf /etc/nginx/includes/ && cp -f buildtools/config/nginx/includes/server-*.conf /etc/nginx/includes/ && \
sed -i "s/\"number\".*,/\"number\": \"${PRODUCT_VERSION}.${BUILD_NUMBER}\",/g" /app/onlyoffice/config/appsettings.json && \
sed -e 's/#//' -i /etc/nginx/conf.d/onlyoffice.conf && \
cd ${SRC_PATH}/buildtools/install/common/ && \
@ -173,7 +173,7 @@ RUN sed -i 's/127.0.0.1:5010/$service_api_system/' /etc/nginx/conf.d/onlyoffice.
if [[ -z "${SERVICE_CLIENT}" ]] ; then sed -i 's/127.0.0.1:5001/$service_client/' /etc/nginx/conf.d/onlyoffice.conf; fi && \
if [[ -z "${SERVICE_MANAGEMENT}" ]] ; then sed -i 's/127.0.0.1:5015/$service_management/' /etc/nginx/conf.d/onlyoffice.conf; fi && \
sed -i 's/127.0.0.1:5033/$service_healthchecks/' /etc/nginx/conf.d/onlyoffice.conf && \
sed -i 's/127.0.0.1:5601/$dashboards_host:5601/' /etc/nginx/conf.d/onlyoffice.conf && \
sed -i 's/127.0.0.1:5601/$dashboards_host:5601/' /etc/nginx/includes/server-dashboards.conf && \
sed -i 's/$public_root/\/var\/www\/public\//' /etc/nginx/conf.d/onlyoffice.conf && \
sed -i 's/http:\/\/172.*/$document_server;/' /etc/nginx/conf.d/onlyoffice.conf && \
sed -i '/client_body_temp_path/ i \ \ \ \ $MAP_HASH_BUCKET_SIZE' /etc/nginx/nginx.conf.template && \

View File

@ -18,6 +18,7 @@
Match *
Host OPENSEARCH_HOST
Port OPENSEARCH_PORT
# tls On
Replace_Dots On
Suppress_Type_Name On
Compress gzip

View File

@ -1,7 +1,7 @@
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header Host $this_host;
proxy_set_header X-Forwarded-Host $proxy_x_forwarded_host:$proxy_x_forwarded_port;
proxy_set_header X-Forwarded-Host $proxy_x_forwarded_host;
proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_hide_header 'Server';

View File

@ -16,8 +16,8 @@ map $http_x_forwarded_host $proxy_x_forwarded_host {
}
map $http_x_forwarded_port $proxy_x_forwarded_port {
default $EXTERNAL_PORT;
~^(.*)$ $1;
default $http_x_forwarded_port;
'' $server_port;
}
map $http_upgrade $proxy_connection {

View File

@ -1,6 +1,6 @@
services:
onlyoffice-opensearch-dashboards:
image: opensearchproject/opensearch-dashboards:${DASHBOARDS_VERSION}
image: ${HUB}${DASHBOARDS_IMAGE_NAME}
container_name: ${DASHBOARDS_CONTAINER_NAME}
restart: always
environment:
@ -9,6 +9,12 @@ services:
- "SERVER_BASEPATH=/dashboards"
expose:
- "5601"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:5601/api/status"]
interval: 10s
retries: 3
start_period: 10s
timeout: 10s
networks:
default:

View File

@ -1,6 +1,6 @@
services:
onlyoffice-mysql-server:
image: ${MYSQL_IMAGE}
image: ${HUB}${MYSQL_IMAGE}
cap_add:
- SYS_NICE
container_name: ${MYSQL_CONTAINER_NAME}
@ -17,9 +17,10 @@ services:
MYSQL_USER: ${MYSQL_USER}
MYSQL_PASSWORD: ${MYSQL_PASSWORD}
healthcheck:
test: ["CMD-SHELL", "mysqladmin ping --silent"]
test: ["CMD", "mysql", "-u", "${MYSQL_USER}", "--password=${MYSQL_PASSWORD}", "-e", ";"]
interval: 10s
timeout: 5s
start_period: 10s
retries: 3
volumes:
- mysql_data:/var/lib/mysql
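With healthchecks now declared for the dependent containers, their state can be queried the same way the new CI job does, for example (container name as created by the install script):

docker inspect --format '{{if .State.Health}}{{.State.Health.Status}}{{else}}no healthcheck{{end}}' onlyoffice-mysql-server
# prints starting, healthy or unhealthy once the check has run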

View File

@ -150,7 +150,12 @@ services:
<<: *x-profiles-local
image: ${Baseimage_Dotnet_Run}
working_dir: ${BUILD_PATH}/studio/ASC.Web.Studio/
command: ["ASC.Web.Studio.dll", "ASC.Web.Studio"]
command:
[
"ASC.Web.Studio.dll",
"ASC.Web.Studio",
"core:eventBus:subscriptionClientName=asc_event_bus_webstudio_queue",
]
volumes:
- ${SRC_PATH}/ASC.Web.Studio/service:${BUILD_PATH}/studio/ASC.Web.Studio/
- ${SRC_PATH}/ASC.Files/service/:${BUILD_PATH}/products/ASC.Files/server/

View File

@ -61,7 +61,7 @@ x-service: &x-service-base
services:
onlyoffice-backup-background-tasks:
<<: *x-service-base
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-backup-background:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-backup-background:${DOCKER_TAG}"
container_name: ${BACKUP_BACKGRUOND_TASKS_HOST}
healthcheck:
<<: *x-healthcheck
@ -69,7 +69,7 @@ services:
onlyoffice-backup:
<<: *x-service-base
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-backup:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-backup:${DOCKER_TAG}"
container_name: ${BACKUP_HOST}
healthcheck:
<<: *x-healthcheck
@ -77,7 +77,7 @@ services:
onlyoffice-clear-events:
<<: *x-service-base
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-clear-events:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-clear-events:${DOCKER_TAG}"
container_name: ${CLEAR_EVENTS_HOST}
healthcheck:
<<: *x-healthcheck
@ -85,7 +85,7 @@ services:
onlyoffice-files:
<<: *x-service-base
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-files:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-files:${DOCKER_TAG}"
container_name: ${FILES_HOST}
healthcheck:
<<: *x-healthcheck
@ -93,7 +93,7 @@ services:
onlyoffice-files-services:
<<: *x-service-base
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-files-services:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-files-services:${DOCKER_TAG}"
container_name: ${FILES_SERVICES_HOST}
healthcheck:
<<: *x-healthcheck
@ -101,7 +101,7 @@ services:
onlyoffice-people-server:
<<: *x-service-base
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-people-server:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-people-server:${DOCKER_TAG}"
container_name: ${PEOPLE_SERVER_HOST}
healthcheck:
<<: *x-healthcheck
@ -109,14 +109,14 @@ services:
onlyoffice-socket:
<<: *x-service-base
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-socket:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-socket:${DOCKER_TAG}"
container_name: ${SOCKET_HOST}
expose:
- ${SERVICE_PORT}
onlyoffice-studio-notify:
<<: *x-service-base
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-studio-notify:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-studio-notify:${DOCKER_TAG}"
container_name: ${STUDIO_NOTIFY_HOST}
healthcheck:
<<: *x-healthcheck
@ -124,7 +124,7 @@ services:
onlyoffice-api:
<<: *x-service-base
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-api:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-api:${DOCKER_TAG}"
container_name: ${API_HOST}
healthcheck:
<<: *x-healthcheck
@ -132,7 +132,7 @@ services:
onlyoffice-api-system:
<<: *x-service-base
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-api-system:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-api-system:${DOCKER_TAG}"
container_name: ${API_SYSTEM_HOST}
healthcheck:
<<: *x-healthcheck
@ -140,7 +140,7 @@ services:
onlyoffice-studio:
<<: *x-service-base
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-studio:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-studio:${DOCKER_TAG}"
container_name: ${STUDIO_HOST}
healthcheck:
<<: *x-healthcheck
@ -148,7 +148,7 @@ services:
onlyoffice-ssoauth:
<<: *x-service-base
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-ssoauth:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-ssoauth:${DOCKER_TAG}"
container_name: ${SSOAUTH_HOST}
expose:
- ${SERVICE_PORT}
@ -156,7 +156,7 @@ services:
onlyoffice-doceditor:
<<: *x-service-base
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-doceditor:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-doceditor:${DOCKER_TAG}"
container_name: ${DOCEDITOR_HOST}
expose:
- "5013"
@ -166,7 +166,7 @@ services:
onlyoffice-login:
<<: *x-service-base
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-login:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-login:${DOCKER_TAG}"
container_name: ${LOGIN_HOST}
expose:
- "5011"
@ -175,7 +175,7 @@ services:
test: curl --fail http://${SERVICE_LOGIN}/login/health || exit 1
onlyoffice-router:
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-router:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-router:${DOCKER_TAG}"
container_name: ${ROUTER_HOST}
restart: always
healthcheck:

View File

@ -1,6 +1,6 @@
services:
onlyoffice-document-server:
image: "${DOCUMENT_SERVER_IMAGE_NAME}"
image: "${HUB}${DOCUMENT_SERVER_IMAGE_NAME}"
container_name: ${DOCUMENT_CONTAINER_NAME}
# Strings below enable the JSON Web Token validation.
environment:
@ -16,6 +16,12 @@ services:
stdin_open: true
restart: always
stop_grace_period: 60s
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8000/info/info.json"]
interval: 30s
retries: 5
start_period: 60s
timeout: 10s
networks:
default:

View File

@ -1,6 +1,6 @@
services:
fluent-bit:
image: fluent/fluent-bit:${FLUENT_BIT_VERSION}
image: ${HUB}${FLUENT_BIT_IMAGE_NAME}
container_name: ${FLUENT_BIT_CONTAINER_NAME}
restart: always
environment:

View File

@ -13,7 +13,7 @@ x-service:
services:
onlyoffice-health-checks-ui:
<<: *x-service-base
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-healthchecks:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-healthchecks:${DOCKER_TAG}"
container_name: ${HELTHCHECKS_HOST}
networks:

View File

@ -1,8 +1,8 @@
services:
onlyoffice-migration-runner:
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-migration-runner:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-migration-runner:${DOCKER_TAG}"
container_name: ${MIGRATION_RUNNER_HOST}
restart: "no"
restart: "on-failure"
environment:
MYSQL_CONTAINER_NAME: ${MYSQL_CONTAINER_NAME}
MYSQL_HOST: ${MYSQL_HOST}

View File

@ -61,7 +61,7 @@ x-service:
services:
onlyoffice-notify:
<<: *x-service-base
image: "${REPO}/${DOCKER_IMAGE_PREFIX}-notify:${DOCKER_TAG}"
image: "${HUB}${REPO}/${DOCKER_IMAGE_PREFIX}-notify:${DOCKER_TAG}"
container_name: ${NOTIFY_HOST}
healthcheck:
<<: *x-healthcheck

View File

@ -1,6 +1,6 @@
services:
onlyoffice-opensearch:
image: onlyoffice/opensearch:${ELK_VERSION}
image: ${HUB}${ELK_IMAGE_NAME}
container_name: ${ELK_CONTAINER_NAME}
restart: always
environment:
@ -25,6 +25,12 @@ services:
- "9600" # required for Performance Analyzer
ports:
- 127.0.0.1:9200:9200
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9200/_cluster/health?pretty"]
interval: 30s
retries: 3
start_period: 10s
timeout: 10s
networks:
default:

View File

@ -8,7 +8,7 @@ x-healthcheck:
services:
onlyoffice-proxy:
image: nginx
image: ${HUB}${PROXY_IMAGE_NAME}
container_name: ${PROXY_HOST}
restart: always
healthcheck:
@ -20,7 +20,6 @@ services:
- 443:443/udp
environment:
- ROUTER_HOST=${ROUTER_HOST}
- EXTERNAL_PORT=${EXTERNAL_PORT}
volumes:
- webroot_path:/letsencrypt
- log_data:/var/log/nginx

View File

@ -8,7 +8,7 @@ x-healthcheck:
services:
onlyoffice-proxy:
image: nginx
image: ${HUB}${PROXY_IMAGE_NAME}
container_name: ${PROXY_HOST}
restart: always
healthcheck:
@ -18,7 +18,6 @@ services:
- ${EXTERNAL_PORT}:80
environment:
- ROUTER_HOST=${ROUTER_HOST}
- EXTERNAL_PORT=${EXTERNAL_PORT}
volumes:
- webroot_path:/letsencrypt
- log_data:/var/log/nginx

View File

@ -1,11 +1,18 @@
services:
onlyoffice-rabbitmq:
image: rabbitmq:3
image: ${HUB}${RABBITMQ_IMAGE_NAME}
container_name: ${RABBIT_CONTAINER_NAME}
restart: always
expose:
- "5672"
- "80"
healthcheck:
test: ["CMD", "rabbitmq-diagnostics", "status"]
interval: 10s
retries: 3
start_period: 10s
timeout: 10s
networks:
default:
name: ${NETWORK_NAME}

View File

@ -1,10 +1,17 @@
services:
onlyoffice-redis:
image: redis:7
image: ${HUB}${REDIS_IMAGE_NAME}
container_name: ${REDIS_CONTAINER_NAME}
restart: always
expose:
- "6379"
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
retries: 3
start_period: 10s
timeout: 10s
networks:
default:
name: ${NETWORK_NAME}

View File

@ -27,7 +27,7 @@ sed 's/teamlab.info/onlyoffice.com/g' -i config/autofac.consumers.json
sed -e 's_etc/nginx_etc/openresty_g' -e 's/listen\s\+\([0-9]\+\);/listen 127.0.0.1:\1;/g' -i config/nginx/*.conf
sed -i "s#\$public_root#/var/www/%{product}/public/#g" config/nginx/onlyoffice.conf
sed -E 's_(http://)[^:]+(:5601)_\1localhost\2_g' -i config/nginx/onlyoffice.conf
sed -e 's/$router_host/127.0.0.1/g' -e 's/this_host\|proxy_x_forwarded_host/host/g' -e 's/proxy_x_forwarded_proto/scheme/g' -e 's/proxy_x_forwarded_port/server_port/g' -e 's_includes_/etc/openresty/includes_g' -e '/quic\|alt-svc/Id' -i install/docker/config/nginx/onlyoffice-proxy*.conf
sed -e 's/$router_host/127.0.0.1/g' -e 's/this_host\|proxy_x_forwarded_host/host/g' -e 's/proxy_x_forwarded_proto/scheme/g' -e 's_includes_/etc/openresty/includes_g' -e '/quic\|alt-svc/Id' -i install/docker/config/nginx/onlyoffice-proxy*.conf
sed -e '/.pid/d' -e '/temp_path/d' -e 's_etc/nginx_etc/openresty_g' -e 's/\.log/-openresty.log/g' -i install/docker/config/nginx/templates/nginx.conf.template
sed -i "s_\(.*root\).*;_\1 \"/var/www/%{product}\";_g" -i install/docker/config/nginx/letsencrypt.conf
sed -i "s#\(/var/log/onlyoffice/\)#\1%{product}#" install/docker/config/fluent-bit.conf