SonTV 2023-06-15 11:00:55 +07:00
parent 085892725d
commit 078372f838
7 changed files with 269 additions and 0 deletions

Dockerfile (Normal file, 35 lines added)
@@ -0,0 +1,35 @@
FROM alpine:3.12
LABEL maintainer="Fco. Javier Delgado del Hoyo <frandelhoyo@gmail.com>"
RUN apk add --update tzdata bash mariadb-client gzip openssl && rm -rf /var/cache/apk/*
ARG OS=alpine-linux
ARG ARCH=amd64
ARG DOCKERIZE_VERSION=v0.6.1
RUN wget https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-$OS-$ARCH-$DOCKERIZE_VERSION.tar.gz \
&& tar -C /usr/local/bin -xzvf dockerize-$OS-$ARCH-$DOCKERIZE_VERSION.tar.gz \
&& rm dockerize-$OS-$ARCH-$DOCKERIZE_VERSION.tar.gz
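# MongoDB is no longer packaged in recent Alpine releases, so the older v3.6 repositories are
# added below to make the mongodb and mongodb-tools packages installable (the version pinned
# here is implicitly whatever the v3.6 branch ships).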
RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/main' >> /etc/apk/repositories
RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.6/community' >> /etc/apk/repositories
RUN apk update
RUN apk add --no-cache mongodb
RUN apk add --no-cache mongodb-tools
RUN mongo --version
RUN apk --no-cache add curl
# ENV CRON_TIME="0 3 * * sun" \
# MYSQL_HOST="mysql" \
# MYSQL_PORT="3306" \
# TIMEOUT="10s"
COPY ["run.sh", "backup.sh", "restore.sh", "backup_mongo.sh", "restore_mongo.sh","/"]
RUN mkdir -p /backup /backup_mongo
RUN mkdir -p /logs
RUN chmod u+x /backup.sh /restore.sh /backup_mongo.sh /restore_mongo.sh
VOLUME ["/backup", "/backup_mongo"]
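# NOTE: because an ENTRYPOINT is defined below, this shell-form CMD only supplies default
# arguments to run.sh, so the dockerize wait is effectively skipped; the commented variant
# shows the form that would actually gate startup on the MySQL port.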
CMD dockerize -wait tcp://${MYSQL_HOST}:${MYSQL_PORT} -timeout ${TIMEOUT}
# CMD dockerize -wait tcp://${MYSQL_HOST}:${MYSQL_PORT} -timeout ${TIMEOUT} /run.sh
ENTRYPOINT ["sh", "/run.sh"]

backup.sh (Normal file, 60 lines added)
@@ -0,0 +1,60 @@
#!/bin/bash
[ -z "${MYSQL_USER}" ] && { echo "=> MYSQL_USER cannot be empty" && exit 1; }
[ -z "${MYSQL_PASS:=$MYSQL_PASSWORD}" ] && { echo "=> MYSQL_PASS cannot be empty" && exit 1; }
[ -z "${GZIP_LEVEL}" ] && { GZIP_LEVEL=6; }
DATE=$(date +%Y%m%d%H%M)
date=$(date +%Y%m%d)
dateValue=$(date -R)
contentType="application/x-compressed-tar"
echo "=> [Backup MYSQL] started at $(date "+%Y-%m-%d %H:%M:%S")"
DATABASES=${MYSQL_DATABASE:-${MYSQL_DB:-$(mysql -h "$MYSQL_HOST" -P "$MYSQL_PORT" -u "$MYSQL_USER_ROOT" -p"$MYSQL_ROOT_PASSWORD" -e "SHOW DATABASES;" | tr -d "| " | grep -v Database)}}
DB_COUNTER=0
for db in ${DATABASES}
do
if [[ "$db" != "information_schema" ]] && [[ "$db" != "performance_schema" ]] && [[ "$db" != "mysql" ]] && [[ "$db" != "sys" ]] && [[ "$db" != _* ]]
then
echo "==> Dumping database: $db"
FILENAME=/backup/$DATE.$db.sql
LATEST=/backup/latest.$db.sql.gz
if mysqldump --no-tablespaces -h "$MYSQL_HOST" -P "$MYSQL_PORT" -u "$MYSQL_USER_ROOT" -p"$MYSQL_ROOT_PASSWORD" $db $MYSQLDUMP_OPTS > "$FILENAME"
then
gzip "-$GZIP_LEVEL" -f "$FILENAME"
if [ -n "$S3_BACKUP" ]
then
fname=$(basename "$FILENAME".gz)
echo "Start sending $fname to S3"
resource="/${S3_BUCKET}/${S3_LOCATION}/${date}/${db}/${fname}"
stringToSign="PUT\n\n${contentType}\n${dateValue}\n${resource}"
signature=`echo -en ${stringToSign} | openssl sha1 -hmac ${S3_SECRET} -binary | base64`
curl -X PUT -T "${FILENAME}.gz" \
-H "Host: ${S3_BUCKET}.s3.amazonaws.com" \
-H "Date: ${dateValue}" \
-H "Content-Type: ${contentType}" \
-H "Authorization: AWS ${S3_KEY}:${signature}" \
https://${S3_BUCKET}.s3.amazonaws.com/${S3_LOCATION}/${date}/${db}/${fname}
echo "$fname has been sent to S3 successfully."
fi
echo "==> Creating symlink to latest backup: $(basename "$FILENAME".gz)"
rm "$LATEST" 2> /dev/null
cd /backup && ln -s $(basename "$FILENAME".gz) $(basename "$LATEST") && cd -
DB_COUNTER=$(( DB_COUNTER + 1 ))
else
rm -rf "$FILENAME"
fi
fi
done
if [ -n "$MAX_BACKUPS" ]
then
MAX_FILES=$(( MAX_BACKUPS * DB_COUNTER ))
while [ "$(find /backup -maxdepth 1 -name "*.sql.gz" -type f | wc -l)" -gt "$MAX_FILES" ]
do
TARGET=$(find /backup -maxdepth 1 -name "*.sql.gz" -type f | sort | head -n 1)
echo "==> Max number of backups ($MAX_BACKUPS) reached. Deleting ${TARGET} ..."
rm -rf "${TARGET}"
echo "==> Backup ${TARGET} deleted"
done
fi
echo "=> [Backup MYSQL] process finished at $(date "+%Y-%m-%d %H:%M:%S")"

backup_mongo.sh (Normal file, 48 lines added)
@@ -0,0 +1,48 @@
#!/bin/bash
MAX_BACKUPS=${MAX_BACKUPS}
FOLDER=$(date +%Y%m%d%H%M)
date=$(date +%Y%m%d)
dateValue=$(date -R)
contentType="application/x-compressed-tar"
echo "=> [Backup MONGO] started at $(date "+%Y-%m-%d %H:%M:%S")"
# BACKUP_CMD="mongodump --out /backup/mongo/"'${FILENAME}'" --host ${MONGODB_HOST} --port ${MONGODB_PORT} ${MONGO_USER}${PASS_STR}${MONGO_PASS} ${EXTRA_OPTS}"
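# Dump the whole server, or only MONGO_DB when it is set.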
if [ -z "$MONGO_DB" ]
then
BACKUP_CMD="mongodump --host ${MONGODB_HOST} --port ${MONGODB_PORT} -o /backup_mongo/${FOLDER}/"
else
BACKUP_CMD="mongodump --host ${MONGODB_HOST} --port ${MONGODB_PORT} -d ${MONGO_DB} -o /backup_mongo/${FOLDER}/"
fi
if ${BACKUP_CMD} ; then
  if [ -n "$S3_BACKUP" ]
  then
    # The dump already ran as the `if` condition above, so it is not executed a second time here.
    cd /backup_mongo
    tar -zcf "${FOLDER}.tar.gz" "${FOLDER}"
    fname=$(basename "$FOLDER".tar.gz)
    echo "Start sending $fname to S3"
    # Legacy AWS Signature Version 2 signing, same scheme as backup.sh.
    resource="/${S3_BUCKET}/${S3_LOCATION}/${date}/mongo/${fname}"
    stringToSign="PUT\n\n${contentType}\n${dateValue}\n${resource}"
    signature=$(echo -en "${stringToSign}" | openssl sha1 -hmac "${S3_SECRET}" -binary | base64)
    curl -X PUT -T "${fname}" \
      -H "Host: ${S3_BUCKET}.s3.amazonaws.com" \
      -H "Date: ${dateValue}" \
      -H "Content-Type: ${contentType}" \
      -H "Authorization: AWS ${S3_KEY}:${signature}" \
      "https://${S3_BUCKET}.s3.amazonaws.com/${S3_LOCATION}/${date}/mongo/${fname}"
    rm -rf "${FOLDER}.tar.gz"
    echo "$fname has been sent to S3 successfully."
  fi
  echo "Backup succeeded"
else
  echo "Backup failed"
  rmdir "/backup_mongo/${FOLDER}"
fi
if [ -n "$MAX_BACKUPS" ]
then
while [ "$(ls /backup_mongo | wc -l)" -gt "$MAX_BACKUPS" ]
do
FOLDER_DELETED=$(ls /backup_mongo -1 | sort | head -1)
echo "Deleting backup ${BACKUP_TO_BE_DELETED}"
rm -rf "/backup_mongo/${FOLDER_DELETED}"
done
fi

docker-compose.yaml (Normal file, 65 lines added)
@@ -0,0 +1,65 @@
version: "2"
services:
  backup:
    container_name: ${APP_NAME}_backup
    image: registry.beetai.com:5000/bface_backup:dev
    restart: always
    networks:
      - bi-net
    volumes:
      - "/home/rnd/sdb5/${APP_NAME}/backup:/backup"
    environment:
      MYSQL_HOST: ${APP_NAME}_dbsql
      MYSQL_PORT: 3306
      MYSQL_USER: 'admin'
      MYSQL_PASS: 'beetsoft123'
      MAX_BACKUPS: 48
      INIT_BACKUP: 1
      CRON_TIME: "0 * * * *"
      MYSQL_DB: 'backend'
      MONGO_DB: 'bface'
      MONGODB_HOST: ${APP_NAME}_mongo
      MONGODB_PORT: 27017
      BUCKET: biface
      S3_LOCATION_BOXAICMS: dbbackup/boxaicms
      S3KEY: AKIAZNTBHC26RSA2YZU2
      S3SECRET: hKhJZ9iN7N9NMU3IUyGlCoLdpvQINBaVZR3RiqnUss
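      # NOTE: backup.sh only uploads to S3 when S3_BACKUP is set, and it reads S3_BUCKET,
      # S3_LOCATION, S3_KEY and S3_SECRET; the BUCKET/S3_LOCATION_BOXAICMS/S3KEY/S3SECRET
      # names above are not read by the scripts as written. The bi-net network is also not
      # declared at the top level of this file.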
#  dbsql:
#    container_name: ${APP_NAME}_dbsql
#    image: mysql:8.0
#    restart: always
#    networks:
#      - beetai_network
#    ports:
#      - "13306:3306"
#    command: ['mysqld', '--character-set-server=utf8mb4', '--collation-server=utf8mb4_unicode_ci']
#    environment:
#      # POSTGRES_DB: "postgres"
#      MYSQL_DATABASE: 'backend'
#      # So you don't have to use root, but you can if you like
#      # POSTGRES_USER: "postgres"
#      MYSQL_USER: 'admin'
#      # You can use whatever password you like
#      # POSTGRES_PASSWORD: "beetsoft123"
#      MYSQL_PASSWORD: 'beetsoft123'
#      # Password for root access
#      MYSQL_ROOT_PASSWORD: "beetai@2019"
#    volumes:
#      - "/home/rnd/sdb5/${APP_NAME}/dbsql:/var/lib/mysql"
#      # - "/home/rnd/sdb5/${APP_NAME}/dbsql:/var/lib/postgresql/data:z"
#    # healthcheck:
#    #   test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-p$$MYSQL_ROOT_PASSWORD"]
#    #   interval: 10s
#    #   timeout: 20s
#    #   retries: 3
volumes:
  data:

restore.sh (Normal file, 18 lines added)
@@ -0,0 +1,18 @@
#!/bin/bash
[ -z "${MYSQL_USER}" ] && { echo "=> MYSQL_USER cannot be empty" && exit 1; }
[ -z "${MYSQL_PASS}" ] && { echo "=> MYSQL_PASS cannot be empty" && exit 1; }
if [ "$#" -ne 1 ]
then
echo "You must pass the path of the backup file to restore"
fi
echo "=> Restore database from $1"
set -o pipefail
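# With pipefail set, the restore is reported as failed if either gunzip or mysql fails.
# Example invocation, using the container and symlink names from this commit:
#   docker exec ${APP_NAME}_backup /restore.sh /backup/latest.backend.sql.gz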
if gunzip --stdout "$1" | mysql -h "$MYSQL_HOST" -P "$MYSQL_PORT" -u "$MYSQL_USER" -p"$MYSQL_PASS" "$MYSQL_DB"
then
  echo "=> Restore succeeded"
else
  echo "=> Restore failed"
fi

restore_mongo.sh (Normal file, 18 lines added)
@@ -0,0 +1,18 @@
#!/bin/bash
echo "[Restore mongo] start"
if [ "$#" -ne 1 ]
then
echo "You must pass the path of the backup file to restore"
fi
echo "=> Restore database from $1"
set -o pipefail
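# $1 is expected to be a dump directory produced by mongodump, e.g. /backup_mongo/<timestamp>/bface;
# its contents are restored into MONGO_DB:
#   docker exec ${APP_NAME}_backup /restore_mongo.sh /backup_mongo/<timestamp>/bface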
RESTORE_CMD="mongorestore --host ${MONGODB_HOST} --port ${MONGODB_PORT} -d ${MONGO_DB} $1"
if ${RESTORE_CMD}
then
  echo "=> Restore succeeded"
else
  echo "=> Restore failed"
fi
echo "[Restore mongo] finish"

run.sh (Normal file, 25 lines added)
@@ -0,0 +1,25 @@
#!/bin/bash
touch /logs/mysql_backup.log /logs/mongo_backup.log
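# Stream both log files to stdout so backup output is visible via docker logs.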
tail -F /logs/mysql_backup.log /logs/mongo_backup.log &
rm -rf /crontab.conf
if [ "${INIT_BACKUP}" -gt "0" ]; then
echo "=> Create a backup on the startup"
/backup_mongo.sh
/backup.sh
elif [ -n "${INIT_RESTORE_LATEST}" ]; then
echo "=> Restore latest backup"
until nc -z "$MYSQL_HOST" "$MYSQL_PORT"
do
echo "waiting database container..."
sleep 1
done
find /backup -maxdepth 1 -name '*.sql.gz' | tail -1 | xargs /restore.sh
fi
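# Register both backup jobs with cron and keep crond in the foreground as the container's main process.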
echo "${CRON_TIME} /backup.sh >> /logs/mysql_backup.log 2>&1 " >> /crontab.conf
echo "${CRON_TIME} /backup_mongo.sh >> /logs/mongo_backup.log 2>&1" >> /crontab.conf
crontab /crontab.conf
echo "=> Running cron task manager"
exec crond -f