Compare commits

...

136 Commits

Author SHA1 Message Date
3dce2017f8 Merge pull request #118 from jkaninda/fix-notification
fix: fix multi backup s3 path
2024-10-10 05:32:08 +02:00
Jonas Kaninda
ed2f1b8d9c fix: fix multi backup s3 path 2024-10-10 05:31:18 +02:00
b64875df21 Merge pull request #117 from jkaninda/fix-notification
docs: correct grammar in  receive-notification.md
2024-10-10 04:28:52 +02:00
Jonas Kaninda
fc90507b3f docs: correct grammar in receive-notification.md 2024-10-10 04:28:02 +02:00
df0efd24d3 Merge pull request #116 from jkaninda/fix-notification
chore: fix infinity calling Fatal, add a backup reference
2024-10-10 04:15:12 +02:00
Jonas Kaninda
e5dd7e76ce chore: fix infinity calling Fatal, add a backup reference 2024-10-10 04:14:42 +02:00
12fbb67a09 Merge pull request #115 from jkaninda/email-notification
docs: update send notification
2024-10-09 22:38:35 +02:00
Jonas Kaninda
df490af7b6 docs: update send notification 2024-10-09 22:38:07 +02:00
d930c3e2f6 Merge pull request #114 from jkaninda/email-notification
feat: add email notification for failed and success backup
2024-10-09 22:32:44 +02:00
Jonas Kaninda
e4258cb12e feat: add email notification for failed and success backup 2024-10-09 22:31:52 +02:00
4c44166921 Merge pull request #113 from jkaninda/develop
Develop
2024-10-09 12:51:15 +02:00
554df819ab Merge pull request #112 from jkaninda/multi-backup
docs: add mutli database backup example
2024-10-09 12:49:46 +02:00
Jonas Kaninda
ca5633882e docs: add mutli database backup example 2024-10-09 12:45:55 +02:00
c5cca82841 Merge pull request #111 from jkaninda/multi-backup
Add Multi database backup
2024-10-09 12:24:37 +02:00
Jonas Kaninda
bbd5422089 ci: change Dockerfile path 2024-10-09 12:23:45 +02:00
Jonas Kaninda
d72156f890 feat: add multi database backup 2024-10-09 12:23:14 +02:00
Jonas Kaninda
909a50dbe7 docs: update backup encryption example 2024-10-08 23:20:50 +02:00
Jonas Kaninda
94ceb71da2 docs: update backup encryption example 2024-10-08 23:05:10 +02:00
Jonas Kaninda
fe05fe5110 feat: add encrypt backup using public key, migrate gpg to go gpg dependency 2024-10-08 23:02:46 +02:00
dabba2050a Merge pull request #110 from jkaninda/refactor
chore: remove os.kill.signal
2024-10-05 10:42:55 +02:00
Jonas Kaninda
47e1ac407b chore: remove os.kill.signal 2024-10-05 10:41:46 +02:00
28f6ed3a82 Merge pull request #109 from jkaninda/refactor
fix: logging time
2024-10-05 10:40:11 +02:00
Jonas Kaninda
504926c7cd fix: logging time 2024-10-05 10:39:49 +02:00
737f473f92 Merge pull request #108 from jkaninda/refactor
Refactor
2024-10-03 18:19:12 +02:00
Jonas Kaninda
300d2a8205 chore: remove testDatabaseConnection function for scheduled mode 2024-10-03 18:18:47 +02:00
Jonas Kaninda
a4ad0502cf chore: add storage type alt for smallcase and uppercase 2024-10-03 18:17:48 +02:00
f344867edf Merge pull request #107 from jkaninda/refactor
docs: update configuration reference
2024-10-02 04:26:05 +02:00
Jonas Kaninda
d774584f64 docs: update configuration reference 2024-10-02 04:25:35 +02:00
96927cd57e Merge pull request #106 from jkaninda/refactor
Refactor
2024-10-02 04:13:20 +02:00
Jonas Kaninda
ceacfa1d9d docs: update ssh and ftp deployment example 2024-10-02 04:09:42 +02:00
Jonas Kaninda
9380a18b45 refactor: remove old arguments, refactor aws and ssh configuration 2024-10-02 04:07:14 +02:00
Jonas Kaninda
d186071df9 Merge pull request #105 from jkaninda/refactor
chore: update app version
2024-09-30 17:49:21 +02:00
Jonas Kaninda
71429b0e1a chore: update app version 2024-09-30 17:48:56 +02:00
Jonas Kaninda
0bed86ded4 Merge pull request #104 from jkaninda/refactor
chore: add Time Zone
2024-09-30 17:45:38 +02:00
Jonas Kaninda
e891801125 chore: add Time Zone 2024-09-30 17:44:45 +02:00
Jonas Kaninda
01cf8a3392 Merge pull request #103 from jkaninda/refactor
fix: MySQL 8.x -Plugin caching_sha2_password could not be loaded
2024-09-30 07:58:39 +02:00
Jonas Kaninda
efea81833a fix: MySQL 8.x -Plugin caching_sha2_password could not be loaded 2024-09-30 07:57:42 +02:00
Jonas Kaninda
1cbf65d686 Merge pull request #102 from jkaninda/refactor
fix: backup date and time
2024-09-30 02:03:08 +02:00
Jonas Kaninda
73d19913f8 fix: backup date and time 2024-09-30 02:02:37 +02:00
Jonas Kaninda
b0224e43ef Merge pull request #101 from jkaninda/docs
docs: add FTP storage
2024-09-30 00:58:42 +02:00
Jonas Kaninda
fa0485bb5a docs: add FTP storage 2024-09-30 00:58:20 +02:00
Jonas Kaninda
65ef6d3e8f Merge pull request #100 from jkaninda/develop
Merge develop
2024-09-30 00:55:42 +02:00
Jonas Kaninda
a7b6abb101 feat: add ftp backup storage 2024-09-30 00:40:35 +02:00
Jonas Kaninda
3b21c109bc chore: migrate baseos from Ubuntu to Alpine 2024-09-29 20:44:11 +02:00
Jonas Kaninda
a50a1ef6f9 Merge pull request #99 from jkaninda/refactor
refactor: replace function params by config struct
2024-09-29 20:09:02 +02:00
Jonas Kaninda
76bbfa35c4 refactor: replace function params by config struct 2024-09-29 20:08:36 +02:00
Jonas Kaninda
599d93bef4 Merge pull request #98 from jkaninda/refactor
refactoring of code
2024-09-29 19:51:07 +02:00
Jonas Kaninda
247e90f73e refactoring of code 2024-09-29 19:50:26 +02:00
Jonas Kaninda
7d544aca68 Merge pull request #97 from jkaninda/docs
chore: add test configurations before running in scheduled mode
2024-09-29 07:35:45 +02:00
Jonas Kaninda
1722ee0eeb chore: add test configurations before running in scheduled mode 2024-09-29 07:35:27 +02:00
Jonas Kaninda
726fd14831 Merge pull request #96 from jkaninda/docs
docs: add docker recurring backup examples
2024-09-29 07:01:27 +02:00
Jonas Kaninda
fdc88e6064 docs: add docker recurring backup examples 2024-09-29 07:00:55 +02:00
Jonas Kaninda
2ba1b516e9 Merge pull request #95 from jkaninda/docs
docs: fix environment variables table
2024-09-28 21:23:43 +02:00
Jonas Kaninda
301594676b docs: fix environment variables table 2024-09-28 21:23:03 +02:00
Jonas Kaninda
d06f2f2d7e Merge pull request #94 from jkaninda/docs
docs: update deployment example
2024-09-28 21:18:37 +02:00
Jonas Kaninda
2f06bd1c3a docs: update deployment example 2024-09-28 21:17:34 +02:00
Jonas Kaninda
f383f5559d Merge pull request #93 from jkaninda/develop
Merge pull request #91 from jkaninda/cron
2024-09-28 10:45:49 +02:00
Jonas Kaninda
3725809d28 Merge pull request #92 from jkaninda/cron
Cron
2024-09-28 10:45:21 +02:00
Jonas Kaninda
b1598ef7d0 chore: update log message 2024-09-28 10:43:08 +02:00
Jonas Kaninda
e4a83b9851 Merge pull request #91 from jkaninda/cron
Cron
2024-09-28 09:55:27 +02:00
Jonas Kaninda
4b2527f416 chore: define gpg home directory 2024-09-28 09:43:51 +02:00
Jonas Kaninda
e97fc7512a fix: generate backup file name in scheduled mode 2024-09-28 09:18:58 +02:00
Jonas Kaninda
7912ce46ed chore: add cron-expression to get value from flag 2024-09-28 08:32:04 +02:00
Jonas Kaninda
050f5e81bc docs: update scheduled mode deployment 2024-09-28 08:30:53 +02:00
Jonas Kaninda
b39e97b77d refactor: clean up project, delete unused files, variables 2024-09-28 08:01:33 +02:00
Jonas Kaninda
cbb73ae89b chore: migrate backup scheduled mode from linux cron to go cron 2024-09-28 07:26:33 +02:00
Jonas Kaninda
29a58aa26d chore: add cron expression verification 2024-09-28 04:45:03 +02:00
Jonas Kaninda
041e0a07e9 Merge pull request #89 from jkaninda/develop
Develop
2024-09-28 03:42:39 +02:00
Jonas Kaninda
9daac9c654 fix: scheduled mode script, remove port number 2024-09-28 03:38:26 +02:00
Jonas Kaninda
f6098769cd fix: backup database in scheduled mode 2024-09-28 03:06:09 +02:00
Jonas Kaninda
5cdfaa4d94 chore: update version in Dockerfile 2024-09-28 02:31:07 +02:00
Jonas Kaninda
b205cd61ea Fix: Using a password on the command line interface can be insecure warning message 2024-09-28 02:25:42 +02:00
Jonas Kaninda
e1307250e8 Merge pull request #88 from jkaninda/jkaninda-patch-1
Update FUNDING.yml
2024-09-12 07:59:57 +02:00
Jonas Kaninda
17ac951deb Update FUNDING.yml 2024-09-12 07:59:46 +02:00
Jonas Kaninda
6e2e08224d Merge pull request #87 from jkaninda/jkaninda-patch-1
Create FUNDING.yml
2024-09-12 07:55:10 +02:00
Jonas Kaninda
570b775f48 Create FUNDING.yml 2024-09-12 07:54:51 +02:00
Jonas Kaninda
e38e106983 Merge pull request #86 from jkaninda/docs
chore: change notification title
2024-09-12 07:10:36 +02:00
Jonas Kaninda
3040420a09 chore: change notification title 2024-09-12 07:10:09 +02:00
Jonas Kaninda
eac5f70408 Merge pull request #85 from jkaninda/docs
Docs
2024-09-12 06:34:32 +02:00
Jonas Kaninda
3476c6f529 docs: update readme 2024-09-12 06:33:38 +02:00
Jonas Kaninda
1a9c8483f8 chore: add code comment 2024-09-12 06:23:57 +02:00
Jonas Kaninda
f8722f7ae4 Merge pull request #84 from jkaninda/docs
Update Intro
2024-09-12 06:18:09 +02:00
Jonas Kaninda
421bf12910 Update Intro 2024-09-12 06:17:46 +02:00
Jonas Kaninda
3da4a27baa Merge pull request #83 from jkaninda/docs
fix: add exit after database connection test failed
2024-09-11 08:03:44 +02:00
Jonas Kaninda
0881f075ef fix: add exit after database connection test failed 2024-09-11 08:03:16 +02:00
Jonas Kaninda
066e73f8e4 Merge pull request #82 from jkaninda/docs
clean up project
2024-09-11 04:55:01 +02:00
Jonas Kaninda
645243ff77 clean up project 2024-09-11 04:53:24 +02:00
Jonas Kaninda
9384998127 Merge pull request #81 from jkaninda/docs
refactor: add Telegram env in Dockerfile, move telegram notification …
2024-09-11 04:37:50 +02:00
Jonas Kaninda
390e7dad0c refactor: add Telegram env in Dockerfile, move telegram notification to utils 2024-09-11 04:37:02 +02:00
Jonas Kaninda
67ea22385f Merge pull request #80 from jkaninda/develop
remove operation old cmd
2024-09-10 23:15:38 +02:00
Jonas Kaninda
cde82d8cfc remove operation old cmd 2024-09-10 23:14:09 +02:00
Jonas Kaninda
4808f093e5 Merge pull request #79 from jkaninda/develop
Update version
2024-09-10 23:11:28 +02:00
Jonas Kaninda
c7a03861fe Update version 2024-09-10 23:10:24 +02:00
Jonas Kaninda
36ec63d522 Merge pull request #78 from jkaninda/develop
feat: Add Telegram notification
2024-09-10 23:04:12 +02:00
Jonas Kaninda
0f07de1d83 feat: Add Telegram notification 2024-09-10 23:01:26 +02:00
Jonas Kaninda
ae55839996 Merge pull request #77 from jkaninda/docs
docs: update Kubernetes deployment
2024-09-09 07:17:51 +02:00
Jonas Kaninda
a7f7e57a0d docs: update Kubernetes deployment 2024-09-09 07:17:15 +02:00
Jonas Kaninda
b2ddaec93b Merge pull request #76 from jkaninda/docs
docs: add buy me a coffee link
2024-09-05 22:43:09 +02:00
Jonas Kaninda
b3570d774c docs: add buy me a coffee link 2024-09-05 22:42:37 +02:00
Jonas Kaninda
38f7e91c03 Merge pull request #75 from jkaninda/develop
chore: rename environment variable for database migration operation
2024-09-03 07:06:24 +02:00
Jonas Kaninda
07c2935925 chore: rename environment variable for database migration operation 2024-09-03 06:49:26 +02:00
Jonas Kaninda
f3c5585051 Merge pull request #74 from jkaninda/docs
Docs
2024-08-30 21:24:50 +02:00
Jonas Kaninda
7163d030a5 chore: remove dbport from command flag 2024-08-30 21:22:18 +02:00
Jonas Kaninda
a2cec86e73 chore: remove dbport from command flag 2024-08-30 21:21:21 +02:00
Jonas Kaninda
662b73579d feat: add migrate database from a source to a target databse
fix: gpg encrypt permission warning message, update Kubernetes deployment example
2024-08-30 19:58:12 +02:00
c9f8a32de1 Merge pull request #73 from jkaninda/docs
docs: update Kubernetes deployment
2024-08-28 20:35:31 +02:00
8fb008151c docs: update Kubernetes deployment 2024-08-28 20:35:01 +02:00
113c84c885 Merge pull request #72 from jkaninda/docs
docs: update readme
2024-08-21 03:53:15 +02:00
58deb92953 docs: update readme 2024-08-21 03:52:49 +02:00
c41afb8b57 Merge pull request #71 from jkaninda/docs
docs: update readme
2024-08-21 03:51:25 +02:00
02e51a3933 docs: update readme 2024-08-21 03:50:59 +02:00
db4061b64b Merge pull request #70 from jkaninda/docs
docs: update readme
2024-08-21 03:49:58 +02:00
9467b157aa docs: update reamdme 2024-08-21 03:49:15 +02:00
c229ebdc9d Merge pull request #69 from jkaninda/docs
docs: fix grammar
2024-08-20 19:21:24 +02:00
7b701d1740 docs: fix grammar 2024-08-20 19:20:54 +02:00
ad6f190bad Merge pull request #68 from jkaninda/docs
docs: update readme
2024-08-15 06:06:26 +02:00
de4dcaaeca docs: update readme 2024-08-15 06:05:39 +02:00
17c0a99bda Merge pull request #67 from jkaninda/develop
Develop
2024-08-15 05:02:56 +02:00
b1c9abf931 Clean up 2024-08-14 22:28:16 +02:00
a70a893c11 Fix encryption permission issue on Openshift 2024-08-14 22:19:35 +02:00
243e25f4fb Fix encryption permission issue on Openshift 2024-08-14 22:19:02 +02:00
cb0dcf4104 Update docs 2024-08-11 09:49:41 +02:00
d26d8d31c9 Merge pull request #65 from jkaninda/docs
Merge Docs
2024-08-11 09:48:08 +02:00
71d438ba76 Merge branch 'main' of github.com:jkaninda/mysql-bkup into develop 2024-08-11 09:44:00 +02:00
a3fc58af96 Add delete /tmp directory after backup or restore and update docs 2024-08-11 09:38:31 +02:00
08ca6d4a39 Merge pull request #64 from jkaninda/develop
docs: update readme
2024-08-10 11:30:53 +02:00
27b9ab5f36 docs: update readme 2024-08-10 11:29:58 +02:00
6d6db7061b Merge pull request #63 from jkaninda/develop
Develop
2024-08-10 11:28:06 +02:00
d90647aae7 Update app version 2024-08-10 11:22:08 +02:00
5c2c05499f docs: update example 2024-08-10 11:12:43 +02:00
88ada6fefd docs: update example 2024-08-10 11:12:17 +02:00
e6c8b0923d Add Docker entrypont, update docs 2024-08-10 10:50:00 +02:00
59a136039c Merge pull request #62 from jkaninda/docs
docs: update stable version
2024-08-04 23:45:36 +02:00
db835e81c4 docs: update stable version 2024-08-04 23:44:49 +02:00
5b05bcbf0c Merge pull request #61 from jkaninda/docs
docs: add Kubernetes restore Job example
2024-08-04 13:38:10 +02:00
b8277c8464 docs: add Kubernetes restore Job example 2024-08-04 13:37:45 +02:00
63 changed files with 2918 additions and 942 deletions

3
.github/FUNDING.yml vendored Normal file

@@ -0,0 +1,3 @@
# These are supported funding model platforms
ko_fi: jkaninda


@@ -25,8 +25,10 @@ jobs:
       uses: docker/build-push-action@v3
       with:
         push: true
-        file: "./docker/Dockerfile"
+        file: "./Dockerfile"
         platforms: linux/amd64,linux/arm64,linux/arm/v7
+        build-args: |
+          appVersion=develop-${{ github.sha }}
         tags: |
-          "${{env.BUILDKIT_IMAGE}}:develop-${{ github.sha }}"
+          "${{vars.BUILDKIT_IMAGE}}:develop-${{ github.sha }}"


@@ -1,4 +1,4 @@
-name: Release
+name: CI
 on:
   push:
     tags:
@@ -39,11 +39,13 @@ jobs:
       uses: docker/build-push-action@v3
       with:
         push: true
-        file: "./docker/Dockerfile"
+        file: "./Dockerfile"
         platforms: linux/amd64,linux/arm64,linux/arm/v7
+        build-args: |
+          appVersion=${{ env.TAG_NAME }}
         tags: |
-          "${{env.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
-          "${{env.BUILDKIT_IMAGE}}:latest"
-          "ghcr.io/${{env.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
-          "ghcr.io/${{env.BUILDKIT_IMAGE}}:latest"
+          "${{vars.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
+          "${{vars.BUILDKIT_IMAGE}}:latest"
+          "ghcr.io/${{vars.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
+          "ghcr.io/${{vars.BUILDKIT_IMAGE}}:latest"

3
.gitignore vendored

@@ -8,4 +8,5 @@ test.md
 mysql-bkup
 /.DS_Store
 /.idea
 bin
+Makefile

83
Dockerfile Normal file

@@ -0,0 +1,83 @@
FROM golang:1.22.5 AS build
WORKDIR /app
# Copy the source code.
COPY . .
# Installs Go dependencies
RUN go mod download
# Build
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup
FROM alpine:3.20.3
ENV DB_HOST=""
ENV DB_NAME=""
ENV DB_USERNAME=""
ENV DB_PASSWORD=""
ENV DB_PORT=3306
ENV STORAGE=local
ENV AWS_S3_ENDPOINT=""
ENV AWS_S3_BUCKET_NAME=""
ENV AWS_ACCESS_KEY=""
ENV AWS_SECRET_KEY=""
ENV AWS_S3_PATH=""
ENV AWS_REGION="us-west-2"
ENV AWS_DISABLE_SSL="false"
ENV AWS_FORCE_PATH_STYLE="true"
ENV GPG_PASSPHRASE=""
ENV SSH_USER=""
ENV SSH_PASSWORD=""
ENV SSH_HOST=""
ENV SSH_IDENTIFY_FILE=""
ENV SSH_PORT=22
ENV REMOTE_PATH=""
ENV FTP_HOST=""
ENV FTP_PORT=21
ENV FTP_USER=""
ENV FTP_PASSWORD=""
ENV TARGET_DB_HOST=""
ENV TARGET_DB_PORT=3306
ENV TARGET_DB_NAME=""
ENV TARGET_DB_USERNAME=""
ENV TARGET_DB_PASSWORD=""
ENV BACKUP_CRON_EXPRESSION=""
ENV TG_TOKEN=""
ENV TG_CHAT_ID=""
ENV TZ=UTC
ARG WORKDIR="/config"
ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup"
ARG TEMPLATES_DIR="/config/templates"
ARG appVersion="v1.2.12"
ENV VERSION=${appVersion}
LABEL author="Jonas Kaninda"
LABEL version=${appVersion}
RUN apk --update add --no-cache mysql-client mariadb-connector-c tzdata
RUN mkdir $WORKDIR
RUN mkdir $BACKUPDIR
RUN mkdir $TEMPLATES_DIR
RUN mkdir -p $BACKUP_TMP_DIR
RUN chmod 777 $WORKDIR
RUN chmod 777 $BACKUPDIR
RUN chmod 777 $BACKUP_TMP_DIR
RUN chmod 777 $WORKDIR
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
COPY ./templates/* $TEMPLATES_DIR/
RUN chmod +x /usr/local/bin/mysql-bkup
RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
# Create backup script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
chmod +x /usr/local/bin/backup
# Create restore script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
chmod +x /usr/local/bin/restore
# Create migrate script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
chmod +x /usr/local/bin/migrate
WORKDIR $WORKDIR
ENTRYPOINT ["/usr/local/bin/mysql-bkup"]
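
For reference, a quick local sketch of building and running this image, based on the `docker-build` target of the Makefile removed below (network name and database values are illustrative):

```shell
# Build from the repository root; the Dockerfile now lives here
# instead of docker/Dockerfile.
docker build -t jkaninda/mysql-bkup:latest .

# One-off backup against a database reachable on the `web` network.
docker run --rm --network web \
  -v "$PWD/backup:/backup" \
  -e "DB_HOST=mysql" \
  -e "DB_NAME=mydb" \
  -e "DB_USERNAME=user" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup:latest backup -d mydb
```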


@@ -1,46 +0,0 @@
BINARY_NAME=mysql-bkup
include .env
export
run:
go run . backup
build:
go build -o bin/${BINARY_NAME} .
compile:
GOOS=darwin GOARCH=arm64 go build -o bin/${BINARY_NAME}-darwin-arm64 .
GOOS=darwin GOARCH=amd64 go build -o bin/${BINARY_NAME}-darwin-amd64 .
GOOS=linux GOARCH=arm64 go build -o bin/${BINARY_NAME}-linux-arm64 .
GOOS=linux GOARCH=amd64 go build -o bin/${BINARY_NAME}-linux-amd64 .
docker-build:
docker build -f docker/Dockerfile -t jkaninda/mysql-bkup:latest .
docker-run: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --prune --keep-last 2
docker-restore: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup restore -f ${FILE_NAME}
docker-run-scheduled: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --mode scheduled --period "* * * * *"
docker-run-scheduled-s3: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
docker-run-s3: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "AWS_S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage s3 --path /custom-path
docker-restore-s3: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup restore --storage s3 -f ${FILE_NAME} --path /custom-path
docker-run-ssh: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage ssh
docker-restore-ssh: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" jkaninda/mysql-bkup bkup restore --storage ssh -f ${FILE_NAME}
run-docs:
cd docs && bundle exec jekyll serve -H 0.0.0.0 -t

124
README.md

@@ -1,19 +1,23 @@
 # MySQL Backup
-mysql-bkup is a Docker container image that can be used to backup and restore Postgres database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
+MySQL Backup is a Docker container image that can be used to backup, restore and migrate MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, FTP and SSH compatible storage.
 It also supports __encrypting__ your backups using GPG.
 The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
-It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
+It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3, FTP or SSH compatible storage.
-It also supports __encrypting__ your backups using GPG.
+It also supports database __encryption__ using GPG.
 [![Build](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml/badge.svg)](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
 [![Go Report](https://goreportcard.com/badge/github.com/jkaninda/mysql-bkup)](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
 ![Docker Image Size (latest by date)](https://img.shields.io/docker/image-size/jkaninda/mysql-bkup?style=flat-square)
 ![Docker Pulls](https://img.shields.io/docker/pulls/jkaninda/mysql-bkup?style=flat-square)
+<a href="https://ko-fi.com/jkaninda"><img src="https://uploads-ssl.webflow.com/5c14e387dab576fe667689cf/5cbed8a4ae2b88347c06c923_BuyMeACoffee_blue.png" height="20" alt="buy ma a coffee"></a>
+Successfully tested on:
 - Docker
+- Docker in Swarm mode
 - Kubernetes
+- OpenShift
 ## Documentation is found at <https://jkaninda.github.io/mysql-bkup>
@@ -30,13 +34,13 @@ It also supports __encrypting__ your backups using GPG.
 ## Storage:
 - Local
 - AWS S3 or any S3 Alternatives for Object Storage
-- SSH
+- SSH remote server
 ## Quickstart
 ### Simple backup using Docker CLI
-To run a one time backup, bind your local volume to `/backup` in the container and run the `mysql-bkup backup` command:
+To run a one time backup, bind your local volume to `/backup` in the container and run the `backup` command:
 ```shell
 docker run --rm --network your_network_name \
@@ -44,11 +48,17 @@ To run a one time backup, bind your local volume to `/backup` in the container a
   -e "DB_HOST=dbhost" \
   -e "DB_USERNAME=username" \
   -e "DB_PASSWORD=password" \
-  jkaninda/mysql-bkup mysql-bkup backup -d database_name
+  jkaninda/mysql-bkup backup -d database_name
 ```
 Alternatively, pass a `--env-file` in order to use a full config as described below.
+```yaml
+docker run --rm --network your_network_name \
+  --env-file your-env-file \
+  -v $PWD/backup:/backup/ \
+  jkaninda/mysql-bkup backup -d database_name
+```
 ### Simple backup in docker compose file
@@ -61,73 +71,81 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup
+    command: backup
     volumes:
       - ./backup:/backup
     environment:
-      - DB_PORT=5432
-      - DB_HOST=postgres
+      - DB_PORT=3306
+      - DB_HOST=mysql
      - DB_NAME=foo
      - DB_USERNAME=bar
      - DB_PASSWORD=password
+      - TZ=Europe/Paris
    # mysql-bkup container must be connected to the same network with your database
    networks:
      - web
 networks:
   web:
 ```
+### Docker recurring backup
+```shell
+docker run --rm --network network_name \
+  -v $PWD/backup:/backup/ \
+  -e "DB_HOST=hostname" \
+  -e "DB_USERNAME=user" \
+  -e "DB_PASSWORD=password" \
+  jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1m"
+```
+See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
 ## Deploy on Kubernetes
-For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as CronJob.
+For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as Job or CronJob.
-### Simple Kubernetes CronJob usage:
+### Simple Kubernetes backup Job :
 ```yaml
 apiVersion: batch/v1
-kind: CronJob
+kind: Job
 metadata:
-  name: bkup-job
+  name: backup-job
 spec:
-  schedule: "0 1 * * *"
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-            - name: mysql-bkup
-              image: jkaninda/mysql-bkup
-              command:
-                - /bin/sh
-                - -c
-                - mysql-bkup backup -s s3 --path /custom_path
-              env:
-                - name: DB_PORT
-                  value: "5432"
-                - name: DB_HOST
-                  value: ""
-                - name: DB_NAME
-                  value: ""
-                - name: DB_USERNAME
-                  value: ""
-                # Please use secret!
-                - name: DB_PASSWORD
-                  value: ""
-                - name: AWS_S3_ENDPOINT
-                  value: "https://s3.amazonaws.com"
-                - name: AWS_S3_BUCKET_NAME
-                  value: "xxx"
-                - name: AWS_REGION
-                  value: "us-west-2"
-                - name: AWS_ACCESS_KEY
-                  value: "xxxx"
-                - name: AWS_SECRET_KEY
-                  value: "xxxx"
-                - name: AWS_DISABLE_SSL
-                  value: "false"
-          restartPolicy: Never
+  ttlSecondsAfterFinished: 100
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          # In production, it is advised to lock your image tag to a proper
+          # release version instead of using `latest`.
+          # Check https://github.com/jkaninda/mysql-bkup/releases
+          # for a list of available releases.
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - backup -d dbname
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+          env:
+            - name: DB_HOST
+              value: "mysql"
+            - name: DB_USERNAME
+              value: "user"
+            - name: DB_PASSWORD
+              value: "password"
+          volumeMounts:
+            - mountPath: /backup
+              name: backup
+      volumes:
+        - name: backup
+          hostPath:
+            path: /home/toto/backup # directory location on host
+            type: Directory # this field is optional
+      restartPolicy: Never
 ```
 ## Available image registries
@@ -135,8 +153,8 @@ This Docker image is published to both Docker Hub and the GitHub container regis
 Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`:
 ```
-docker pull jkaninda/mysql-bkup:v1.0
-docker pull ghcr.io/jkaninda/mysql-bkup:v1.0
+docker pull jkaninda/mysql-bkup
+docker pull ghcr.io/jkaninda/mysql-bkup
 ```
 Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
@@ -150,7 +168,7 @@ While it may work against different implementations, there are no guarantees abo
 We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
-- The original image is based on `ubuntu` and requires additional tools, making it heavy.
+- The original image is based on `alpine` and requires additional tools, making it heavy.
 - This image is written in Go.
 - `arm64` and `arm/v7` architectures are supported.
 - Docker in Swarm mode is supported.
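
The recurring-backup example added above takes both classic five-field cron syntax and predefined schedules such as `@every 1m` (see the predefined-schedules reference linked in the README). A sketch of both forms; network name, env file and schedule values are illustrative:

```shell
# Standard cron syntax: every day at 01:00.
docker run --rm --network web \
  -v "$PWD/backup:/backup" \
  --env-file backup.env \
  jkaninda/mysql-bkup backup -d mydb --cron-expression "0 1 * * *"

# Predefined-schedule form, assuming @daily is among the documented schedules.
docker run --rm --network web \
  -v "$PWD/backup:/backup" \
  --env-file backup.env \
  jkaninda/mysql-bkup backup -d mydb --cron-expression "@daily"
```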


@@ -1,3 +1,9 @@
+// Package cmd /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package cmd

 import (
@@ -21,8 +27,9 @@ var BackupCmd = &cobra.Command{
 func init() {
 	//Backup
-	BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled")
-	BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time")
+	BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
+	BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
+	BackupCmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression")
 	BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
 	BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "Delete files created more than specified days ago, default 7 days")
 	BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
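
A usage sketch of the flags registered above, as invoked inside the container (database name and schedule are illustrative; the `--prune --keep-last` pairing mirrors the Makefile targets removed earlier):

```shell
# Prune backups older than 7 days after a successful run.
mysql-bkup backup -d mydb --storage local --prune --keep-last 7

# Recurring, uncompressed backup driven by the new cron-expression flag.
mysql-bkup backup -d mydb --disable-compression --cron-expression "0 1 * * *"
```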

27
cmd/migrate.go Normal file

@@ -0,0 +1,27 @@
// Package cmd /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package cmd
import (
"github.com/jkaninda/mysql-bkup/pkg"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
)
var MigrateCmd = &cobra.Command{
Use: "migrate",
Short: "Migrate database from a source database to a target database",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
pkg.StartMigration(cmd)
} else {
utils.Fatal("Error, no argument required")
}
},
}


@@ -24,5 +24,7 @@ var RestoreCmd = &cobra.Command{
 func init() {
 	//Restore
 	RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database")
+	RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
+	RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
 }
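
With `--storage` and `--path` now registered on the restore command itself, a restore sketch (file name and path are illustrative, following the naming used elsewhere in these docs):

```shell
# Restore from the local /backup mount.
docker run --rm --network web \
  -v "$PWD/backup:/backup" \
  --env-file backup.env \
  jkaninda/mysql-bkup restore -f database_20240730_044201.sql.gz

# Restore the same archive from S3 instead.
docker run --rm --network web \
  --env-file backup.env \
  jkaninda/mysql-bkup restore -f database_20240730_044201.sql.gz --storage s3 --path /custom-path
```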


@@ -1,7 +1,9 @@
-// Package cmd /*
+// Package cmd /
-/*
+/*****
-Copyright © 2024 Jonas Kaninda
+@author Jonas Kaninda
-*/
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package cmd

 import (
@@ -30,13 +32,10 @@ func Execute() {
 }

 func init() {
-	rootCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
-	rootCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
 	rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name")
-	rootCmd.PersistentFlags().IntP("port", "p", 3306, "Database port")
-	rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only")
 	rootCmd.AddCommand(VersionCmd)
 	rootCmd.AddCommand(BackupCmd)
 	rootCmd.AddCommand(RestoreCmd)
+	rootCmd.AddCommand(MigrateCmd)
 }


@@ -1,9 +1,11 @@
+// Package cmd /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package cmd
-/*
-Copyright © 2024 Jonas Kaninda
-*/
 import (
 	"fmt"
 	"github.com/spf13/cobra"


@@ -1,67 +0,0 @@
FROM golang:1.22.5 AS build
WORKDIR /app
# Copy the source code.
COPY . .
# Installs Go dependencies
RUN go mod download
# Build
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup
FROM ubuntu:24.04
ENV DB_HOST=""
ENV DB_NAME=""
ENV DB_USERNAME=""
ENV DB_PASSWORD=""
ENV DB_PORT="3306"
ENV STORAGE=local
ENV AWS_S3_ENDPOINT=""
ENV AWS_S3_BUCKET_NAME=""
ENV AWS_ACCESS_KEY=""
ENV AWS_SECRET_KEY=""
ENV AWS_REGION="us-west-2"
ENV AWS_S3_PATH=""
ENV AWS_DISABLE_SSL="false"
ENV GPG_PASSPHRASE=""
ENV SSH_USER=""
ENV SSH_REMOTE_PATH=""
ENV SSH_PASSWORD=""
ENV SSH_HOST_NAME=""
ENV SSH_IDENTIFY_FILE=""
ENV SSH_PORT="22"
ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="v1.0"
ARG WORKDIR="/app"
ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup"
ARG BACKUP_CRON="/etc/cron.d/backup_cron"
ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
LABEL author="Jonas Kaninda"
RUN apt-get update -qq
#RUN apt-get install build-essential libcurl4-openssl-dev libxml2-dev mime-support -y
RUN apt install mysql-client supervisor cron gnupg -y
# Clear cache
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
RUN mkdir $WORKDIR
RUN mkdir $BACKUPDIR
RUN mkdir -p $BACKUP_TMP_DIR
RUN chmod 777 $WORKDIR
RUN chmod 777 $BACKUPDIR
RUN chmod 777 $BACKUP_TMP_DIR
RUN touch $BACKUP_CRON && \
touch $BACKUP_CRON_SCRIPT && \
chmod 777 $BACKUP_CRON && \
chmod 777 $BACKUP_CRON_SCRIPT
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
RUN chmod +x /usr/local/bin/mysql-bkup
RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
WORKDIR $WORKDIR


@@ -1,13 +0,0 @@
[supervisord]
nodaemon=true
user=root
logfile=/var/log/supervisor/supervisord.log
pidfile=/var/run/supervisord.pid
[program:cron]
command = /bin/bash -c "declare -p | grep -Ev '^declare -[[:alpha:]]*r' > /run/supervisord.env && /usr/sbin/cron -f -L 15"
autostart=true
autorestart=true
user = root
stderr_logfile=/var/log/cron.err.log
stdout_logfile=/var/log/cron.out.log


@@ -1,12 +0,0 @@
FROM ruby:3.3.4
ENV LC_ALL C.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
WORKDIR /usr/src/app
COPY . ./
RUN gem install bundler && bundle install
EXPOSE 4000


@@ -13,10 +13,11 @@
 # you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
 # You can create any custom variable you would like, and they will be accessible
 # in the templates via {{ site.myvariable }}.
-title: MySQL database backup
+title: MySQL Backup Docker container image
 email: hi@jonaskaninda.com
 description: >- # this means to ignore newlines until "baseurl:"
-  MySQL Backup and Restore Docker container image. Backup database to AWS S3 storage or SSH remote server.
+  MySQL Backup is a Docker container image that can be used to backup and restore MySQL database.
+  It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
 baseurl: "" # the subpath of your site, e.g. /blog
 url: "jkaninda.github.io/mysql-bkup/" # the base hostname & protocol for your site, e.g. http://example.com


@@ -1,13 +0,0 @@
services:
jekyll:
build:
context: ./
ports:
- 4000:4000
environment:
- JEKYLL_ENV=development
volumes:
- .:/usr/src/app
stdin_open: true
tty: true
command: bundle exec jekyll serve -H 0.0.0.0 -t

BIN
docs/favicon.ico Normal file

Binary file not shown (new image, 4.2 KiB).


@@ -0,0 +1,44 @@
---
title: Backup to FTP remote server
layout: default
parent: How Tos
nav_order: 4
---
# Backup to FTP remote server
As described for SSH backup section, to change the storage of your backup and use FTP Remote server as storage. You need to add `--storage ftp`.
You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `REMOTE_PATH` environment variable.
{: .note }
These environment variables are required for SSH backup `FTP_HOST`, `FTP_USER`, `REMOTE_PATH`, `FTP_PORT` or `FTP_PASSWORD`.
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage ftp -d database
environment:
- DB_PORT=3306
- DB_HOST=postgres
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## FTP config
- FTP_HOST="hostname"
- FTP_PORT=21
- FTP_USER=user
- FTP_PASSWORD=password
- REMOTE_PATH=/home/jkaninda/backups
# pg-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
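
A one-off Docker CLI counterpart to the compose file above; host, credentials and paths are illustrative:

```shell
docker run --rm --network web \
  -e "DB_HOST=mysql" -e "DB_NAME=database" \
  -e "DB_USERNAME=username" -e "DB_PASSWORD=password" \
  -e "FTP_HOST=hostname" -e "FTP_PORT=21" \
  -e "FTP_USER=user" -e "FTP_PASSWORD=password" \
  -e "REMOTE_PATH=/home/jkaninda/backups" \
  jkaninda/mysql-bkup backup --storage ftp -d database
```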


@@ -22,10 +22,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage s3 -d database --path /my-custom-path
+    command: backup --storage s3 -d database --path /my-custom-path
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql
@@ -51,7 +48,7 @@ networks:
 ### Recurring backups to S3
 As explained above, you need just to add AWS environment variables and specify the storage type `--storage s3`.
-In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
+In case you need to use recurring backups, you can use `--cron-expression "0 1 * * *"` flag or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.
 ```yml
 services:
@@ -62,10 +59,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
+    command: backup --storage s3 -d my-database --cron-expression "0 1 * * *"
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql
@@ -78,6 +72,7 @@ services:
       - AWS_REGION="us-west-2"
       - AWS_ACCESS_KEY=xxxx
       - AWS_SECRET_KEY=xxxxx
+      # - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
       ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
       - AWS_DISABLE_SSL="false"
    # mysql-bkup container must be connected to the same network with your database
@@ -110,7 +105,7 @@ spec:
           command:
             - /bin/sh
             - -c
-            - mysql-bkup backup -s s3 --path /custom_path
+            - backup -s s3 --path /custom_path
           env:
             - name: DB_PORT
               value: "3306"


@@ -7,11 +7,11 @@ nav_order: 3
 # Backup to SSH remote server
-As described for s3 backup section, to change the storage of you backup and use S3 as storage. You need to add `--storage ssh` or `--storage remote`.
+As described for s3 backup section, to change the storage of your backup and use SSH Remote server as storage. You need to add `--storage ssh` or `--storage remote`.
-You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `SSH_REMOTE_PATH` environment variable.
+You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `REMOTE_PATH` environment variable.
 {: .note }
-These environment variables are required for SSH backup `SSH_HOST_NAME`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, `SSH_PORT` or `SSH_PASSWORD` if you dont use a private key to access to your server.
+These environment variables are required for SSH backup `SSH_HOST`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, `SSH_PORT` or `SSH_PASSWORD` if you dont use a private key to access to your server.
 Accessing the remote server using password is not recommended, use private key instead.
 ```yml
@@ -23,10 +23,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage remote -d database
+    command: backup --storage remote -d database
     volumes:
       - ./id_ed25519:/tmp/id_ed25519"
     environment:
@@ -36,10 +33,10 @@ services:
       - DB_USERNAME=username
       - DB_PASSWORD=password
       ## SSH config
-      - SSH_HOST_NAME="hostname"
+      - SSH_HOST="hostname"
       - SSH_PORT=22
       - SSH_USER=user
-      - SSH_REMOTE_PATH=/home/jkaninda/backups
+      - REMOTE_PATH=/home/jkaninda/backups
       - SSH_IDENTIFY_FILE=/tmp/id_ed25519
       ## We advise you to use a private jey instead of password
       #- SSH_PASSWORD=password
@@ -55,7 +52,7 @@ networks:
 ### Recurring backups to SSH remote server
 As explained above, you need just to add required environment variables and specify the storage type `--storage ssh`.
-You can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
+You can use `--cron-expression "* * * * *"` or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.
 ```yml
 services:
@@ -66,10 +63,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup -d database --storage ssh --mode scheduled --period "0 1 * * *"
+    command: backup -d database --storage ssh --cron-expression "0 1 * * *"
     volumes:
       - ./id_ed25519:/tmp/id_ed25519"
     environment:
@@ -79,11 +73,12 @@ services:
       - DB_USERNAME=username
       - DB_PASSWORD=password
       ## SSH config
-      - SSH_HOST_NAME="hostname"
+      - SSH_HOST="hostname"
       - SSH_PORT=22
       - SSH_USER=user
-      - SSH_REMOTE_PATH=/home/jkaninda/backups
+      - REMOTE_PATH=/home/jkaninda/backups
       - SSH_IDENTIFY_FILE=/tmp/id_ed25519
+      # - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
       ## We advise you to use a private jey instead of password
       #- SSH_PASSWORD=password
    # mysql-bkup container must be connected to the same network with your database
@@ -117,7 +112,7 @@ spec:
           command:
             - /bin/sh
             - -c
-            - mysql-bkup backup -s ssh
+            - backup -s ssh
           env:
             - name: DB_PORT
               value: "3306"
@@ -130,13 +125,13 @@ spec:
             # Please use secret!
             - name: DB_PASSWORD
               value: ""
-            - name: SSH_HOST_NAME
+            - name: SSH_HOST
               value: ""
             - name: SSH_PORT
               value: "22"
             - name: SSH_USER
               value: "xxx"
-            - name: SSH_REMOTE_PATH
+            - name: REMOTE_PATH
               value: "/home/jkaninda/backups"
             - name: AWS_ACCESS_KEY
               value: "xxxx"


@@ -7,7 +7,7 @@ nav_order: 1
 # Backup database
-To backup the database, you need to add `backup` subcommand to `mysql-bkup` or `bkup`.
+To backup the database, you need to add `backup` command.
 {: .note }
 The default storage is local storage mounted to __/backup__. The backup is compressed by default using gzip. The flag __`disable-compression`__ can be used when you need to disable backup compression.
@@ -27,10 +27,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup -d database
+    command: backup -d database
     volumes:
       - ./backup:/backup
     environment:
@@ -54,10 +51,10 @@ networks:
   -e "DB_HOST=dbhost" \
   -e "DB_USERNAME=username" \
   -e "DB_PASSWORD=password" \
-  jkaninda/mysql-bkup mysql-bkup backup -d database_name
+  jkaninda/mysql-bkup backup -d database_name
 ```
-In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
+In case you need to use recurring backups, you can use `--cron-expression "0 1 * * *"` flag or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.
 ```yml
 services:
@@ -68,10 +65,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup -d database --mode scheduled --period "0 1 * * *"
+    command: backup -d database --cron-expression "0 1 * * *"
     volumes:
       - ./backup:/backup
     environment:
@@ -80,6 +74,7 @@ services:
       - DB_NAME=database
       - DB_USERNAME=username
       - DB_PASSWORD=password
+      - BACKUP_CRON_EXPRESSION=0 1 * * *
    # mysql-bkup container must be connected to the same network with your database
    networks:
      - web


@@ -0,0 +1,303 @@
---
title: Deploy on Kubernetes
layout: default
parent: How Tos
nav_order: 9
---
## Deploy on Kubernetes
To deploy MySQL Backup on Kubernetes, you can use Job to backup or Restore your database.
For recurring backup you can use CronJob, you don't need to run it in scheduled mode. as described bellow.
## Backup to S3 storage
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup
spec:
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup --storage s3
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: Never
```
## Backup Job to SSH remote server
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
- ssh
- --disable-compression
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "xxx"
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "xxx"
- name: SSH_PASSWORD
value: "xxxx"
- name: SSH_REMOTE_PATH
value: "/home/toto/backup"
# Optional, required if you want to encrypt your backup
- name: GPG_PASSPHRASE
value: "xxxx"
restartPolicy: Never
```
## Restore Job
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: restore-job
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- restore
- --storage
- ssh
- --file store_20231219_022941.sql.gz
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "xxx"
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "xxx"
- name: SSH_PASSWORD
value: "xxxx"
- name: SSH_REMOTE_PATH
value: "/home/xxxx/backup"
# Optional, required if your backup was encrypted
#- name: GPG_PASSPHRASE
# value: "xxxx"
restartPolicy: Never
```
## Recurring backup
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: backup-job
spec:
schedule: "* * * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
- ssh
- --disable-compression
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "username"
- name: DB_USERNAME
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "xxx"
- name: SSH_PORT
value: "xxx"
- name: SSH_USER
value: "jkaninda"
- name: SSH_REMOTE_PATH
value: "/home/jkaninda/backup"
- name: SSH_PASSWORD
value: "password"
# Optional, required if you want to encrypt your backup
#- name: GPG_PASSPHRASE
# value: "xxx"
restartPolicy: Never
```
## Kubernetes Rootless
This image also supports Kubernetes security context, you can run it in Rootless environment.
It has been tested on Openshift, it works well.
Deployment on OpenShift is supported, you need to remove `securityContext` section on your yaml file.
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: backup-job
spec:
schedule: "* * * * *"
jobTemplate:
spec:
template:
spec:
securityContext:
runAsUser: 1000
runAsGroup: 3000
fsGroup: 2000
containers:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
- ssh
- --disable-compression
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "xxx"
- name: DB_USERNAME
value: "xxx"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "xxx"
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "jkaninda"
- name: SSH_REMOTE_PATH
value: "/home/jkaninda/backup"
- name: SSH_PASSWORD
value: "password"
# Optional, required if you want to encrypt your backup
#- name: GPG_PASSPHRASE
# value: "xxx"
restartPolicy: OnFailure
```
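
A sketch of applying and checking one of the Job manifests above (manifest file and job names are illustrative):

```shell
kubectl apply -f backup-job.yaml
kubectl logs job/backup-job        # follow the backup output
kubectl delete -f backup-job.yaml  # clean up once the Job has finished
```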


@@ -0,0 +1,6 @@
---
title: Update deprecated configurations
layout: default
parent: How Tos
nav_order: 11
---


@@ -1,27 +1,39 @@
 ---
-title: Encrypt backups using GPG
+title: Encrypt backups
 layout: default
 parent: How Tos
-nav_order: 7
+nav_order: 8
 ---
 # Encrypt backup
-The image supports encrypting backups using GPG out of the box. In case a `GPG_PASSPHRASE` environment variable is set, the backup archive will be encrypted using the given key and saved as a `.sql.gpg` or `.sql.gz.gpg` file.
+The image supports encrypting backups using one of two available methods: GPG with a passphrase or GPG with a public key.
+The image supports encrypting backups using GPG out of the box. In case a `GPG_PASSPHRASE` or `GPG_PUBLIC_KEY` environment variable is set, the backup archive will be encrypted using the given key and saved as a `.sql.gpg` or `.sql.gz.gpg` file.
 {: .warning }
 To restore an encrypted backup, you need to provide the same GPG passphrase used during the backup process.
-To decrypt manually, you need to install gnupg
-### Decrypt backup
+- GPG home directory: `/config/gnupg`
+- Cipher algorithm: `aes256`
+{: .note }
+A backup encrypted using the `GPG passphrase` method can be restored automatically; there is no need to decrypt it before restoration.
+If you used a GPG public key during the backup process, you need to decrypt your backup before restoration, because decryption using a GPG private key is not fully supported.
+To decrypt manually, you need to install `gnupg`:
 ```shell
 gpg --batch --passphrase "my-passphrase" \
 --output database_20240730_044201.sql.gz \
 --decrypt database_20240730_044201.sql.gz.gpg
 ```
-### Backup
+Using your private key:
+```shell
+gpg --output database_20240730_044201.sql.gz --decrypt database_20240730_044201.sql.gz.gpg
+```
+## Using GPG passphrase
 ```yml
 services:
@@ -32,10 +44,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup -d database
+    command: backup -d database
     volumes:
       - ./backup:/backup
     environment:
@@ -51,4 +60,32 @@ services:
       - web
 networks:
   web:
+```
+## Using GPG Public Key
+```yml
+services:
+  mysql-bkup:
+    # In production, it is advised to lock your image tag to a proper
+    # release version instead of using `latest`.
+    # Check https://github.com/jkaninda/mysql-bkup/releases
+    # for a list of available releases.
+    image: jkaninda/mysql-bkup
+    container_name: mysql-bkup
+    command: backup -d database
+    volumes:
+      - ./backup:/backup
+    environment:
+      - DB_PORT=3306
+      - DB_HOST=mysql
+      - DB_NAME=database
+      - DB_USERNAME=username
+      - DB_PASSWORD=password
+      ## Required to encrypt backup
+      - GPG_PUBLIC_KEY=/config/public_key.asc
+    # mysql-bkup container must be connected to the same network with your database
+    networks:
+      - web
+networks:
+  web:
 ```
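If you do not yet have an exported public key to mount at `/config/public_key.asc`, a minimal sketch using standard `gnupg` commands (the e-mail address is illustrative) looks like this:
```shell
# Export an existing GPG public key in ASCII-armored form
gpg --export --armor you@example.com > ./public_key.asc
```
Then mount the exported file into the container at the path referenced by `GPG_PUBLIC_KEY`, e.g. with a compose volume entry such as `- ./public_key.asc:/config/public_key.asc`.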

docs/how-tos/migrate.md (new file)

@@ -0,0 +1,131 @@
---
title: Migrate database
layout: default
parent: How Tos
nav_order: 10
---
# Migrate database
To migrate a database, you need to add the `migrate` command.
{: .note }
MySQL Backup has another great feature: migrating your database from a source database to a target database.
Normally, moving a database from one server to another takes two operations: backing up the source database, then restoring that backup to the target database.
Instead of proceeding like that, you can use the integrated `migrate` command, which performs the migration in a single operation.
{: .warning }
The `migrate` operation is irreversible; please back up your target database before running it.
### Docker compose
```yml
services:
  mysql-bkup:
    # In production, it is advised to lock your image tag to a proper
    # release version instead of using `latest`.
    # Check https://github.com/jkaninda/mysql-bkup/releases
    # for a list of available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: migrate
    volumes:
      - ./backup:/backup
    environment:
      ## Source database
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## Target database
      - TARGET_DB_HOST=target-mysql
      - TARGET_DB_PORT=3306
      - TARGET_DB_NAME=dbname
      - TARGET_DB_USERNAME=username
      - TARGET_DB_PASSWORD=password
    # mysql-bkup container must be connected to the same network with your database
    networks:
      - web
networks:
  web:
```
### Migrate database using Docker CLI
```
## Source database
DB_HOST=mysql
DB_PORT=3306
DB_NAME=dbname
DB_USERNAME=username
DB_PASSWORD=password
## Target database
TARGET_DB_HOST=target-mysql
TARGET_DB_PORT=3306
TARGET_DB_NAME=dbname
TARGET_DB_USERNAME=username
TARGET_DB_PASSWORD=password
```
```shell
docker run --rm --network your_network_name \
  --env-file your-env \
  -v $PWD/backup:/backup/ \
  jkaninda/mysql-bkup migrate
```
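Because the `migrate` operation is irreversible, it is worth spot-checking the target database right after the migration. A minimal check with the standard `mysql` client, where host, credentials and database name mirror the `TARGET_*` values above:
```shell
# List the tables that arrived on the target (values are illustrative)
mysql -h target-mysql -P 3306 -u username -p -e "SHOW TABLES;" dbname
```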
## Kubernetes
```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: migrate-db
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, it is advised to lock your image tag to a proper
          # release version instead of using `latest`.
          # Check https://github.com/jkaninda/mysql-bkup/releases
          # for a list of available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - migrate
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            ## Source Database
            - name: DB_HOST
              value: "mysql"
            - name: DB_PORT
              value: "3306"
            - name: DB_NAME
              value: "dbname"
            - name: DB_USERNAME
              value: "username"
            - name: DB_PASSWORD
              value: "password"
            ## Target Database
            - name: TARGET_DB_HOST
              value: "target-mysql"
            - name: TARGET_DB_PORT
              value: "3306"
            - name: TARGET_DB_NAME
              value: "dbname"
            - name: TARGET_DB_USERNAME
              value: "username"
            - name: TARGET_DB_PASSWORD
              value: "password"
      restartPolicy: Never
```


@@ -0,0 +1,63 @@
---
title: Run multiple backup schedules in the same container
layout: default
parent: How Tos
nav_order: 11
---
Multiple backup schedules with different configurations can be defined by mounting a configuration file at `/config/config.yaml` (or `/config/config.yml`), or by pointing the `BACKUP_CONFIG_FILE` environment variable at it (e.g. `BACKUP_CONFIG_FILE=/backup/config.yaml`).
## Configuration file
```yaml
# cronExpression: "@every 20m"  # optional, for scheduled backups
cronExpression: ""
databases:
  - host: mysql1
    port: 3306
    name: database1
    user: database1
    password: password
    path: /s3-path/database1 # for SSH or FTP, define the full path (e.g. /home/toto/backup/)
  - host: mysql2
    port: 3306
    name: lldap
    user: lldap
    password: password
    path: /s3-path/lldap # for SSH or FTP, define the full path (e.g. /home/toto/backup/)
  - host: mysql3
    port: 3306
    name: keycloak
    user: keycloak
    password: password
    path: /s3-path/keycloak # for SSH or FTP, define the full path (e.g. /home/toto/backup/)
  - host: mysql4
    port: 3306
    name: joplin
    user: joplin
    password: password
    path: /s3-path/joplin # for SSH or FTP, define the full path (e.g. /home/toto/backup/)
```
## Docker compose file
```yaml
services:
  mysql-bkup:
    # In production, it is advised to lock your image tag to a proper
    # release version instead of using `latest`.
    # Check https://github.com/jkaninda/mysql-bkup/releases
    # for a list of available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup
    volumes:
      - ./backup:/backup
    environment:
      ## Multi backup config file
      - BACKUP_CONFIG_FILE=/backup/config.yaml
    # mysql-bkup container must be connected to the same network with your database
    networks:
      - web
networks:
  web:
```
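The same multi-backup setup can be sketched with the Docker CLI; the host paths are illustrative and assume the `config.yaml` shown above sits next to the backup directory:
```shell
docker run --rm --network your_network_name \
  -v "$PWD/backup:/backup" \
  -v "$PWD/config.yaml:/backup/config.yaml" \
  -e "BACKUP_CONFIG_FILE=/backup/config.yaml" \
  jkaninda/mysql-bkup backup
```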


@@ -0,0 +1,170 @@
---
title: Receive notifications
layout: default
parent: How Tos
nav_order: 12
---
Send Email or Telegram notifications on successful or failed backup runs.
### Email
To send email notifications on failed or successful backup runs, provide SMTP credentials, a sender, and a recipient:
```yaml
services:
  mysql-bkup:
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup
    volumes:
      - ./backup:/backup
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      - MAIL_HOST=
      - MAIL_PORT=587
      - MAIL_USERNAME=
      - MAIL_PASSWORD=!
      - MAIL_FROM=
      - MAIL_TO=me@example.com,team@example.com,manager@example.com
      - MAIL_SKIP_TLS=false
      ## Time format for notification
      - TIME_FORMAT=2006-01-02 at 15:04:05
      ## Backup reference, in case you want to identify every backup instance
      - BACKUP_REFERENCE=database/Paris cluster
    networks:
      - web
networks:
  web:
```
### Telegram
```yaml
services:
  mysql-bkup:
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup
    volumes:
      - ./backup:/backup
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      - TG_TOKEN=[BOT ID]:[BOT TOKEN]
      - TG_CHAT_ID=
      ## Time format for notification
      - TIME_FORMAT=2006-01-02 at 15:04:05
      ## Backup reference, in case you want to identify every backup instance
      - BACKUP_REFERENCE=database/Paris cluster
    networks:
      - web
networks:
  web:
```
### Customize notifications
The title and body of the notifications can be tailored to your needs using Go templates.
Template sources must be mounted inside the container at `/config/templates` (see the mount sketch after this list):
- `email.template`: Email notification template
- `telegram.template`: Telegram notification template
- `email-error.template`: Email error notification template
- `telegram-error.template`: Telegram error notification template
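As a minimal mount sketch (the host directory, env file and network name are illustrative), the templates only need to be visible inside the container at `/config/templates`:
```shell
# ./templates contains email.template, telegram.template,
# email-error.template and telegram-error.template
docker run --rm --network your_network_name \
  --env-file your-env \
  -v "$PWD/backup:/backup" \
  -v "$PWD/templates:/config/templates" \
  jkaninda/mysql-bkup backup -d database_name
```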
### Data
Here is a list of all data passed to the template:
- `Database`: Database name
- `StartTime`: Backup process start time
- `EndTime`: Backup process end time
- `Storage`: Backup storage
- `BackupLocation`: Backup location
- `BackupSize`: Backup size
- `BackupReference`: Backup reference (e.g. database/cluster name or server name)
> email.template:
```html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>✅ Database Backup Notification {{.Database}}</title>
</head>
<body>
<h2>Hi,</h2>
<p>Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.</p>
<h3>Backup Details:</h3>
<ul>
<li>Database Name: {{.Database}}</li>
<li>Backup Start Time: {{.StartTime}}</li>
<li>Backup End Time: {{.EndTime}}</li>
<li>Backup Storage: {{.Storage}}</li>
<li>Backup Location: {{.BackupLocation}}</li>
<li>Backup Size: {{.BackupSize}} bytes</li>
<li>Backup Reference: {{.BackupReference}} </li>
</ul>
<p>Best regards,</p>
</body>
</html>
```
> telegram.template
```html
✅ Database Backup Notification {{.Database}}
Hi,
Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.
Backup Details:
- Database Name: {{.Database}}
- Backup Start Time: {{.StartTime}}
- Backup End Time: {{.EndTime}}
- Backup Storage: {{.Storage}}
- Backup Location: {{.BackupLocation}}
- Backup Size: {{.BackupSize}} bytes
- Backup Reference: {{.BackupReference}}
```
> email-error.template
```html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>🔴 Urgent: Database Backup Failure Notification</title>
</head>
<body>
<h2>Hi,</h2>
<p>An error occurred during database backup.</p>
<h3>Failure Details:</h3>
<ul>
<li>Error Message: {{.Error}}</li>
<li>Date: {{.EndTime}}</li>
<li>Backup Reference: {{.BackupReference}} </li>
</ul>
</body>
</html>
```
> telegram-error.template
```html
🔴 Urgent: Database Backup Failure Notification
An error occurred during database backup.
Failure Details:
Error Message: {{.Error}}
Date: {{.EndTime}}
```


@@ -2,12 +2,12 @@
 title: Restore database from AWS S3
 layout: default
 parent: How Tos
-nav_order: 5
+nav_order: 6
 ---
 # Restore database from S3 storage
-To restore the database, you need to add the `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
+To restore the database, you need to add the `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
 {: .note }
 It supports __.sql__ and __.sql.gz__ compressed files.
@@ -23,10 +23,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
+    command: restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
     volumes:
       - ./backup:/backup
     environment:
@@ -52,50 +49,47 @@ networks:
 ## Restore on Kubernetes
-### Simple Kubernetes CronJob usage:
+Simple Kubernetes restore Job:
 ```yaml
 apiVersion: batch/v1
-kind: CronJob
+kind: Job
 metadata:
-  name: bkup-job
+  name: restore-db
 spec:
-  schedule: "0 1 * * *"
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-            - name: mysql-bkup
-              image: jkaninda/mysql-bkup
-              command:
-                - /bin/sh
-                - -c
-                - mysql-bkup restore -s s3 --path /custom_path -f store_20231219_022941.sql.gz
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - restore -s s3 --path /custom_path -f store_20231219_022941.sql.gz
           env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: ""
            - name: DB_USERNAME
              value: ""
            # Please use secret!
            - name: DB_PASSWORD
              value: ""
            - name: AWS_S3_ENDPOINT
              value: "https://s3.amazonaws.com"
            - name: AWS_S3_BUCKET_NAME
              value: "xxx"
            - name: AWS_REGION
              value: "us-west-2"
            - name: AWS_ACCESS_KEY
              value: "xxxx"
            - name: AWS_SECRET_KEY
              value: "xxxx"
            - name: AWS_DISABLE_SSL
              value: "false"
-          restartPolicy: OnFailure
+      restartPolicy: Never
+  backoffLimit: 4
 ```


@@ -2,11 +2,11 @@
 title: Restore database from SSH
 layout: default
 parent: How Tos
-nav_order: 6
+nav_order: 7
 ---
 # Restore database from SSH remote server
-To restore the database from your remote server, you need to add the `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
+To restore the database from your remote server, you need to add the `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
 {: .note }
 It supports __.sql__ and __.sql.gz__ compressed files.
@@ -22,10 +22,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
+    command: restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
     volumes:
       - ./backup:/backup
     environment:
@@ -50,49 +47,47 @@ networks:
 ```
 ## Restore on Kubernetes
-Simple Kubernetes CronJob usage:
+Simple Kubernetes restore Job:
 ```yaml
 apiVersion: batch/v1
-kind: CronJob
+kind: Job
 metadata:
-  name: bkup-job
+  name: restore-db
 spec:
-  schedule: "0 1 * * *"
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-            - name: mysql-bkup
-              image: jkaninda/mysql-bkup
-              command:
-                - /bin/sh
-                - -c
-                - mysql-bkup restore -s ssh -f store_20231219_022941.sql.gz
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - restore -s ssh -f store_20231219_022941.sql.gz
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: ""
            - name: DB_USERNAME
              value: ""
            # Please use secret!
            - name: DB_PASSWORD
              value: ""
            - name: SSH_HOST_NAME
              value: ""
            - name: SSH_PORT
              value: "22"
            - name: SSH_USER
              value: "xxx"
            - name: SSH_REMOTE_PATH
              value: "/home/jkaninda/backups"
            - name: AWS_ACCESS_KEY
              value: "xxxx"
            - name: SSH_IDENTIFY_FILE
              value: "/tmp/id_ed25519"
      restartPolicy: Never
+  backoffLimit: 4
 ```


@@ -2,12 +2,12 @@
 title: Restore database
 layout: default
 parent: How Tos
-nav_order: 4
+nav_order: 5
 ---
 # Restore database
-To restore the database, you need to add the `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
+To restore the database, you need to add the `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
 {: .note }
 It supports __.sql__ and __.sql.gz__ compressed files.
@@ -23,10 +23,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup restore -d database -f store_20231219_022941.sql.gz
+    command: restore -d database -f store_20231219_022941.sql.gz
     volumes:
       - ./backup:/backup
     environment:


@@ -6,7 +6,7 @@ nav_order: 1
 # About mysql-bkup
 {:.no_toc}
-mysql-bkup is a Docker container image that can be used to backup and restore a MySQL database. It supports local storage, AWS S3 or any S3 alternative for object storage, and SSH compatible storage.
+MySQL Backup is a Docker container image that can be used to backup, restore and migrate a MySQL database. It supports local storage, AWS S3 or any S3 alternative for object storage, and FTP and SSH remote storage.
 It also supports __encrypting__ your backups using GPG.
 We are open to receiving stars, PRs, and issues!
@@ -19,7 +19,8 @@
 The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
 It handles __recurring__ backups of MySQL databases on Docker and can be deployed as a __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
-It also supports __encrypting__ your backups using GPG.
+It also supports database __encryption__ using GPG.
 {: .note }
 Code and documentation for the `v1` version are on [this branch][v1-branch].
@@ -32,7 +33,7 @@
 ### Simple backup using Docker CLI
-To run a one-time backup, bind your local volume to `/backup` in the container and run the `mysql-bkup backup` command:
+To run a one-time backup, bind your local volume to `/backup` in the container and run the `backup` command:
 ```shell
 docker run --rm --network your_network_name \
@@ -40,11 +41,18 @@
   -e "DB_HOST=dbhost" \
   -e "DB_USERNAME=username" \
   -e "DB_PASSWORD=password" \
-  jkaninda/mysql-bkup mysql-bkup backup -d database_name
+  jkaninda/mysql-bkup backup -d database_name
 ```
 Alternatively, pass a `--env-file` in order to use a full config as described below.
+```shell
+docker run --rm --network your_network_name \
+  --env-file your-env-file \
+  -v $PWD/backup:/backup/ \
+  jkaninda/mysql-bkup backup -d database_name
+```
 ### Simple backup in docker compose file
 ```yaml
@@ -56,24 +64,77 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup
+    command: backup
     volumes:
       - ./backup:/backup
     environment:
       - DB_PORT=3306
-      - DB_HOST=postgres
+      - DB_HOST=mysql
       - DB_NAME=foo
       - DB_USERNAME=bar
      - DB_PASSWORD=password
+      - TZ=Europe/Paris
    # mysql-bkup container must be connected to the same network with your database
    networks:
      - web
networks:
  web:
 ```
+### Docker recurring backup
+```shell
+docker run --rm --network network_name \
+  -v $PWD/backup:/backup/ \
+  -e "DB_HOST=hostname" \
+  -e "DB_USERNAME=user" \
+  -e "DB_PASSWORD=password" \
+  jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1m"
+```
+See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
+## Kubernetes
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: backup-job
+spec:
+  ttlSecondsAfterFinished: 100
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          # In production, it is advised to lock your image tag to a proper
+          # release version instead of using `latest`.
+          # Check https://github.com/jkaninda/mysql-bkup/releases
+          # for a list of available releases.
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - backup -d dbname
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+          env:
+            - name: DB_HOST
+              value: "mysql"
+            - name: DB_USERNAME
+              value: "user"
+            - name: DB_PASSWORD
+              value: "password"
+          volumeMounts:
+            - mountPath: /backup
+              name: backup
+      volumes:
+        - name: backup
+          hostPath:
+            path: /home/toto/backup # directory location on host
+            type: Directory # this field is optional
+      restartPolicy: Never
+```
 ## Available image registries
@@ -81,8 +142,8 @@
 Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`:
 ```
-docker pull jkaninda/mysql-bkup:v1.0
-docker pull ghcr.io/jkaninda/mysql-bkup:v1.0
+docker pull jkaninda/mysql-bkup
+docker pull ghcr.io/jkaninda/mysql-bkup
 ```
 Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
@@ -96,7 +157,7 @@
 We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
-- The original image is based on `ubuntu` and requires additional tools, making it heavy.
+- The original image is based on `alpine` and requires additional tools, making it heavy.
 - This image is written in Go.
 - `arm64` and `arm/v7` architectures are supported.
 - Docker in Swarm mode is supported.


@@ -21,7 +21,7 @@ In the old version, S3 storage was mounted using s3fs, so we decided to migrate
 | Options    | Shorts | Usage                       |
 |------------|--------|-----------------------------|
 | mysql-bkup | bkup   | CLI utility                 |
 | backup     |        | Backup database operation   |
 | restore    |        | Restore database operation  |
 | history    |        | Show the history of backup  |


@@ -6,7 +6,7 @@ nav_order: 2
 # Configuration reference
-Backup and restore targets, schedule and retention are configured using environment variables or flags.
+Backup, restore and migrate targets, schedule and retention are configured using environment variables or flags.
@@ -19,48 +19,63 @@
 | mysql-bkup            | bkup | CLI utility                                                                            |
 | backup                |      | Backup database operation                                                              |
 | restore               |      | Restore database operation                                                             |
+| migrate               |      | Migrate database from one instance to another one                                     |
 | --storage             | -s   | Storage. local or s3 (default: local)                                                  |
 | --file                | -f   | File name for restoration                                                              |
 | --path                |      | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` |
 | --dbname              | -d   | Database name                                                                          |
 | --port                | -p   | Database port (default: 3306)                                                          |
-| --mode                | -m   | Execution mode. default or scheduled (default: default)                                |
 | --disable-compression |      | Disable database backup compression                                                    |
 | --prune               |      | Delete old backup, default disabled                                                    |
 | --keep-last           |      | Delete old backup created more than specified days ago, default 7 days                 |
-| --period              |      | Crontab period for scheduled mode only. (default: "0 1 * * *")                         |
+| --cron-expression     |      | Backup cron expression, eg: (* * * * *) or @daily                                      |
 | --help                | -h   | Print this help message and exit                                                       |
 | --version             | -V   | Print version information and exit                                                     |
 ## Environment variables
 | Name                   | Requirement                                                    | Description                                                      |
 |------------------------|----------------------------------------------------------------|------------------------------------------------------------------|
 | DB_PORT                | Optional, default 3306                                         | Database port number                                             |
 | DB_HOST                | Required                                                       | Database host                                                    |
 | DB_NAME                | Optional if it was provided from the -d flag                   | Database name                                                    |
 | DB_USERNAME            | Required                                                       | Database user name                                               |
 | DB_PASSWORD            | Required                                                       | Database password                                                |
 | AWS_ACCESS_KEY         | Optional, required for S3 storage                              | AWS S3 Access Key                                                |
 | AWS_SECRET_KEY         | Optional, required for S3 storage                              | AWS S3 Secret Key                                                |
 | AWS_BUCKET_NAME        | Optional, required for S3 storage                              | AWS S3 Bucket Name                                               |
 | AWS_REGION             | Optional, required for S3 storage                              | AWS Region                                                       |
 | AWS_DISABLE_SSL        | Optional, required for S3 storage                              | Disable SSL                                                      |
-| FILE_NAME              | Optional if it was provided from the --file flag               | Database file to restore (extensions: .sql, .sql.gz)             |
-| Gmysql_PASSPHRASE      | Optional, required to encrypt and restore backup               | Gmysql passphrase                                                |
-| SSH_HOST_NAME          | Optional, required for SSH storage                             | ssh remote hostname or ip                                        |
-| SSH_REMOTE_PATH        | Optional, required for SSH storage                             | ssh remote path (/home/toto/backup)                              |
+| AWS_FORCE_PATH_STYLE   | Optional, required for S3 storage                              | Force path style                                                 |
+| FILE_NAME              | Optional if it was provided from the --file flag               | Database file to restore (extensions: .sql, .sql.gz)             |
+| GPG_PASSPHRASE         | Optional, required to encrypt and restore backup               | GPG passphrase                                                   |
+| GPG_PUBLIC_KEY         | Optional, required to encrypt backup                           | GPG public key, used to encrypt backup (/config/public_key.asc)  |
+| BACKUP_CRON_EXPRESSION | Optional if it was provided from the `--cron-expression` flag  | Backup cron expression for docker in scheduled mode              |
+| SSH_HOST               | Optional, required for SSH storage                             | ssh remote hostname or ip                                        |
 | SSH_USER               | Optional, required for SSH storage                             | ssh remote user                                                  |
 | SSH_PASSWORD           | Optional, required for SSH storage                             | ssh remote user's password                                       |
 | SSH_IDENTIFY_FILE      | Optional, required for SSH storage                             | ssh remote user's private key                                    |
 | SSH_PORT               | Optional, required for SSH storage                             | ssh remote server port                                           |
+| REMOTE_PATH            | Optional, required for SSH or FTP storage                      | remote path (/home/toto/backup)                                  |
+| FTP_HOST               | Optional, required for FTP storage                             | FTP host name                                                    |
+| FTP_PORT               | Optional, required for FTP storage                             | FTP server port number                                           |
+| FTP_USER               | Optional, required for FTP storage                             | FTP user                                                         |
+| FTP_PASSWORD           | Optional, required for FTP storage                             | FTP user password                                                |
+| TARGET_DB_HOST         | Optional, required for database migration                      | Target database host                                             |
+| TARGET_DB_PORT         | Optional, required for database migration                      | Target database port                                             |
+| TARGET_DB_NAME         | Optional, required for database migration                      | Target database name                                             |
+| TARGET_DB_USERNAME     | Optional, required for database migration                      | Target database username                                         |
+| TARGET_DB_PASSWORD     | Optional, required for database migration                      | Target database password                                         |
+| TG_TOKEN               | Optional, required for Telegram notification                   | Telegram token (`BOT-ID:BOT-TOKEN`)                              |
+| TG_CHAT_ID             | Optional, required for Telegram notification                   | Telegram Chat ID                                                 |
+| TZ                     | Optional                                                       | Time Zone                                                        |
 ---
 ## Run in Scheduled mode
 This image can be run as a CronJob in Kubernetes for regular backups, which makes deployment on Kubernetes easy since Kubernetes has CronJob resources.
-For Docker, you need to run it in scheduled mode by adding the `--mode scheduled` flag and specify the periodical backup time by adding the `--period "0 1 * * *"` flag.
+For Docker, you need to run it in scheduled mode by adding the `--cron-expression "* * * * *"` flag or by defining the `BACKUP_CRON_EXPRESSION=0 1 * * *` environment variable.
 ## Syntax of crontab (field description)
@@ -102,4 +117,22 @@ Easy to remember format:
 ```conf
 0 1 * * *
 ```
+## Predefined schedules
+You may use one of several pre-defined schedules in place of a cron expression.
+| Entry                  | Description                                | Equivalent To |
+|------------------------|--------------------------------------------|---------------|
+| @yearly (or @annually) | Run once a year, midnight, Jan. 1st        | 0 0 1 1 *     |
+| @monthly               | Run once a month, midnight, first of month | 0 0 1 * *     |
+| @weekly                | Run once a week, midnight between Sat/Sun  | 0 0 * * 0     |
+| @daily (or @midnight)  | Run once a day, midnight                   | 0 0 * * *     |
+| @hourly                | Run once an hour, beginning of hour        | 0 * * * *     |
+### Intervals
+You may also schedule backups at fixed intervals, starting at the time the job is added or cron is run. This is supported by formatting the cron spec like this:
+`@every <duration>`
+where "duration" is a string accepted by Go's `time.ParseDuration`.
+For example, "@every 1h30m10s" would indicate a schedule that activates after 1 hour, 30 minutes, 10 seconds, and then every interval after that.
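As a sketch, such an interval can be passed straight to the `--cron-expression` flag described above; network, credentials and database name are illustrative:
```shell
# Run a backup every 90 minutes, starting when the container comes up
docker run --rm --network your_network_name \
  -v "$PWD/backup:/backup" \
  -e "DB_HOST=mysql" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup -d database_name --cron-expression "@every 1h30m"
```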


@@ -6,10 +6,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage s3 -d my-database
+    command: backup --storage s3 -d my-database
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql


@@ -1,16 +1,17 @@
version: "3" version: "3"
services: services:
mysql-bkup: mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
image: jkaninda/mysql-bkup image: jkaninda/mysql-bkup
container_name: mysql-bkup container_name: mysql-bkup
command: command: backup --dbname database_name
- /bin/sh
- -c
- mysql-bkup backup --dbname database_name --mode scheduled --period "0 1 * * *"
volumes: volumes:
- ./backup:/backup - ./backup:/backup
environment: environment:
- DB_PORT=3306 - DB_PORT=3306
- DB_HOST=mysql - DB_HOST=mysql
- DB_USERNAME=userName - DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD} - DB_PASSWORD=${DB_PASSWORD}
# See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
- BACKUP_CRON_EXPRESSION=@daily #@every 5m|@weekly | @monthly |0 1 * * *


@@ -6,10 +6,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
+    command: backup --storage s3 -d my-database
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql
@@ -24,6 +21,8 @@ services:
       - AWS_SECRET_KEY=xxxxx
       ## If you are using an S3 alternative such as Minio and your Minio instance is not secured, you can change it to true
       - AWS_DISABLE_SSL="false"
+      # See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
+      - BACKUP_CRON_EXPRESSION=@daily # @every 5m | @weekly | @monthly | 0 1 * * *
     # mysql-bkup container must be connected to the same network with your database
     networks:
       - web


@@ -3,10 +3,7 @@ services:
   mysql-bkup:
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --dbname database_name
+    command: backup --dbname database_name
     volumes:
       - ./backup:/backup
     environment:


@@ -1,44 +1,47 @@
-piVersion: batch/v1
-kind: CronJob
-metadata:
-  name: bkup-job
-spec:
-  schedule: "0 1 * * *"
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-            - name: mysql-bkup
-              image: jkaninda/mysql-bkup
-              command:
-                - /bin/sh
-                - -c
-                - mysql-bkup backup -s s3 --path /custom_path
-              env:
-                - name: DB_PORT
-                  value: "3306"
-                - name: DB_HOST
-                  value: ""
-                - name: DB_NAME
-                  value: ""
-                - name: DB_USERNAME
-                  value: ""
-                # Please use secret!
-                - name: DB_PASSWORD
-                  value: ""
-                - name: ACCESS_KEY
-                  value: ""
-                - name: AWS_S3_ENDPOINT
-                  value: "https://s3.amazonaws.com"
-                - name: AWS_S3_BUCKET_NAME
-                  value: "xxx"
-                - name: AWS_REGION
-                  value: "us-west-2"
-                - name: AWS_ACCESS_KEY
-                  value: "xxxx"
-                - name: AWS_SECRET_KEY
-                  value: "xxxx"
-                - name: AWS_DISABLE_SSL
-                  value: "false"
-          restartPolicy: OnFailure
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: backup
+spec:
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          # In production, it is advised to lock your image tag to a proper
+          # release version instead of using `latest`.
+          # Check https://github.com/jkaninda/mysql-bkup/releases
+          # for a list of available releases.
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - backup --storage s3
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+          env:
+            - name: DB_PORT
+              value: "3306"
+            - name: DB_HOST
+              value: ""
+            - name: DB_NAME
+              value: "dbname"
+            - name: DB_USERNAME
+              value: "username"
+            # Please use secret!
+            - name: DB_PASSWORD
+              value: ""
+            - name: AWS_S3_ENDPOINT
+              value: "https://s3.amazonaws.com"
+            - name: AWS_S3_BUCKET_NAME
+              value: "xxx"
+            - name: AWS_REGION
+              value: "us-west-2"
+            - name: AWS_ACCESS_KEY
+              value: "xxxx"
+            - name: AWS_SECRET_KEY
+              value: "xxxx"
+            - name: AWS_DISABLE_SSL
+              value: "false"
+      restartPolicy: Never

go.mod

@@ -5,17 +5,30 @@ go 1.22.5
 require github.com/spf13/pflag v1.0.5
 require (
+	github.com/ProtonMail/gopenpgp/v2 v2.7.5
 	github.com/aws/aws-sdk-go v1.55.3
 	github.com/bramvdbogaerde/go-scp v1.5.0
 	github.com/hpcloud/tail v1.0.0
+	github.com/jlaffaye/ftp v0.2.0
+	github.com/robfig/cron/v3 v3.0.1
 	github.com/spf13/cobra v1.8.0
 	golang.org/x/crypto v0.18.0
+	gopkg.in/yaml.v3 v3.0.1
 )
 require (
+	github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 // indirect
+	github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
+	github.com/cloudflare/circl v1.3.3 // indirect
+	github.com/go-mail/mail v2.3.1+incompatible // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
 	golang.org/x/sys v0.22.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
+	gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
 	gopkg.in/fsnotify.v1 v1.4.7 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 )

go.sum

@@ -1,20 +1,43 @@
+github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs=
+github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
+github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
+github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
+github.com/ProtonMail/gopenpgp/v2 v2.7.5 h1:STOY3vgES59gNgoOt2w0nyHBjKViB/qSg7NjbQWPJkA=
+github.com/ProtonMail/gopenpgp/v2 v2.7.5/go.mod h1:IhkNEDaxec6NyzSI0PlxapinnwPVIESk8/76da3Ct3g=
 github.com/aws/aws-sdk-go v1.55.3 h1:0B5hOX+mIx7I5XPOrjrHlKSDQV/+ypFZpIHOx5LOk3E=
 github.com/aws/aws-sdk-go v1.55.3/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
 github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
 github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
 github.com/bramvdbogaerde/go-scp v1.5.0 h1:a9BinAjTfQh273eh7vd3qUgmBC+bx+3TRDtkZWmIpzM=
 github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
+github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
+github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/go-mail/mail v2.3.1+incompatible h1:UzNOn0k5lpfVtO31cK3hn6I4VEVGhe3lX8AJBAxXExM=
+github.com/go-mail/mail v2.3.1+incompatible/go.mod h1:VPWjmmNyRsWXQZHVHT3g0YbIINUkSmuKOiLIDkWbL6M=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
+github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
+github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
 github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
@@ -23,16 +46,64 @@
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
 golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
 golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
 golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
+gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

main.go

@@ -1,12 +1,11 @@
+// Package main /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package main
-//main
-/*****
-* MySQL Backup & Restore
-* @author Jonas Kaninda
-* @license MIT License <https://opensource.org/licenses/MIT>
-* @link https://github.com/jkaninda/mysql-bkup
-**/
 import "github.com/jkaninda/mysql-bkup/cmd"
 func main() {


@@ -1,13 +1,15 @@
-// Package pkg /*
-/*
-Copyright © 2024 Jonas Kaninda
-*/
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg
 import (
 	"fmt"
-	"github.com/hpcloud/tail"
 	"github.com/jkaninda/mysql-bkup/utils"
+	"github.com/robfig/cron/v3"
 	"github.com/spf13/cobra"
 	"log"
 	"os"
@@ -17,134 +19,159 @@ import (
) )
func StartBackup(cmd *cobra.Command) { func StartBackup(cmd *cobra.Command) {
_, _ = cmd.Flags().GetString("operation") intro()
//Set env //Initialize backup configs
utils.SetEnv("STORAGE_PATH", storagePath) config := initBackupConfig(cmd)
utils.GetEnv(cmd, "dbname", "DB_NAME") //Load backup configuration file
utils.GetEnv(cmd, "port", "DB_PORT") configFile, err := loadConfigFile()
utils.GetEnv(cmd, "period", "SCHEDULE_PERIOD") if err != nil {
dbConf = initDbConfig(cmd)
//Get flag value and set env if config.cronExpression == "" {
remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH") BackupTask(dbConf, config)
storage = utils.GetEnv(cmd, "storage", "STORAGE") } else {
file = utils.GetEnv(cmd, "file", "FILE_NAME") if utils.IsValidCronExpression(config.cronExpression) {
backupRetention, _ := cmd.Flags().GetInt("keep-last") scheduledMode(dbConf, config)
prune, _ := cmd.Flags().GetBool("prune") } else {
disableCompression, _ = cmd.Flags().GetBool("disable-compression") utils.Fatal("Cron expression is not valid: %s", config.cronExpression)
executionMode, _ = cmd.Flags().GetString("mode") }
dbName = os.Getenv("DB_NAME")
gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
//
if gpqPassphrase != "" {
encryption = true
}
//Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
if disableCompression {
backupFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
}
if executionMode == "default" {
switch storage {
case "s3":
s3Backup(backupFileName, disableCompression, prune, backupRetention, encryption)
case "local":
localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
case "ssh", "remote":
sshBackup(backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
case "ftp":
utils.Fatal("Not supported storage type: %s", storage)
default:
localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
} }
} else if executionMode == "scheduled" {
scheduledMode(storage)
} else { } else {
utils.Fatal("Error, unknown execution mode!") startMultiBackup(config, configFile)
} }
} }
// Run in scheduled mode
-func scheduledMode(storage string) {
-	fmt.Println()
-	fmt.Println("**********************************")
-	fmt.Println(" Starting MySQL Bkup... ")
-	fmt.Println("***********************************")
+func scheduledMode(db *dbConfig, config *BackupConfig) {
	utils.Info("Running in Scheduled mode")
-	utils.Info("Execution period %s", os.Getenv("SCHEDULE_PERIOD"))
-	utils.Info("Storage type %s ", storage)
-	//Test database connexion
-	utils.TestDatabaseConnection()
+	utils.Info("Backup cron expression: %s", config.cronExpression)
+	utils.Info("Storage type %s ", config.storage)
+	//Test backup
+	utils.Info("Testing backup configurations...")
+	BackupTask(db, config)
+	utils.Info("Testing backup configurations...done")
	utils.Info("Creating backup job...")
-	CreateCrontabScript(disableCompression, storage)
-	supervisorConfig := "/etc/supervisor/supervisord.conf"
-	// Start Supervisor
-	cmd := exec.Command("supervisord", "-c", supervisorConfig)
-	err := cmd.Start()
-	if err != nil {
-		utils.Fatal(fmt.Sprintf("Failed to start supervisord: %v", err))
-	}
+	// Create a new cron instance
+	c := cron.New()
+	_, err := c.AddFunc(config.cronExpression, func() {
+		BackupTask(db, config)
+	})
+	if err != nil {
+		return
+	}
+	// Start the cron scheduler
+	c.Start()
+	utils.Info("Creating backup job...done")
	utils.Info("Backup job started")
-	defer func() {
-		if err := cmd.Process.Kill(); err != nil {
-			utils.Info("Failed to kill supervisord process: %v", err)
-		} else {
-			utils.Info("Supervisor stopped.")
-		}
-	}()
-	if _, err := os.Stat(cronLogFile); os.IsNotExist(err) {
-		utils.Fatal(fmt.Sprintf("Log file %s does not exist.", cronLogFile))
-	}
-	t, err := tail.TailFile(cronLogFile, tail.Config{Follow: true})
-	if err != nil {
-		utils.Fatal("Failed to tail file: %v", err)
-	}
+	defer c.Stop()
+	select {}
+}
+func BackupTask(db *dbConfig, config *BackupConfig) {
+	utils.Info("Starting backup task...")
+	//Generate file name
+	backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
+	if config.disableCompression {
+		backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20060102_150405"))
+	}
+	config.backupFileName = backupFileName
+	switch config.storage {
+	case "local":
+		localBackup(db, config)
+	case "s3", "S3":
+		s3Backup(db, config)
+	case "ssh", "SSH", "remote":
+		sshBackup(db, config)
+	case "ftp", "FTP":
+		ftpBackup(db, config)
+	default:
+		localBackup(db, config)
+	}
+}
+func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
+	for _, db := range databases {
+		//Check if path is defined in config file
+		if db.Path != "" {
+			bkConfig.remotePath = db.Path
+		}
+		BackupTask(getDatabase(db), bkConfig)
+	}
+}
+func startMultiBackup(bkConfig *BackupConfig, configFile string) {
+	utils.Info("Starting multiple backup jobs...")
+	var conf = &Config{}
+	conf, err := readConf(configFile)
+	if err != nil {
+		utils.Fatal("Error reading config file: %s", err)
+	}
+	//Check if cronExpression is defined in config file
+	if conf.CronExpression != "" {
+		bkConfig.cronExpression = conf.CronExpression
+	}
+	// Check if cronExpression is defined
+	if bkConfig.cronExpression == "" {
+		multiBackupTask(conf.Databases, bkConfig)
+	} else {
+		// Check if cronExpression is valid
+		if utils.IsValidCronExpression(bkConfig.cronExpression) {
+			utils.Info("Running MultiBackup in Scheduled mode")
+			utils.Info("Backup cron expression: %s", bkConfig.cronExpression)
+			utils.Info("Storage type %s ", bkConfig.storage)
+			//Test backup
+			utils.Info("Testing backup configurations...")
+			multiBackupTask(conf.Databases, bkConfig)
+			utils.Info("Testing backup configurations...done")
+			utils.Info("Creating multi backup job...")
+			// Create a new cron instance
+			c := cron.New()
+			_, err := c.AddFunc(bkConfig.cronExpression, func() {
+				multiBackupTask(conf.Databases, bkConfig)
+			})
+			if err != nil {
+				return
+			}
+			// Start the cron scheduler
+			c.Start()
+			utils.Info("Creating multi backup job...done")
+			utils.Info("Backup job started")
+			defer c.Stop()
+			select {}
+		} else {
+			utils.Fatal("Cron expression is not valid: %s", bkConfig.cronExpression)
+		}
+	}
-	// Read and print new lines from the log file
-	for line := range t.Lines {
-		fmt.Println(line.Text)
-	}
}
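The scheduled mode above hands the recurring job to an in-process scheduler instead of supervisord and crontab files. A minimal, self-contained sketch of that pattern, assuming the robfig/cron v3 package (the cron.New()/AddFunc API used in the hunk); the expression and log message are placeholders:

package main

import (
	"log"

	"github.com/robfig/cron/v3"
)

func main() {
	c := cron.New() // standard 5-field cron spec, minute resolution
	_, err := c.AddFunc("*/5 * * * *", func() {
		log.Println("backup task would run here")
	})
	if err != nil {
		log.Fatalf("invalid cron expression: %v", err)
	}
	c.Start()
	defer c.Stop()
	select {} // block forever so the scheduler keeps firing
}

AddFunc validates the expression up front and returns an error for a bad spec, which is why the job can be registered once and then left running behind select {}.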
// BackupDatabase backup database
-func BackupDatabase(backupFileName string, disableCompression bool) {
-	dbHost = os.Getenv("DB_HOST")
-	dbPassword = os.Getenv("DB_PASSWORD")
-	dbUserName = os.Getenv("DB_USERNAME")
-	dbName = os.Getenv("DB_NAME")
-	dbPort = os.Getenv("DB_PORT")
+func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
	storagePath = os.Getenv("STORAGE_PATH")
-	err := utils.CheckEnvVars(dbHVars)
-	if err != nil {
-		utils.Error("Please make sure all required environment variables for database are set")
-		utils.Fatal("Error checking environment variables: %s", err)
-	}
	utils.Info("Starting database backup...")
-	utils.TestDatabaseConnection()
+	err := os.Setenv("MYSQL_PWD", db.dbPassword)
+	if err != nil {
+		return
+	}
+	testDatabaseConnection(db)
	// Backup Database database
	utils.Info("Backing up database...")
-	// Verify is compression is disabled
	if disableCompression {
		// Execute mysqldump
		cmd := exec.Command("mysqldump",
-			"-h", dbHost,
-			"-P", dbPort,
-			"-u", dbUserName,
-			"--password="+dbPassword,
-			dbName,
+			"-h", db.dbHost,
+			"-P", db.dbPort,
+			"-u", db.dbUserName,
+			db.dbName,
		)
		output, err := cmd.Output()
		if err != nil {
@@ -152,7 +179,7 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
		}
		// save output
-		file, err := os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
+		file, err := os.Create(filepath.Join(tmpPath, backupFileName))
		if err != nil {
			log.Fatal(err)
		}
@@ -166,14 +193,14 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
	} else {
		// Execute mysqldump
-		cmd := exec.Command("mysqldump", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName)
+		cmd := exec.Command("mysqldump", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName)
		stdout, err := cmd.StdoutPipe()
		if err != nil {
			log.Fatal(err)
		}
		gzipCmd := exec.Command("gzip")
		gzipCmd.Stdin = stdout
-		gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
+		gzipCmd.Stdout, err = os.Create(filepath.Join(tmpPath, backupFileName))
		gzipCmd.Start()
		if err != nil {
			log.Fatal(err)
@@ -189,93 +216,218 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
	}
}
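The compressed path above pipes mysqldump straight into gzip without spawning a shell, with the password passed via MYSQL_PWD rather than on the command line. A sketch of the same pipeline with explicit error handling and the Wait calls the hunk elides; host, credentials and paths are placeholders:

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	_ = os.Setenv("MYSQL_PWD", "secret") // password via env, not argv

	dump := exec.Command("mysqldump", "-h", "127.0.0.1", "-P", "3306", "-u", "user", "mydb")
	gzip := exec.Command("gzip")

	pipe, err := dump.StdoutPipe()
	if err != nil {
		log.Fatal(err)
	}
	gzip.Stdin = pipe // gzip reads the dump as it is produced

	out, err := os.Create("/tmp/mydb.sql.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	gzip.Stdout = out

	if err := gzip.Start(); err != nil {
		log.Fatal(err)
	}
	if err := dump.Run(); err != nil {
		log.Fatal(err)
	}
	if err := gzip.Wait(); err != nil {
		log.Fatal(err)
	}
}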
-func localBackup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func localBackup(db *dbConfig, config *BackupConfig) {
	utils.Info("Backup database to local storage")
-	BackupDatabase(backupFileName, disableCompression)
-	finalFileName := backupFileName
-	if encrypt {
-		encryptBackup(backupFileName)
-		finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension)
+	startTime = time.Now().Format(utils.TimeFormat())
+	BackupDatabase(db, config.backupFileName, disableCompression)
+	finalFileName := config.backupFileName
+	if config.encryption {
+		encryptBackup(config)
+		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, gpgExtension)
	}
+	fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
+	if err != nil {
+		utils.Error("Error:", err)
+	}
+	//Get backup info
+	backupSize = fileInfo.Size()
	utils.Info("Backup name is %s", finalFileName)
	moveToBackup(finalFileName, storagePath)
+	//Send notification
+	utils.NotifySuccess(&utils.NotificationData{
+		File:           finalFileName,
+		BackupSize:     backupSize,
+		Database:       db.dbName,
+		Storage:        config.storage,
+		BackupLocation: filepath.Join(config.remotePath, finalFileName),
+		StartTime:      startTime,
+		EndTime:        time.Now().Format(utils.TimeFormat()),
+	})
	//Delete old backup
-	if prune {
-		deleteOldBackup(backupRetention)
+	if config.prune {
+		deleteOldBackup(config.backupRetention)
	}
+	//Delete temp
+	deleteTemp()
+	utils.Info("Backup completed successfully")
}
-func s3Backup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func s3Backup(db *dbConfig, config *BackupConfig) {
	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
	s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
+	if config.remotePath != "" {
+		s3Path = config.remotePath
+	}
	utils.Info("Backup database to s3 storage")
+	startTime = time.Now().Format(utils.TimeFormat())
	//Backup database
-	BackupDatabase(backupFileName, disableCompression)
-	finalFileName := backupFileName
-	if encrypt {
-		encryptBackup(backupFileName)
-		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
+	BackupDatabase(db, config.backupFileName, disableCompression)
+	finalFileName := config.backupFileName
+	if config.encryption {
+		encryptBackup(config)
+		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
	}
-	utils.Info("Uploading backup file to S3 storage...")
+	utils.Info("Uploading backup archive to remote storage S3 ... ")
	utils.Info("Backup name is %s", finalFileName)
-	err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
+	err := UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
	if err != nil {
-		utils.Fatal("Error uploading file to S3: %s ", err)
+		utils.Fatal("Error uploading backup archive to S3: %s ", err)
	}
+	//Get backup info
+	fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
+	if err != nil {
+		utils.Error("Error:", err)
+	}
+	backupSize = fileInfo.Size()
	//Delete backup file from tmp folder
-	err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
+	err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
	if err != nil {
		fmt.Println("Error deleting file: ", err)
	}
	// Delete old backup
-	if prune {
-		err := utils.DeleteOldBackup(bucket, s3Path, backupRetention)
+	if config.prune {
+		err := DeleteOldBackup(bucket, s3Path, config.backupRetention)
		if err != nil {
			utils.Fatal("Error deleting old backup from S3: %s ", err)
		}
	}
-	utils.Done("Database has been backed up and uploaded to s3 ")
+	utils.Done("Uploading backup archive to remote storage S3 ... done ")
+	//Send notification
+	utils.NotifySuccess(&utils.NotificationData{
+		File:           finalFileName,
+		BackupSize:     backupSize,
+		Database:       db.dbName,
+		Storage:        config.storage,
+		BackupLocation: filepath.Join(config.remotePath, finalFileName),
+		StartTime:      startTime,
+		EndTime:        time.Now().Format(utils.TimeFormat()),
+	})
+	//Delete temp
+	deleteTemp()
+	utils.Info("Backup completed successfully")
}
-func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func sshBackup(db *dbConfig, config *BackupConfig) {
	utils.Info("Backup database to Remote server")
+	startTime = time.Now().Format(utils.TimeFormat())
	//Backup database
-	BackupDatabase(backupFileName, disableCompression)
-	finalFileName := backupFileName
-	if encrypt {
-		encryptBackup(backupFileName)
-		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
+	BackupDatabase(db, config.backupFileName, disableCompression)
+	finalFileName := config.backupFileName
+	if config.encryption {
+		encryptBackup(config)
+		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
	}
-	utils.Info("Uploading backup file to remote server...")
+	utils.Info("Uploading backup archive to remote storage ... ")
	utils.Info("Backup name is %s", finalFileName)
-	err := CopyToRemote(finalFileName, remotePath)
+	err := CopyToRemote(finalFileName, config.remotePath)
	if err != nil {
		utils.Fatal("Error uploading file to the remote server: %s ", err)
	}
+	//Get backup info
+	fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
+	if err != nil {
+		utils.Error("Error:", err)
+	}
+	backupSize = fileInfo.Size()
	//Delete backup file from tmp folder
	err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
	if err != nil {
-		fmt.Println("Error deleting file: ", err)
+		utils.Error("Error deleting file: %v", err)
	}
-	if prune {
+	if config.prune {
		//TODO: Delete old backup from remote server
		utils.Info("Deleting old backup from a remote server is not implemented yet")
	}
-	utils.Done("Database has been backed up and uploaded to remote server ")
+	utils.Done("Uploading backup archive to remote storage ... done ")
+	//Send notification
+	utils.NotifySuccess(&utils.NotificationData{
+		File:           finalFileName,
+		BackupSize:     backupSize,
+		Database:       db.dbName,
+		Storage:        config.storage,
+		BackupLocation: filepath.Join(config.remotePath, finalFileName),
+		StartTime:      startTime,
+		EndTime:        time.Now().Format(utils.TimeFormat()),
+	})
+	//Delete temp
+	deleteTemp()
+	utils.Info("Backup completed successfully")
}
func ftpBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to the remote FTP server")
startTime = time.Now().Format(utils.TimeFormat())
//Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to the remote FTP server ... ")
utils.Info("Backup name is %s", finalFileName)
err := CopyToFTP(finalFileName, config.remotePath)
if err != nil {
utils.Fatal("Error uploading file to the remote FTP server: %s ", err)
}
//Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error:", err)
}
backupSize = fileInfo.Size()
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error deleting file: %v", err)
}
if config.prune {
//TODO: Delete old backup from remote server
utils.Info("Deleting old backup from a remote server is not implemented yet")
}
utils.Done("Uploading backup archive to the remote FTP server ... done ")
//Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
//Delete temp
deleteTemp()
utils.Info("Backup completed successfully")
}
-func encryptBackup(backupFileName string) {
-	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
-	err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
-	if err != nil {
-		utils.Fatal("Error during encrypting backup %s", err)
-	}
+func encryptBackup(config *BackupConfig) {
+	if config.usingKey {
+		err := encryptWithGPGPublicKey(filepath.Join(tmpPath, config.backupFileName), config.publicKey)
+		if err != nil {
+			utils.Fatal("error during encrypting backup %v", err)
+		}
+	} else if config.passphrase != "" {
+		err := encryptWithGPG(filepath.Join(tmpPath, config.backupFileName), config.passphrase)
+		if err != nil {
+			utils.Fatal("error during encrypting backup %v", err)
+		}
+	}
}


@@ -1,4 +1,274 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
-type Config struct {
+import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"os"
"strconv"
)
type Database struct {
Host string `yaml:"host"`
Port string `yaml:"port"`
Name string `yaml:"name"`
User string `yaml:"user"`
Password string `yaml:"password"`
Path string `yaml:"path"`
}
type Config struct {
Databases []Database `yaml:"databases"`
CronExpression string `yaml:"cronExpression"`
}
type dbConfig struct {
dbHost string
dbPort string
dbName string
dbUserName string
dbPassword string
}
type targetDbConfig struct {
targetDbHost string
targetDbPort string
targetDbUserName string
targetDbPassword string
targetDbName string
}
type TgConfig struct {
Token string
ChatId string
}
type BackupConfig struct {
backupFileName string
backupRetention int
disableCompression bool
prune bool
remotePath string
encryption bool
usingKey bool
passphrase string
publicKey string
storage string
cronExpression string
}
type FTPConfig struct {
host string
user string
password string
port string
remotePath string
}
// SSHConfig holds the SSH connection details
type SSHConfig struct {
user string
password string
hostName string
port string
identifyFile string
}
type AWSConfig struct {
endpoint string
bucket string
accessKey string
secretKey string
region string
disableSsl bool
forcePathStyle bool
}
func initDbConfig(cmd *cobra.Command) *dbConfig {
//Set env
utils.GetEnv(cmd, "dbname", "DB_NAME")
dConf := dbConfig{}
dConf.dbHost = os.Getenv("DB_HOST")
dConf.dbPort = os.Getenv("DB_PORT")
dConf.dbName = os.Getenv("DB_NAME")
dConf.dbUserName = os.Getenv("DB_USERNAME")
dConf.dbPassword = os.Getenv("DB_PASSWORD")
err := utils.CheckEnvVars(dbHVars)
if err != nil {
utils.Error("Please make sure all required environment variables for database are set")
utils.Fatal("Error checking environment variables: %s", err)
}
return &dConf
}
func getDatabase(database Database) *dbConfig {
return &dbConfig{
dbHost: database.Host,
dbPort: database.Port,
dbName: database.Name,
dbUserName: database.User,
dbPassword: database.Password,
}
}
// loadSSHConfig loads the SSH configuration from environment variables
func loadSSHConfig() (*SSHConfig, error) {
utils.GetEnvVariable("SSH_HOST", "SSH_HOST_NAME")
sshVars := []string{"SSH_USER", "SSH_HOST", "SSH_PORT", "REMOTE_PATH"}
err := utils.CheckEnvVars(sshVars)
if err != nil {
return nil, fmt.Errorf("error missing environment variables: %w", err)
}
return &SSHConfig{
user: os.Getenv("SSH_USER"),
password: os.Getenv("SSH_PASSWORD"),
hostName: os.Getenv("SSH_HOST"),
port: os.Getenv("SSH_PORT"),
identifyFile: os.Getenv("SSH_IDENTIFY_FILE"),
}, nil
}
func initFtpConfig() *FTPConfig {
//Initialize data configs
fConfig := FTPConfig{}
fConfig.host = utils.GetEnvVariable("FTP_HOST", "FTP_HOST_NAME")
fConfig.user = os.Getenv("FTP_USER")
fConfig.password = os.Getenv("FTP_PASSWORD")
fConfig.port = os.Getenv("FTP_PORT")
fConfig.remotePath = os.Getenv("REMOTE_PATH")
err := utils.CheckEnvVars(ftpVars)
if err != nil {
utils.Error("Please make sure all required environment variables for FTP are set")
utils.Fatal("Error missing environment variables: %s", err)
}
return &fConfig
}
func initAWSConfig() *AWSConfig {
//Initialize AWS configs
aConfig := AWSConfig{}
aConfig.endpoint = utils.GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
aConfig.accessKey = utils.GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
aConfig.secretKey = utils.GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
aConfig.bucket = utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
aConfig.region = os.Getenv("AWS_REGION")
disableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
if err != nil {
utils.Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
}
forcePathStyle, err := strconv.ParseBool(os.Getenv("AWS_FORCE_PATH_STYLE"))
if err != nil {
utils.Fatal("Unable to parse AWS_FORCE_PATH_STYLE env var: %s", err)
}
aConfig.disableSsl = disableSsl
aConfig.forcePathStyle = forcePathStyle
err = utils.CheckEnvVars(awsVars)
if err != nil {
utils.Error("Please make sure all required environment variables for AWS S3 are set")
utils.Fatal("Error checking environment variables: %s", err)
}
return &aConfig
}
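One consequence of strconv.ParseBool in initAWSConfig above: it rejects the empty string, so AWS_DISABLE_SSL and AWS_FORCE_PATH_STYLE must be set explicitly ("true"/"false") whenever S3 storage is configured; leaving them unset trips the utils.Fatal path. A two-line demonstration:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	_, err := strconv.ParseBool("") // an unset env var reads as ""
	fmt.Println(err)                // strconv.ParseBool: parsing "": invalid syntax
}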
func initBackupConfig(cmd *cobra.Command) *BackupConfig {
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
utils.GetEnv(cmd, "path", "REMOTE_PATH")
//Get flag value and set env
remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
backupRetention, _ := cmd.Flags().GetInt("keep-last")
prune, _ := cmd.Flags().GetBool("prune")
disableCompression, _ = cmd.Flags().GetBool("disable-compression")
_, _ = cmd.Flags().GetString("mode")
passphrase := os.Getenv("GPG_PASSPHRASE")
_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
cronExpression := os.Getenv("BACKUP_CRON_EXPRESSION")
publicKeyFile, err := checkPubKeyFile(os.Getenv("GPG_PUBLIC_KEY"))
if err == nil {
encryption = true
usingKey = true
} else if passphrase != "" {
encryption = true
usingKey = false
}
//Initialize backup configs
config := BackupConfig{}
config.backupRetention = backupRetention
config.disableCompression = disableCompression
config.prune = prune
config.storage = storage
config.encryption = encryption
config.remotePath = remotePath
config.passphrase = passphrase
config.publicKey = publicKeyFile
config.usingKey = usingKey
config.cronExpression = cronExpression
return &config
}
type RestoreConfig struct {
s3Path string
remotePath string
storage string
file string
bucket string
usingKey bool
passphrase string
privateKey string
}
func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "path", "REMOTE_PATH")
//Get flag value and set env
s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
file = utils.GetEnv(cmd, "file", "FILE_NAME")
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
passphrase := os.Getenv("GPG_PASSPHRASE")
privateKeyFile, err := checkPrKeyFile(os.Getenv("GPG_PRIVATE_KEY"))
if err == nil {
usingKey = true
} else if passphrase != "" {
usingKey = false
}
//Initialize restore configs
rConfig := RestoreConfig{}
rConfig.s3Path = s3Path
rConfig.remotePath = remotePath
rConfig.storage = storage
rConfig.bucket = bucket
rConfig.file = file
rConfig.storage = storage
rConfig.passphrase = passphrase
rConfig.usingKey = usingKey
rConfig.privateKey = privateKeyFile
return &rConfig
}
func initTargetDbConfig() *targetDbConfig {
tdbConfig := targetDbConfig{}
tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
tdbConfig.targetDbPort = os.Getenv("TARGET_DB_PORT")
tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME")
tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME")
tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD")
err := utils.CheckEnvVars(tdbRVars)
if err != nil {
utils.Error("Please make sure all required environment variables for the target database are set")
utils.Fatal("Error checking target database environment variables: %s", err)
}
return &tdbConfig
}
func loadConfigFile() (string, error) {
backupConfigFile, err := checkConfigFile(os.Getenv("BACKUP_CONFIG_FILE"))
if err == nil {
return backupConfigFile, nil
}
return "", fmt.Errorf("backup config file not found")
}


@@ -1,42 +1,179 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
	"errors"
	"fmt"
	"github.com/ProtonMail/gopenpgp/v2/crypto"
	"github.com/jkaninda/mysql-bkup/utils"
	"os"
-	"os/exec"
	"strings"
)
-func Decrypt(inputFile string, passphrase string) error {
-	utils.Info("Decrypting backup file: " + inputFile + " ...")
-	cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
-	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stderr
-	err := cmd.Run()
-	if err != nil {
-		return err
-	}
+// decryptWithGPG decrypts backup file using a passphrase
+func decryptWithGPG(inputFile string, passphrase string) error {
+	utils.Info("Decrypting backup using passphrase...")
+	// Read the encrypted file
+	encFileContent, err := os.ReadFile(inputFile)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error reading encrypted file: %s", err))
+	}
+	// Define the passphrase used to encrypt the file
+	_passphrase := []byte(passphrase)
+	// Create a PGP message object from the encrypted file content
+	encryptedMessage := crypto.NewPGPMessage(encFileContent)
+	// Decrypt the message using the passphrase
+	plainMessage, err := crypto.DecryptMessageWithPassword(encryptedMessage, _passphrase)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error decrypting file: %s", err))
+	}
+	// Save the decrypted file (restore it)
+	err = os.WriteFile(RemoveLastExtension(inputFile), plainMessage.GetBinary(), 0644)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error saving decrypted file: %s", err))
+	}
+	utils.Info("Decrypting backup using passphrase...done")
	utils.Info("Backup file decrypted successful!")
	return nil
}
-func Encrypt(inputFile string, passphrase string) error {
-	utils.Info("Encrypting backup...")
-	cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile)
-	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stderr
-	err := cmd.Run()
-	if err != nil {
-		return err
-	}
+// encryptWithGPG encrypts backup using a passphrase
+func encryptWithGPG(inputFile string, passphrase string) error {
+	utils.Info("Encrypting backup using passphrase...")
+	// Read the file to be encrypted
+	plainFileContent, err := os.ReadFile(inputFile)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error reading file: %s", err))
+	}
+	// Define the passphrase to encrypt the file
+	_passphrase := []byte(passphrase)
+	// Create a message object from the file content
+	message := crypto.NewPlainMessage(plainFileContent)
+	// Encrypt the message using the passphrase
+	encryptedMessage, err := crypto.EncryptMessageWithPassword(message, _passphrase)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error encrypting backup file: %s", err))
+	}
+	// Save the encrypted file
+	err = os.WriteFile(fmt.Sprintf("%s.%s", inputFile, gpgExtension), encryptedMessage.GetBinary(), 0644)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error saving encrypted file: %s", err))
+	}
+	utils.Info("Encrypting backup using passphrase...done")
	utils.Info("Backup file encrypted successful!")
	return nil
}
// encryptWithGPGPublicKey encrypts backup using a public key
func encryptWithGPGPublicKey(inputFile string, publicKey string) error {
utils.Info("Encrypting backup using public key...")
// Read the public key
pubKeyBytes, err := os.ReadFile(publicKey)
if err != nil {
return errors.New(fmt.Sprintf("Error reading public key: %s", err))
}
// Create a new keyring with the public key
publicKeyObj, err := crypto.NewKeyFromArmored(string(pubKeyBytes))
if err != nil {
return errors.New(fmt.Sprintf("Error parsing public key: %s", err))
}
keyRing, err := crypto.NewKeyRing(publicKeyObj)
if err != nil {
return errors.New(fmt.Sprintf("Error creating key ring: %v", err))
}
// Read the file to encryptWithGPGPublicKey
fileContent, err := os.ReadFile(inputFile)
if err != nil {
return errors.New(fmt.Sprintf("Error reading file: %v", err))
}
// encryptWithGPG the file
message := crypto.NewPlainMessage(fileContent)
encMessage, err := keyRing.Encrypt(message, nil)
if err != nil {
return errors.New(fmt.Sprintf("Error encrypting file: %v", err))
}
// Save the encrypted file
err = os.WriteFile(fmt.Sprintf("%s.%s", inputFile, gpgExtension), encMessage.GetBinary(), 0644)
if err != nil {
return errors.New(fmt.Sprintf("Error saving encrypted file: %v", err))
}
utils.Info("Encrypting backup using public key...done")
utils.Info("Backup file encrypted successful!")
return nil
}
// decryptWithGPGPrivateKey decrypts backup file using a private key and passphrase.
// privateKey GPG private key
// passphrase GPG passphrase
func decryptWithGPGPrivateKey(inputFile, privateKey, passphrase string) error {
	utils.Info("Decrypting backup using private key...")
// Read the private key
priKeyBytes, err := os.ReadFile(privateKey)
if err != nil {
return errors.New(fmt.Sprintf("Error reading private key: %s", err))
}
	// Read the password for the private key (if it's password-protected)
password := []byte(passphrase)
// Create a key object from the armored private key
privateKeyObj, err := crypto.NewKeyFromArmored(string(priKeyBytes))
if err != nil {
return errors.New(fmt.Sprintf("Error parsing private key: %s", err))
}
// Unlock the private key with the password
if passphrase != "" {
// Unlock the private key with the password
_, err = privateKeyObj.Unlock(password)
if err != nil {
return errors.New(fmt.Sprintf("Error unlocking private key: %s", err))
}
}
// Create a new keyring with the private key
keyRing, err := crypto.NewKeyRing(privateKeyObj)
if err != nil {
return errors.New(fmt.Sprintf("Error creating key ring: %v", err))
}
// Read the encrypted file
encFileContent, err := os.ReadFile(inputFile)
if err != nil {
return errors.New(fmt.Sprintf("Error reading encrypted file: %s", err))
}
// decryptWithGPG the file
encryptedMessage := crypto.NewPGPMessage(encFileContent)
message, err := keyRing.Decrypt(encryptedMessage, nil, 0)
if err != nil {
return errors.New(fmt.Sprintf("Error decrypting file: %s", err))
}
// Save the decrypted file
err = os.WriteFile(RemoveLastExtension(inputFile), message.GetBinary(), 0644)
if err != nil {
return errors.New(fmt.Sprintf("Error saving decrypted file: %s", err))
}
	utils.Info("Decrypting backup using private key...done")
fmt.Println("File successfully decrypted!")
return nil
}
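An in-memory round trip of the passphrase-based helpers above, using the same gopenpgp v2 crypto calls; the passphrase and payload are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/ProtonMail/gopenpgp/v2/crypto"
)

func main() {
	passphrase := []byte("example-passphrase") // placeholder
	plain := crypto.NewPlainMessage([]byte("dump contents"))

	enc, err := crypto.EncryptMessageWithPassword(plain, passphrase)
	if err != nil {
		log.Fatal(err)
	}
	dec, err := crypto.DecryptMessageWithPassword(enc, passphrase)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(dec.GetBinary())) // "dump contents"
}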
func RemoveLastExtension(filename string) string {
	if idx := strings.LastIndex(filename, "."); idx != -1 {
		return filename[:idx]
	}
	return filename
}

pkg/ftp.go Normal file

@@ -0,0 +1,81 @@
package pkg
import (
"fmt"
"github.com/jlaffaye/ftp"
"io"
"os"
"path/filepath"
"time"
)
// initFtpClient initializes and authenticates an FTP client
func initFtpClient() (*ftp.ServerConn, error) {
ftpConfig := initFtpConfig()
ftpClient, err := ftp.Dial(fmt.Sprintf("%s:%s", ftpConfig.host, ftpConfig.port), ftp.DialWithTimeout(5*time.Second))
if err != nil {
return nil, fmt.Errorf("failed to connect to FTP: %w", err)
}
err = ftpClient.Login(ftpConfig.user, ftpConfig.password)
if err != nil {
return nil, fmt.Errorf("failed to log in to FTP: %w", err)
}
return ftpClient, nil
}
// CopyToFTP uploads a file to the remote FTP server
func CopyToFTP(fileName, remotePath string) (err error) {
ftpConfig := initFtpConfig()
ftpClient, err := initFtpClient()
if err != nil {
return err
}
defer ftpClient.Quit()
filePath := filepath.Join(tmpPath, fileName)
file, err := os.Open(filePath)
if err != nil {
return fmt.Errorf("failed to open file %s: %w", fileName, err)
}
defer file.Close()
remoteFilePath := filepath.Join(ftpConfig.remotePath, fileName)
err = ftpClient.Stor(remoteFilePath, file)
if err != nil {
return fmt.Errorf("failed to upload file %s: %w", fileName, err)
}
return nil
}
// CopyFromFTP downloads a file from the remote FTP server
func CopyFromFTP(fileName, remotePath string) (err error) {
ftpClient, err := initFtpClient()
if err != nil {
return err
}
defer ftpClient.Quit()
remoteFilePath := filepath.Join(remotePath, fileName)
r, err := ftpClient.Retr(remoteFilePath)
if err != nil {
return fmt.Errorf("failed to retrieve file %s: %w", fileName, err)
}
defer r.Close()
localFilePath := filepath.Join(tmpPath, fileName)
outFile, err := os.Create(localFilePath)
if err != nil {
return fmt.Errorf("failed to create local file %s: %w", fileName, err)
}
defer outFile.Close()
_, err = io.Copy(outFile, r)
if err != nil {
return fmt.Errorf("failed to copy data to local file %s: %w", fileName, err)
}
return nil
}
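A minimal upload against the same jlaffaye/ftp API as initFtpClient and CopyToFTP above; host and credentials are placeholders:

package main

import (
	"log"
	"strings"
	"time"

	"github.com/jlaffaye/ftp"
)

func main() {
	conn, err := ftp.Dial("ftp.example.com:21", ftp.DialWithTimeout(5*time.Second))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Quit()

	if err := conn.Login("user", "password"); err != nil {
		log.Fatal(err)
	}
	// Stor takes any io.Reader; an *os.File works the same way.
	if err := conn.Stor("backup/hello.txt", strings.NewReader("hello")); err != nil {
		log.Fatal(err)
	}
}

Note that CopyToFTP builds the remote path with filepath.Join, which uses the client OS separator; that yields "/"-separated paths on the Linux images this tool ships in, but would not on Windows.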


@@ -1,9 +1,18 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
+	"bytes"
	"fmt"
	"github.com/jkaninda/mysql-bkup/utils"
+	"gopkg.in/yaml.v3"
	"os"
+	"os/exec"
	"path/filepath"
	"time"
)
@@ -71,4 +80,134 @@ func deleteOldBackup(retentionDays int) {
		utils.Fatal(fmt.Sprintf("Error: %s", err))
		return
	}
utils.Done("Deleting old backups...done")
}
func deleteTemp() {
utils.Info("Deleting %s ...", tmpPath)
err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// Check if the current item is a file
if !info.IsDir() {
// Delete the file
err = os.Remove(path)
if err != nil {
return err
}
}
return nil
})
if err != nil {
utils.Error("Error deleting files: %v", err)
} else {
utils.Info("Deleting %s ... done", tmpPath)
}
}
// testDatabaseConnection tests the database connection
func testDatabaseConnection(db *dbConfig) {
err := os.Setenv("MYSQL_PWD", db.dbPassword)
if err != nil {
return
}
utils.Info("Connecting to %s database ...", db.dbName)
cmd := exec.Command("mysql", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName, "-e", "quit")
// Capture the output
var out bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &out
err = cmd.Run()
if err != nil {
utils.Fatal("Error testing database connection: %v\nOutput: %s", err, out.String())
}
utils.Info("Successfully connected to %s database", db.dbName)
}
func intro() {
utils.Info("Starting MySQL Backup...")
utils.Info("Copyright (c) 2024 Jonas Kaninda ")
}
func checkPubKeyFile(pubKey string) (string, error) {
// Define possible key file names
keyFiles := []string{filepath.Join(gpgHome, "public_key.asc"), filepath.Join(gpgHome, "public_key.gpg"), pubKey}
// Loop through key file names and check if they exist
for _, keyFile := range keyFiles {
if _, err := os.Stat(keyFile); err == nil {
// File exists
return keyFile, nil
} else if os.IsNotExist(err) {
// File does not exist, continue to the next one
continue
} else {
// An unexpected error occurred
return "", err
}
}
// Return an error if neither file exists
return "", fmt.Errorf("no public key file found")
}
func checkPrKeyFile(prKey string) (string, error) {
// Define possible key file names
keyFiles := []string{filepath.Join(gpgHome, "private_key.asc"), filepath.Join(gpgHome, "private_key.gpg"), prKey}
// Loop through key file names and check if they exist
for _, keyFile := range keyFiles {
if _, err := os.Stat(keyFile); err == nil {
// File exists
return keyFile, nil
} else if os.IsNotExist(err) {
// File does not exist, continue to the next one
continue
} else {
// An unexpected error occurred
return "", err
}
}
// Return an error if neither file exists
return "", fmt.Errorf("no public key file found")
}
func readConf(configFile string) (*Config, error) {
//configFile := filepath.Join("./", filename)
if utils.FileExists(configFile) {
buf, err := os.ReadFile(configFile)
if err != nil {
return nil, err
}
c := &Config{}
err = yaml.Unmarshal(buf, c)
if err != nil {
return nil, fmt.Errorf("in file %q: %w", configFile, err)
}
return c, err
}
return nil, fmt.Errorf("config file %q not found", configFile)
}
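The Database and Config types that readConf decodes into are defined in pkg/config.go above; a sketch of the expected file shape, unmarshalled with the same gopkg.in/yaml.v3 call (all values are placeholders):

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"
)

type Database struct {
	Host     string `yaml:"host"`
	Port     string `yaml:"port"`
	Name     string `yaml:"name"`
	User     string `yaml:"user"`
	Password string `yaml:"password"`
	Path     string `yaml:"path"`
}

type Config struct {
	Databases      []Database `yaml:"databases"`
	CronExpression string     `yaml:"cronExpression"`
}

func main() {
	raw := []byte(`
databases:
  - host: mysql1
    port: "3306"
    name: app
    user: app
    password: secret
    path: /custom/remote/path
cronExpression: "0 2 * * *"
`)
	var c Config
	if err := yaml.Unmarshal(raw, &c); err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(c.Databases), c.CronExpression)
}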
func checkConfigFile(filePath string) (string, error) {
// Define possible config file names
configFiles := []string{filepath.Join(workingDir, "config.yaml"), filepath.Join(workingDir, "config.yml"), filePath}
// Loop through config file names and check if they exist
for _, configFile := range configFiles {
if _, err := os.Stat(configFile); err == nil {
// File exists
return configFile, nil
} else if os.IsNotExist(err) {
// File does not exist, continue to the next one
continue
} else {
// An unexpected error occurred
return "", err
}
}
// Return an error if neither file exists
return "", fmt.Errorf("no config file found")
}

pkg/migrate.go Normal file

@@ -0,0 +1,42 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"time"
)
func StartMigration(cmd *cobra.Command) {
intro()
utils.Info("Starting database migration...")
//Get DB config
dbConf = initDbConfig(cmd)
targetDbConf = initTargetDbConfig()
//Defining the target database variables
newDbConfig := dbConfig{}
newDbConfig.dbHost = targetDbConf.targetDbHost
newDbConfig.dbPort = targetDbConf.targetDbPort
newDbConfig.dbName = targetDbConf.targetDbName
newDbConfig.dbUserName = targetDbConf.targetDbUserName
newDbConfig.dbPassword = targetDbConf.targetDbPassword
//Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
conf := &RestoreConfig{}
conf.file = backupFileName
//Backup source Database
BackupDatabase(dbConf, backupFileName, true)
//Restore source database into target database
utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
RestoreDatabase(&newDbConfig, conf)
utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
utils.Info("Database migration completed.")
}


@@ -1,7 +1,12 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
+	"fmt"
	"github.com/jkaninda/mysql-bkup/utils"
	"github.com/spf13/cobra"
	"os"
@@ -10,114 +15,121 @@ import (
)
func StartRestore(cmd *cobra.Command) {
+	intro()
+	dbConf = initDbConfig(cmd)
+	restoreConf := initRestoreConfig(cmd)
-	//Set env
-	utils.SetEnv("STORAGE_PATH", storagePath)
-	utils.GetEnv(cmd, "dbname", "DB_NAME")
-	utils.GetEnv(cmd, "port", "DB_PORT")
-	//Get flag value and set env
-	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
-	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
-	storage = utils.GetEnv(cmd, "storage", "STORAGE")
-	file = utils.GetEnv(cmd, "file", "FILE_NAME")
-	executionMode, _ = cmd.Flags().GetString("mode")
-	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
-	switch storage {
-	case "s3":
-		restoreFromS3(file, bucket, s3Path)
+	switch restoreConf.storage {
	case "local":
		utils.Info("Restore database from local")
-		copyToTmp(storagePath, file)
-		RestoreDatabase(file)
-	case "ssh":
-		restoreFromRemote(file, remotePath)
-	case "ftp":
-		utils.Fatal("Restore from FTP is not yet supported")
+		copyToTmp(storagePath, restoreConf.file)
+		RestoreDatabase(dbConf, restoreConf)
+	case "s3", "S3":
+		restoreFromS3(dbConf, restoreConf)
+	case "ssh", "SSH", "remote":
+		restoreFromRemote(dbConf, restoreConf)
+	case "ftp", "FTP":
+		restoreFromFTP(dbConf, restoreConf)
	default:
		utils.Info("Restore database from local")
-		RestoreDatabase(file)
+		copyToTmp(storagePath, restoreConf.file)
+		RestoreDatabase(dbConf, restoreConf)
	}
}
-func restoreFromS3(file, bucket, s3Path string) {
+func restoreFromS3(db *dbConfig, conf *RestoreConfig) {
	utils.Info("Restore database from s3")
-	err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
+	err := DownloadFile(tmpPath, conf.file, conf.bucket, conf.s3Path)
	if err != nil {
-		utils.Fatal(fmt.Sprintf("Error download file from s3 %s %s", file, err))
+		utils.Fatal("Error download file from s3 %s %v ", conf.file, err)
	}
-	RestoreDatabase(file)
+	RestoreDatabase(db, conf)
}
-func restoreFromRemote(file, remotePath string) {
+func restoreFromRemote(db *dbConfig, conf *RestoreConfig) {
	utils.Info("Restore database from remote server")
-	err := CopyFromRemote(file, remotePath)
+	err := CopyFromRemote(conf.file, conf.remotePath)
	if err != nil {
-		utils.Fatal(fmt.Sprintf("Error download file from remote server: ", filepath.Join(remotePath, file), err))
+		utils.Fatal("Error download file from remote server: %s %v", filepath.Join(conf.remotePath, conf.file), err)
	}
-	RestoreDatabase(file)
+	RestoreDatabase(db, conf)
}
+func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
+	utils.Info("Restore database from FTP server")
+	err := CopyFromFTP(conf.file, conf.remotePath)
+	if err != nil {
+		utils.Fatal("Error download file from FTP server: %s %v", filepath.Join(conf.remotePath, conf.file), err)
+	}
+	RestoreDatabase(db, conf)
+}
// RestoreDatabase restore database
-func RestoreDatabase(file string) {
-	dbHost = os.Getenv("DB_HOST")
-	dbPassword = os.Getenv("DB_PASSWORD")
-	dbUserName = os.Getenv("DB_USERNAME")
-	dbName = os.Getenv("DB_NAME")
-	dbPort = os.Getenv("DB_PORT")
-	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
-	if file == "" {
+func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
+	if conf.file == "" {
		utils.Fatal("Error, file required")
	}
-	err := utils.CheckEnvVars(dbHVars)
-	if err != nil {
-		utils.Error("Please make sure all required environment variables for database are set")
-		utils.Fatal("Error checking environment variables: %s", err)
-	}
-	extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
+	extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
	if extension == ".gpg" {
-		if gpgPassphrase == "" {
-			utils.Fatal("Error: GPG passphrase is required, your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE environment variable is required.")
-		} else {
-			//Decrypt file
-			err := Decrypt(filepath.Join(tmpPath, file), gpgPassphrase)
-			if err != nil {
-				utils.Fatal("Error decrypting file %s %v", file, err)
-			}
-			//Update file name
-			file = RemoveLastExtension(file)
-		}
+		if conf.usingKey {
+			utils.Warn("Backup decryption using a private key is not fully supported")
+			err := decryptWithGPGPrivateKey(filepath.Join(tmpPath, conf.file), conf.privateKey, conf.passphrase)
+			if err != nil {
+				utils.Fatal("error during decrypting backup %v", err)
+			}
+		} else {
+			if conf.passphrase == "" {
+				utils.Error("Error, passphrase or private key required")
+				utils.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.")
+			} else {
+				//decryptWithGPG file
+				err := decryptWithGPG(filepath.Join(tmpPath, conf.file), conf.passphrase)
+				if err != nil {
+					utils.Fatal("Error decrypting file %s %v", file, err)
+				}
+				//Update file name
+				conf.file = RemoveLastExtension(file)
+			}
+		}
	}
-	if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
-		utils.TestDatabaseConnection()
+	if utils.FileExists(filepath.Join(tmpPath, conf.file)) {
+		err := os.Setenv("MYSQL_PWD", db.dbPassword)
+		if err != nil {
+			return
+		}
+		testDatabaseConnection(db)
+		utils.Info("Restoring database...")
-		extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
+		extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
		// Restore from compressed file / .sql.gz
		if extension == ".gz" {
-			str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
-			_, err := exec.Command("bash", "-c", str).Output()
+			str := "zcat " + filepath.Join(tmpPath, conf.file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
+			_, err := exec.Command("sh", "-c", str).Output()
			if err != nil {
				utils.Fatal("Error, in restoring the database %v", err)
			}
+			utils.Info("Restoring database... done")
			utils.Done("Database has been restored")
+			//Delete temp
+			deleteTemp()
		} else if extension == ".sql" {
			//Restore from sql file
-			str := "cat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
-			_, err := exec.Command("bash", "-c", str).Output()
+			str := "cat " + filepath.Join(tmpPath, conf.file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
+			_, err := exec.Command("sh", "-c", str).Output()
			if err != nil {
-				utils.Fatal(fmt.Sprintf("Error in restoring the database %s", err))
+				utils.Fatal("Error in restoring the database %v", err)
			}
+			utils.Info("Restoring database... done")
			utils.Done("Database has been restored")
+			//Delete temp
+			deleteTemp()
		} else {
-			utils.Fatal(fmt.Sprintf("Unknown file extension %s", extension))
+			utils.Fatal("Unknown file extension %s", extension)
		}
	} else {
-		utils.Fatal(fmt.Sprintf("File not found in %s", fmt.Sprintf("%s/%s", tmpPath, file)))
+		utils.Fatal("File not found in %s", filepath.Join(tmpPath, conf.file))
	}
}
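The restore path shells out to a zcat-into-mysql pipeline via sh -c, with the password supplied through MYSQL_PWD rather than on the command line. A standalone sketch of that call; host, user and file names are placeholders:

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	_ = os.Setenv("MYSQL_PWD", "secret") // read by the mysql client
	cmd := "zcat /tmp/backup/db.sql.gz | mysql -h 127.0.0.1 -P 3306 -u user mydb"
	if out, err := exec.Command("sh", "-c", cmd).CombinedOutput(); err != nil {
		log.Fatalf("restore failed: %v\n%s", err, out)
	}
}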


@@ -1,56 +1,35 @@
-package utils
+// Package pkg
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
+package pkg
import (
	"bytes"
-	"fmt"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
-	"log"
+	"github.com/jkaninda/mysql-bkup/utils"
	"net/http"
	"os"
	"path/filepath"
-	"strconv"
	"time"
)
// CreateSession creates a new AWS session
func CreateSession() (*session.Session, error) {
-	// AwsVars Required environment variables for AWS S3 storage
-	var awsVars = []string{
-		"AWS_S3_ENDPOINT",
-		"AWS_S3_BUCKET_NAME",
-		"AWS_ACCESS_KEY",
-		"AWS_SECRET_KEY",
-		"AWS_REGION",
-	}
-	endPoint := GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
-	accessKey := GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
-	secretKey := GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
-	_ = GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
-	region := os.Getenv("AWS_REGION")
-	awsDisableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
-	if err != nil {
-		Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
-	}
-	err = CheckEnvVars(awsVars)
-	if err != nil {
-		Fatal("Error checking environment variables\n: %s", err)
-	}
-	// S3 Config
+	awsConfig := initAWSConfig()
+	// Configure to use MinIO Server
	s3Config := &aws.Config{
-		Credentials:      credentials.NewStaticCredentials(accessKey, secretKey, ""),
-		Endpoint:         aws.String(endPoint),
-		Region:           aws.String(region),
-		DisableSSL:       aws.Bool(awsDisableSsl),
-		S3ForcePathStyle: aws.Bool(true),
+		Credentials:      credentials.NewStaticCredentials(awsConfig.accessKey, awsConfig.secretKey, ""),
+		Endpoint:         aws.String(awsConfig.endpoint),
+		Region:           aws.String(awsConfig.region),
+		DisableSSL:       aws.Bool(awsConfig.disableSsl),
+		S3ForcePathStyle: aws.Bool(awsConfig.forcePathStyle),
	}
	return session.NewSession(s3Config)
@@ -102,10 +81,10 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
	if err != nil {
		return err
	}
-	Info("Download backup from S3 storage...")
+	utils.Info("Download data from S3 storage...")
	file, err := os.Create(filepath.Join(destinationPath, key))
	if err != nil {
-		fmt.Println("Failed to create file", err)
+		utils.Error("Failed to create file", err)
		return err
	}
	defer file.Close()
@@ -119,10 +98,10 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
		Key:    aws.String(objectKey),
	})
	if err != nil {
-		fmt.Println("Failed to download file", err)
+		utils.Error("Failed to download file %s", key)
		return err
	}
-	Info(fmt.Sprintf("Backup downloaded: ", file.Name(), " bytes size ", numBytes))
+	utils.Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)
	return nil
}
@@ -152,18 +131,18 @@ func DeleteOldBackup(bucket, prefix string, retention int) error {
				Key:    object.Key,
			})
			if err != nil {
-				log.Printf("Failed to delete object %s: %v", *object.Key, err)
+				utils.Info("Failed to delete object %s: %v", *object.Key, err)
			} else {
-				fmt.Printf("Deleted object %s\n", *object.Key)
+				utils.Info("Deleted object %s\n", *object.Key)
			}
		}
	}
	return !lastPage
})
if err != nil {
-	log.Fatalf("Failed to list objects: %v", err)
+	utils.Error("Failed to list objects: %v", err)
}
-fmt.Println("Finished deleting old files.")
+utils.Info("Finished deleting old files.")
return nil
}
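UploadFileToS3 itself is not shown in this hunk; under the same session configuration, an upload with the SDK's s3manager package would look roughly like this sketch (endpoint, bucket, key and credentials are placeholders):

package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	// Same aws.Config shape as CreateSession above, suitable for
	// S3-compatible endpoints such as MinIO.
	sess, err := session.NewSession(&aws.Config{
		Credentials:      credentials.NewStaticCredentials("access", "secret", ""),
		Endpoint:         aws.String("http://minio:9000"),
		Region:           aws.String("us-east-1"),
		DisableSSL:       aws.Bool(true),
		S3ForcePathStyle: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Open("/tmp/backup/db.sql.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	_, err = s3manager.NewUploader(sess).Upload(&s3manager.UploadInput{
		Bucket: aws.String("backups"),
		Key:    aws.String("mysql/db.sql.gz"),
		Body:   f,
	})
	if err != nil {
		log.Fatal(err)
	}
}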


@@ -1,3 +1,9 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
@@ -12,83 +18,73 @@ import (
	"path/filepath"
)
-func CopyToRemote(fileName, remotePath string) error {
-	sshUser := os.Getenv("SSH_USER")
-	sshPassword := os.Getenv("SSH_PASSWORD")
-	sshHostName := os.Getenv("SSH_HOST_NAME")
-	sshPort := os.Getenv("SSH_PORT")
-	sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
-	err := utils.CheckEnvVars(sshHVars)
-	if err != nil {
-		utils.Error("Error checking environment variables: %s", err)
-		os.Exit(1)
-	}
-	clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
-	if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
-		clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
+// createSSHClientConfig sets up the SSH client configuration based on the provided SSHConfig
+func createSSHClientConfig(sshConfig *SSHConfig) (ssh.ClientConfig, error) {
+	if sshConfig.identifyFile != "" && utils.FileExists(sshConfig.identifyFile) {
+		return auth.PrivateKey(sshConfig.user, sshConfig.identifyFile, ssh.InsecureIgnoreHostKey())
	} else {
-		if sshPassword == "" {
-			return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty")
+		if sshConfig.password == "" {
+			return ssh.ClientConfig{}, errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty")
		}
-		utils.Warn("Accessing the remote server using password, password is not recommended")
-		clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
+		utils.Warn("Accessing the remote server using password, which is not recommended.")
+		return auth.PasswordKey(sshConfig.user, sshConfig.password, ssh.InsecureIgnoreHostKey())
	}
}
+// CopyToRemote copies a file to a remote server via SCP
+func CopyToRemote(fileName, remotePath string) error {
+	// Load environment variables
+	sshConfig, err := loadSSHConfig()
+	if err != nil {
+		return fmt.Errorf("failed to load SSH configuration: %w", err)
+	}
+	// Initialize SSH client config
+	clientConfig, err := createSSHClientConfig(sshConfig)
+	if err != nil {
+		return fmt.Errorf("failed to create SSH client config: %w", err)
+	}
	// Create a new SCP client
-	client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
+	client := scp.NewClient(fmt.Sprintf("%s:%s", sshConfig.hostName, sshConfig.port), &clientConfig)
	// Connect to the remote server
	err = client.Connect()
	if err != nil {
-		return errors.New("Couldn't establish a connection to the remote server")
+		return errors.New("Couldn't establish a connection to the remote server\n")
	}
-	// Open a file
-	file, _ := os.Open(filepath.Join(tmpPath, fileName))
-	// Close client connection after the file has been copied
+	// Open the local file
+	filePath := filepath.Join(tmpPath, fileName)
+	file, err := os.Open(filePath)
+	if err != nil {
+		return fmt.Errorf("failed to open file %s: %w", filePath, err)
+	}
	defer client.Close()
-	// Close the file after it has been copied
	defer file.Close()
-	// the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
+	// Copy file to the remote server
	err = client.CopyFromFile(context.Background(), *file, filepath.Join(remotePath, fileName), "0655")
	if err != nil {
-		fmt.Println("Error while copying file ")
-		return err
+		return fmt.Errorf("failed to copy file to remote server: %w", err)
	}
	return nil
}
func CopyFromRemote(fileName, remotePath string) error {
-	sshUser := os.Getenv("SSH_USER")
-	sshPassword := os.Getenv("SSH_PASSWORD")
-	sshHostName := os.Getenv("SSH_HOST_NAME")
-	sshPort := os.Getenv("SSH_PORT")
-	sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
-	err := utils.CheckEnvVars(sshHVars)
+	// Load environment variables
+	sshConfig, err := loadSSHConfig()
	if err != nil {
-		utils.Error("Error checking environment variables\n: %s", err)
-		os.Exit(1)
+		return fmt.Errorf("failed to load SSH configuration: %w", err)
	}
-	clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
-	if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
-		clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
-	} else {
-		if sshPassword == "" {
-			return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n")
-		}
-		utils.Warn("Accessing the remote server using password, password is not recommended")
-		clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
-	}
+	// Initialize SSH client config
+	clientConfig, err := createSSHClientConfig(sshConfig)
+	if err != nil {
+		return fmt.Errorf("failed to create SSH client config: %w", err)
+	}
	// Create a new SCP client
-	client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
+	client := scp.NewClient(fmt.Sprintf("%s:%s", sshConfig.hostName, sshConfig.port), &clientConfig)
	// Connect to the remote server
	err = client.Connect()
@@ -107,7 +103,7 @@ func CopyFromRemote(fileName, remotePath string) error {
	err = client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName))
	if err != nil {
-		fmt.Println("Error while copying file ", err)
+		utils.Error("Error while copying file %s ", err)
		return err
	}
	return nil
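A key-based variant of the SCP flow above, using the same go-scp client and auth helper seen in the hunk; host, user and paths are placeholders:

package main

import (
	"context"
	"log"
	"os"

	scp "github.com/bramvdbogaerde/go-scp"
	"github.com/bramvdbogaerde/go-scp/auth"
	"golang.org/x/crypto/ssh"
)

func main() {
	// Build an ssh.ClientConfig from a private key file.
	clientConfig, err := auth.PrivateKey("backup", "/config/id_ed25519", ssh.InsecureIgnoreHostKey())
	if err != nil {
		log.Fatal(err)
	}
	client := scp.NewClient("backup.example.com:22", &clientConfig)
	if err := client.Connect(); err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	f, err := os.Open("/tmp/backup/db.sql.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Copy the archive to the remote path with mode 0655.
	if err := client.CopyFromFile(context.Background(), *f, "/backups/db.sql.gz", "0655"); err != nil {
		log.Fatal(err)
	}
}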


@@ -1,69 +0,0 @@
package pkg
// Package pkg /*
/*
Copyright © 2024 Jonas Kaninda
*/
import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"os"
"os/exec"
)
func CreateCrontabScript(disableCompression bool, storage string) {
//task := "/usr/local/bin/backup_cron.sh"
touchCmd := exec.Command("touch", backupCronFile)
if err := touchCmd.Run(); err != nil {
utils.Fatal("Error creating file %s: %v\n", backupCronFile, err)
}
var disableC = ""
if disableCompression {
disableC = "--disable-compression"
}
scriptContent := fmt.Sprintf(`#!/usr/bin/env bash
set -e
bkup backup --dbname %s --port %s --storage %s %v
`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), storage, disableC)
if err := utils.WriteToFile(backupCronFile, scriptContent); err != nil {
utils.Fatal("Error writing to %s: %v\n", backupCronFile, err)
}
chmodCmd := exec.Command("chmod", "+x", "/usr/local/bin/backup_cron.sh")
if err := chmodCmd.Run(); err != nil {
utils.Fatal("Error changing permissions of %s: %v\n", backupCronFile, err)
}
lnCmd := exec.Command("ln", "-s", "/usr/local/bin/backup_cron.sh", "/usr/local/bin/backup_cron")
if err := lnCmd.Run(); err != nil {
utils.Fatal("Error creating symbolic link: %v\n", err)
}
touchLogCmd := exec.Command("touch", cronLogFile)
if err := touchLogCmd.Run(); err != nil {
utils.Fatal("Error creating file %s: %v\n", cronLogFile, err)
}
cronJob := "/etc/cron.d/backup_cron"
touchCronCmd := exec.Command("touch", cronJob)
if err := touchCronCmd.Run(); err != nil {
utils.Fatal("Error creating file %s: %v\n", cronJob, err)
}
cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
`, os.Getenv("SCHEDULE_PERIOD"), cronLogFile)
if err := utils.WriteToFile(cronJob, cronContent); err != nil {
utils.Fatal("Error writing to %s: %v\n", cronJob, err)
}
utils.ChangePermission("/etc/cron.d/backup_cron", 0644)
crontabCmd := exec.Command("crontab", "/etc/cron.d/backup_cron")
if err := crontabCmd.Run(); err != nil {
utils.Fatal("Error updating crontab: ", err)
}
utils.Info("Backup job created.")
}


@@ -1,23 +1,27 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
const cronLogFile = "/var/log/mysql-bkup.log"
const tmpPath = "/tmp/backup"
-const backupCronFile = "/usr/local/bin/backup_cron.sh"
const algorithm = "aes256"
+const gpgHome = "/config/gnupg"
const gpgExtension = "gpg"
+const workingDir = "/config"
var (
	storage = "local"
	file    = ""
-	dbPassword         = ""
-	dbUserName         = ""
-	dbName             = ""
-	dbHost             = ""
-	dbPort             = "3306"
-	executionMode      = "default"
	storagePath        = "/backup"
	disableCompression = false
	encryption         = false
+	usingKey           = false
+	backupSize   int64 = 0
+	startTime    string
)
// dbHVars Required environment variables for database
@@ -27,11 +31,36 @@ var dbHVars = []string{
	"DB_USERNAME",
	"DB_NAME",
}
+var tdbRVars = []string{
+	"TARGET_DB_HOST",
+	"TARGET_DB_PORT",
+	"TARGET_DB_NAME",
+	"TARGET_DB_USERNAME",
+	"TARGET_DB_PASSWORD",
+}
+var dbConf *dbConfig
+var targetDbConf *targetDbConfig
// sshHVars Required environment variables for SSH remote server storage
var sshHVars = []string{
	"SSH_USER",
-	"SSH_REMOTE_PATH",
+	"REMOTE_PATH",
	"SSH_HOST_NAME",
	"SSH_PORT",
}
var ftpVars = []string{
"FTP_HOST_NAME",
"FTP_USER",
"FTP_PASSWORD",
"FTP_PORT",
}
// AwsVars Required environment variables for AWS S3 storage
var awsVars = []string{
"AWS_S3_ENDPOINT",
"AWS_S3_BUCKET_NAME",
"AWS_ACCESS_KEY",
"AWS_SECRET_KEY",
"AWS_REGION",
}


@@ -1,8 +0,0 @@
#!/bin/sh
DB_USERNAME='db_username'
DB_PASSWORD='password'
DB_HOST='db_hostname'
DB_NAME='db_name'
BACKUP_DIR="$PWD/backup"
docker run --rm --name mysql-bkup -v $BACKUP_DIR:/backup/ -e "DB_HOST=$DB_HOST" -e "DB_USERNAME=$DB_USERNAME" -e "DB_PASSWORD=$DB_PASSWORD" jkaninda/mysql-bkup:latest backup -d $DB_NAME

View File

@@ -0,0 +1,18 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>🔴 Urgent: Database Backup Failure Notification</title>
</head>
<body>
<h2>Hi,</h2>
<p>An error occurred during database backup.</p>
<h3>Failure Details:</h3>
<ul>
<li>Error Message: {{.Error}}</li>
<li>Date: {{.EndTime}}</li>
<li>Backup Reference: {{.BackupReference}} </li>
</ul>
<p>©2024 <a href="https://github.com/jkaninda/mysql-bkup">mysql-bkup</a></p>
</body>
</html>

templates/email.template Normal file (24 lines)
View File

@@ -0,0 +1,24 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>✅ Database Backup Notification {{.Database}}</title>
</head>
<body>
<h2>Hi,</h2>
<p>Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.</p>
<h3>Backup Details:</h3>
<ul>
<li>Database Name: {{.Database}}</li>
<li>Backup Start Time: {{.StartTime}}</li>
<li>Backup End Time: {{.EndTime}}</li>
<li>Backup Storage: {{.Storage}}</li>
<li>Backup Location: {{.BackupLocation}}</li>
<li>Backup Size: {{.BackupSize}} bytes</li>
<li>Backup Reference: {{.BackupReference}} </li>
</ul>
<p>Best regards,</p>
<p>©2024 <a href="https://github.com/jkaninda/mysql-bkup">mysql-bkup</a></p>
</body>
</html>
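
For a sense of how templates like this are consumed: the parseTemplate helper in utils/notification.go (further down) loads them from /config/templates and executes them against a NotificationData value. A minimal standalone sketch with illustrative sample values; the struct is a local stand-in for utils.NotificationData, and the relative path assumes the sketch runs from the repository root:

    package main

    import (
    	"html/template"
    	"os"
    )

    // Local stand-in mirroring the fields the template references.
    type notificationData struct {
    	Database, StartTime, EndTime, Storage, BackupLocation, BackupReference string
    	BackupSize                                                             int64
    }

    func main() {
    	tmpl, err := template.ParseFiles("templates/email.template")
    	if err != nil {
    		panic(err)
    	}
    	data := notificationData{
    		Database:        "mydb",
    		StartTime:       "2024-10-09 at 22:00:00",
    		EndTime:         "2024-10-09 at 22:00:05",
    		Storage:         "s3",
    		BackupLocation:  "/custom-path/mydb_20241009_220000.sql.gz",
    		BackupSize:      1048576,
    		BackupReference: "daily",
    	}
    	if err := tmpl.Execute(os.Stdout, data); err != nil {
    		panic(err)
    	}
    }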

View File

@@ -0,0 +1,8 @@
🔴 Urgent: Database Backup Failure Notification
Hi,
An error occurred during database backup.
Failure Details:
- Date: {{.EndTime}}
- Backup Reference: {{.BackupReference}}
- Error Message: {{.Error}}

View File

@@ -0,0 +1,12 @@
✅ Database Backup Notification {{.Database}}
Hi,
Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.
Backup Details:
- Database Name: {{.Database}}
- Backup Start Time: {{.StartTime}}
- Backup End Time: {{.EndTime}}
- Backup Storage: {{.Storage}}
- Backup Location: {{.BackupLocation}}
- Backup Size: {{.BackupSize}} bytes
- Backup Reference: {{.BackupReference}}

utils/config.go Normal file (59 lines)
View File

@@ -0,0 +1,59 @@
package utils
import "os"
type MailConfig struct {
MailHost string
MailPort int
MailUserName string
MailPassword string
MailTo string
MailFrom string
SkipTls bool
}
type NotificationData struct {
File string
BackupSize int64
Database string
StartTime string
EndTime string
Storage string
BackupLocation string
BackupReference string
}
type ErrorMessage struct {
Database string
EndTime string
Error string
BackupReference string
}
// loadMailConfig gets mail environment variables and returns MailConfig
func loadMailConfig() *MailConfig {
return &MailConfig{
MailHost: os.Getenv("MAIL_HOST"),
MailPort: GetIntEnv("MAIL_PORT"),
MailUserName: os.Getenv("MAIL_USERNAME"),
MailPassword: os.Getenv("MAIL_PASSWORD"),
MailTo: os.Getenv("MAIL_TO"),
MailFrom: os.Getenv("MAIL_FROM"),
// Note: as written, SkipTls (and hence InsecureSkipVerify) is true only when MAIL_SKIP_TLS is the string "false".
SkipTls: os.Getenv("MAIL_SKIP_TLS") == "false",
}
}
// TimeFormat returns the format of the time
func TimeFormat() string {
format := os.Getenv("TIME_FORMAT")
if format == "" {
return "2006-01-02 at 15:04:05"
}
return format
}
func backupReference() string {
return os.Getenv("BACKUP_REFERENCE")
}
const templatePath = "/config/templates"
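
Taken together, a working mail setup would be supplied through the environment along these lines (host and credentials are placeholders):

    MAIL_HOST=smtp.example.com
    MAIL_PORT=587
    MAIL_USERNAME=alerts@example.com
    MAIL_PASSWORD=<secret>
    MAIL_FROM=alerts@example.com
    MAIL_TO=admin@example.com,ops@example.com

MAIL_TO accepts a comma-separated list (SendEmail below splits it), and TIME_FORMAT uses Go's reference-time layout, defaulting to "2006-01-02 at 15:04:05" when unset.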

View File

@@ -1,10 +1,16 @@
+// Package utils /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package utils
-const RestoreExample = "mysql-bkup restore --dbname database --file db_20231219_022941.sql.gz\n" +
-	"bkup restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
+const RestoreExample = "restore --dbname database --file db_20231219_022941.sql.gz\n" +
+	"restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
-const BackupExample = "mysql-bkup backup --dbname database --disable-compression\n" +
-	"mysql-bkup backup --dbname database --storage s3 --path /custom-path --disable-compression"
+const BackupExample = "backup --dbname database --disable-compression\n" +
+	"backup --dbname database --storage s3 --path /custom-path --disable-compression"
 const MainExample = "mysql-bkup backup --dbname database --disable-compression\n" +
-	"mysql-bkup backup --dbname database --storage s3 --path /custom-path\n" +
-	"mysql-bkup restore --dbname database --file db_20231219_022941.sql.gz"
+	"backup --dbname database --storage s3 --path /custom-path\n" +
+	"restore --dbname database --file db_20231219_022941.sql.gz"

View File

@@ -1,3 +1,9 @@
+// Package utils /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package utils
 import (
@@ -6,9 +12,8 @@ import (
 	"time"
 )
-var currentTime = time.Now().Format("2006/01/02 15:04:05")
 func Info(msg string, args ...any) {
+	var currentTime = time.Now().Format("2006/01/02 15:04:05")
 	formattedMessage := fmt.Sprintf(msg, args...)
 	if len(args) == 0 {
 		fmt.Printf("%s INFO: %s\n", currentTime, msg)
@@ -19,6 +24,7 @@ func Info(msg string, args ...any) {
 // Warn warning message
 func Warn(msg string, args ...any) {
+	var currentTime = time.Now().Format("2006/01/02 15:04:05")
 	formattedMessage := fmt.Sprintf(msg, args...)
 	if len(args) == 0 {
 		fmt.Printf("%s WARN: %s\n", currentTime, msg)
@@ -27,6 +33,7 @@ func Warn(msg string, args ...any) {
 	}
 }
 func Error(msg string, args ...any) {
+	var currentTime = time.Now().Format("2006/01/02 15:04:05")
 	formattedMessage := fmt.Sprintf(msg, args...)
 	if len(args) == 0 {
 		fmt.Printf("%s ERROR: %s\n", currentTime, msg)
@@ -35,6 +42,7 @@ func Error(msg string, args ...any) {
 	}
 }
 func Done(msg string, args ...any) {
+	var currentTime = time.Now().Format("2006/01/02 15:04:05")
 	formattedMessage := fmt.Sprintf(msg, args...)
 	if len(args) == 0 {
 		fmt.Printf("%s INFO: %s\n", currentTime, msg)
@@ -45,12 +53,17 @@ func Done(msg string, args ...any) {
 // Fatal logs an error message and exits the program
 func Fatal(msg string, args ...any) {
+	var currentTime = time.Now().Format("2006/01/02 15:04:05")
 	// Fatal logs an error message and exits the program.
 	formattedMessage := fmt.Sprintf(msg, args...)
 	if len(args) == 0 {
 		fmt.Printf("%s ERROR: %s\n", currentTime, msg)
+		NotifyError(msg)
 	} else {
 		fmt.Printf("%s ERROR: %s\n", currentTime, formattedMessage)
+		NotifyError(formattedMessage)
 	}
 	os.Exit(1)
 }
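
For context on the logging fix above: a package-level var is initialized once at process start, so every log line previously carried the startup timestamp; moving the time.Now() call inside each function stamps every message with the actual call time. A minimal standalone sketch of the difference (names are illustrative):

    package main

    import (
    	"fmt"
    	"time"
    )

    // Evaluated once, when the program starts.
    var startStamp = time.Now().Format("2006/01/02 15:04:05")

    // Evaluated on every call.
    func stamp() string { return time.Now().Format("2006/01/02 15:04:05") }

    func main() {
    	time.Sleep(2 * time.Second)
    	fmt.Println(startStamp) // still shows the startup time
    	fmt.Println(stamp())    // shows the current time, about 2s later
    }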

utils/notification.go Normal file (183 lines)
View File

@@ -0,0 +1,183 @@
package utils
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"github.com/go-mail/mail"
"github.com/robfig/cron/v3"
"html/template"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strings"
"time"
)
func parseTemplate[T any](data T, fileName string) (string, error) {
// Parse the named template file from the templates directory
tmpl, err := template.ParseFiles(filepath.Join(templatePath, fileName))
if err != nil {
return "", err
}
var buf bytes.Buffer
if err = tmpl.Execute(&buf, data); err != nil {
return "", err
}
return buf.String(), nil
}
func SendEmail(subject, body string) error {
Info("Start sending email notification....")
config := loadMailConfig()
emails := strings.Split(config.MailTo, ",")
m := mail.NewMessage()
m.SetHeader("From", config.MailFrom)
m.SetHeader("To", emails...)
m.SetHeader("Subject", subject)
m.SetBody("text/html", body)
d := mail.NewDialer(config.MailHost, config.MailPort, config.MailUserName, config.MailPassword)
d.TLSConfig = &tls.Config{InsecureSkipVerify: config.SkipTls}
if err := d.DialAndSend(m); err != nil {
Error("Error could not send email : %v", err)
return err
}
Info("Email notification has been sent")
return nil
}
func sendMessage(msg string) error {
Info("Sending Telegram notification... ")
chatId := os.Getenv("TG_CHAT_ID")
body, _ := json.Marshal(map[string]string{
"chat_id": chatId,
"text": msg,
})
url := fmt.Sprintf("%s/sendMessage", getTgUrl())
// Create an HTTP post request
request, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
if err != nil {
panic(err)
}
request.Header.Add("Content-Type", "application/json")
client := &http.Client{}
response, err := client.Do(request)
if err != nil {
return err
}
// Ensure the response body is released once it has been read.
defer response.Body.Close()
code := response.StatusCode
if code == 200 {
Info("Telegram notification has been sent")
return nil
} else {
body, _ := ioutil.ReadAll(response.Body)
Error("Error could not send message, error: %s", string(body))
return fmt.Errorf("error could not send message %s", string(body))
}
}
func NotifySuccess(notificationData *NotificationData) {
notificationData.BackupReference = backupReference()
var vars = []string{
"TG_TOKEN",
"TG_CHAT_ID",
}
var mailVars = []string{
"MAIL_HOST",
"MAIL_PORT",
"MAIL_USERNAME",
"MAIL_PASSWORD",
"MAIL_FROM",
"MAIL_TO",
}
//Email notification
err := CheckEnvVars(mailVars)
if err == nil {
body, err := parseTemplate(*notificationData, "email.template")
if err != nil {
Error("Could not parse email template: %v", err)
}
err = SendEmail(fmt.Sprintf("✅ Database Backup Notification %s", notificationData.Database), body)
if err != nil {
Error("Could not send email: %v", err)
}
}
//Telegram notification
err = CheckEnvVars(vars)
if err == nil {
message, err := parseTemplate(*notificationData, "telegram.template")
if err != nil {
Error("Could not parse telegram template: %v", err)
}
err = sendMessage(message)
if err != nil {
Error("Could not send Telegram message: %v", err)
}
}
}
func NotifyError(error string) {
var vars = []string{
"TG_TOKEN",
"TG_CHAT_ID",
}
var mailVars = []string{
"MAIL_HOST",
"MAIL_PORT",
"MAIL_USERNAME",
"MAIL_PASSWORD",
"MAIL_FROM",
"MAIL_TO",
}
//Email notification
err := CheckEnvVars(mailVars)
if err == nil {
body, err := parseTemplate(ErrorMessage{
Error: error,
EndTime: time.Now().Format(TimeFormat()),
BackupReference: os.Getenv("BACKUP_REFERENCE"),
}, "email-error.template")
if err != nil {
Error("Could not parse error template: %v", err)
}
err = SendEmail("🔴 Urgent: Database Backup Failure Notification", body)
if err != nil {
Error("Could not send email: %v", err)
}
}
//Telegram notification
err = CheckEnvVars(vars)
if err == nil {
message, err := parseTemplate(ErrorMessage{
Error: error,
EndTime: time.Now().Format(TimeFormat()),
BackupReference: os.Getenv("BACKUP_REFERENCE"),
}, "telegram-error.template")
if err != nil {
Error("Could not parse error template: %v", err)
}
err = sendMessage(message)
if err != nil {
Error("Could not send telegram message: %v", err)
}
}
}
func getTgUrl() string {
return fmt.Sprintf("https://api.telegram.org/bot%s", os.Getenv("TG_TOKEN"))
}
func IsValidCronExpression(cronExpr string) bool {
_, err := cron.ParseStandard(cronExpr)
return err == nil
}
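
A minimal sketch of how a caller would report a successful backup through this package; field values are illustrative, and the import path assumes the repository's module path:

    package main

    import (
    	"time"

    	"github.com/jkaninda/mysql-bkup/utils"
    )

    func main() {
    	start := time.Now().Format(utils.TimeFormat())
    	// ... run the database dump and upload here ...
    	utils.NotifySuccess(&utils.NotificationData{
    		Database:       "mydb",
    		StartTime:      start,
    		EndTime:        time.Now().Format(utils.TimeFormat()),
    		Storage:        "local",
    		BackupLocation: "/backup/mydb_20241009_220000.sql.gz",
    		BackupSize:     1048576,
    	})
    }

On failure there is no call to make by hand: Fatal in utils/logger.go now routes every fatal message through NotifyError before exiting.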

View File

@@ -1,21 +1,21 @@
+// Package utils /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package utils
-/*****
- * MySQL Backup & Restore
- * @author Jonas Kaninda
- * @license MIT License <https://opensource.org/licenses/MIT>
- * @link https://github.com/jkaninda/mysql-bkup
- **/
 import (
-	"bytes"
 	"fmt"
 	"github.com/spf13/cobra"
 	"io"
 	"io/fs"
 	"os"
-	"os/exec"
+	"strconv"
 )
+// FileExists checks if the file does exist
 func FileExists(filename string) bool {
 	info, err := os.Stat(filename)
 	if os.IsNotExist(err) {
@@ -90,34 +90,6 @@ func IsDirEmpty(name string) (bool, error) {
 	return true, nil
 }
-// TestDatabaseConnection tests the database connection
-func TestDatabaseConnection() {
-	dbHost := os.Getenv("DB_HOST")
-	dbPassword := os.Getenv("DB_PASSWORD")
-	dbUserName := os.Getenv("DB_USERNAME")
-	dbName := os.Getenv("DB_NAME")
-	dbPort := os.Getenv("DB_PORT")
-	if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
-		Fatal("Please make sure all required database environment variables are set")
-	} else {
-		Info("Connecting to database ...")
-		cmd := exec.Command("mysql", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName, "-e", "quit")
-		// Capture the output
-		var out bytes.Buffer
-		cmd.Stdout = &out
-		cmd.Stderr = &out
-		err := cmd.Run()
-		if err != nil {
-			Error("Error testing database connection: %v\nOutput: %s", err, out.String())
-			os.Exit(1)
-		}
-		Info("Successfully connected to database")
-	}
-}
 func GetEnv(cmd *cobra.Command, flagName, envName string) string {
 	value, _ := cmd.Flags().GetString(flagName)
 	if value != "" {
@@ -157,14 +129,11 @@ func GetEnvVariable(envName, oldEnvName string) string {
 		if err != nil {
 			return value
 		}
-		Warn("%s is deprecated, please use %s instead!", oldEnvName, envName)
+		Warn("%s is deprecated, please use %s instead! ", oldEnvName, envName)
 		}
 	}
 	return value
 }
-func ShowHistory() {
-}
 // CheckEnvVars checks if all the specified environment variables are set
 func CheckEnvVars(vars []string) error {
@@ -182,3 +151,32 @@ func CheckEnvVars(vars []string) error {
 	return nil
 }
+// MakeDir create directory
+func MakeDir(dirPath string) error {
+	err := os.Mkdir(dirPath, 0700)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+// MakeDirAll create directory
+func MakeDirAll(dirPath string) error {
+	err := os.MkdirAll(dirPath, 0700)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+func GetIntEnv(envName string) int {
+	val := os.Getenv(envName)
+	if val == "" {
+		return 0
+	}
+	ret, err := strconv.Atoi(val)
+	if err != nil {
+		Error("Error: %v", err)
+	}
+	return ret
+}
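
Finally, a small usage note on GetIntEnv, which backs MailPort in utils/config.go above: it returns 0 for an unset variable, and on a parse failure it logs the error and still returns 0, since strconv.Atoi yields a zero value on error. A sketch, assuming the same module path as before:

    package main

    import "github.com/jkaninda/mysql-bkup/utils"

    func main() {
    	// MAIL_PORT unset          -> 0
    	// MAIL_PORT="587"          -> 587
    	// MAIL_PORT="not-a-number" -> logs an error, returns 0
    	_ = utils.GetIntEnv("MAIL_PORT")
    }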