mirror of https://github.com/jkaninda/mysql-bkup.git (synced 2025-12-06 13:39:41 +01:00)
Compare commits
132 Commits
Commit SHA1s:
df0efd24d3, e5dd7e76ce, 12fbb67a09, df490af7b6, d930c3e2f6, e4258cb12e, 4c44166921, 554df819ab, ca5633882e, c5cca82841, bbd5422089, d72156f890, 909a50dbe7, 94ceb71da2, fe05fe5110, dabba2050a, 47e1ac407b, 28f6ed3a82, 504926c7cd, 737f473f92, 300d2a8205, a4ad0502cf, f344867edf, d774584f64, 96927cd57e, ceacfa1d9d, 9380a18b45, d186071df9, 71429b0e1a, 0bed86ded4, e891801125, 01cf8a3392, efea81833a, 1cbf65d686, 73d19913f8, b0224e43ef, fa0485bb5a, 65ef6d3e8f, a7b6abb101, 3b21c109bc, a50a1ef6f9, 76bbfa35c4, 599d93bef4, 247e90f73e, 7d544aca68, 1722ee0eeb, 726fd14831, fdc88e6064, 2ba1b516e9, 301594676b, d06f2f2d7e, 2f06bd1c3a, f383f5559d, 3725809d28, b1598ef7d0, e4a83b9851, 4b2527f416, e97fc7512a, 7912ce46ed, 050f5e81bc, b39e97b77d, cbb73ae89b, 29a58aa26d, 041e0a07e9, 9daac9c654, f6098769cd, 5cdfaa4d94, b205cd61ea, e1307250e8, 17ac951deb, 6e2e08224d, 570b775f48, e38e106983, 3040420a09, eac5f70408, 3476c6f529, 1a9c8483f8, f8722f7ae4, 421bf12910, 3da4a27baa, 0881f075ef, 066e73f8e4, 645243ff77, 9384998127, 390e7dad0c, 67ea22385f, cde82d8cfc, 4808f093e5, c7a03861fe, 36ec63d522, 0f07de1d83, ae55839996, a7f7e57a0d, b2ddaec93b, b3570d774c, 38f7e91c03, 07c2935925, f3c5585051, 7163d030a5, a2cec86e73, 662b73579d, c9f8a32de1, 8fb008151c, 113c84c885, 58deb92953, c41afb8b57, 02e51a3933, db4061b64b, 9467b157aa, c229ebdc9d, 7b701d1740, ad6f190bad, de4dcaaeca, 17c0a99bda, b1c9abf931, a70a893c11, 243e25f4fb, cb0dcf4104, d26d8d31c9, 71d438ba76, a3fc58af96, 08ca6d4a39, 27b9ab5f36, 6d6db7061b, d90647aae7, 5c2c05499f, 88ada6fefd, e6c8b0923d, 59a136039c, db835e81c4, 5b05bcbf0c, b8277c8464
.github/FUNDING.yml (vendored, new file)
@@ -0,0 +1,3 @@
+# These are supported funding model platforms
+
+ko_fi: jkaninda
.github/workflows/build.yml (vendored)
@@ -25,8 +25,10 @@ jobs:
         uses: docker/build-push-action@v3
         with:
           push: true
-          file: "./docker/Dockerfile"
+          file: "./Dockerfile"
           platforms: linux/amd64,linux/arm64,linux/arm/v7
+          build-args: |
+            appVersion=develop-${{ github.sha }}
           tags: |
-            "${{env.BUILDKIT_IMAGE}}:develop-${{ github.sha }}"
+            "${{vars.BUILDKIT_IMAGE}}:develop-${{ github.sha }}"
.github/workflows/release.yml (vendored)
@@ -1,4 +1,4 @@
-name: Release
+name: CI
 on:
   push:
     tags:
@@ -39,11 +39,13 @@ jobs:
         uses: docker/build-push-action@v3
         with:
           push: true
-          file: "./docker/Dockerfile"
+          file: "./Dockerfile"
           platforms: linux/amd64,linux/arm64,linux/arm/v7
+          build-args: |
+            appVersion=${{ env.TAG_NAME }}
           tags: |
-            "${{env.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
-            "${{env.BUILDKIT_IMAGE}}:latest"
-            "ghcr.io/${{env.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
-            "ghcr.io/${{env.BUILDKIT_IMAGE}}:latest"
+            "${{vars.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
+            "${{vars.BUILDKIT_IMAGE}}:latest"
+            "ghcr.io/${{vars.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
+            "ghcr.io/${{vars.BUILDKIT_IMAGE}}:latest"
.gitignore (vendored)
@@ -8,4 +8,5 @@ test.md
 mysql-bkup
 /.DS_Store
 /.idea
 bin
+Makefile
Dockerfile (new file)
@@ -0,0 +1,83 @@
+FROM golang:1.22.5 AS build
+WORKDIR /app
+
+# Copy the source code.
+COPY . .
+# Installs Go dependencies
+RUN go mod download
+
+# Build
+RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup
+
+FROM alpine:3.20.3
+ENV DB_HOST=""
+ENV DB_NAME=""
+ENV DB_USERNAME=""
+ENV DB_PASSWORD=""
+ENV DB_PORT=3306
+ENV STORAGE=local
+ENV AWS_S3_ENDPOINT=""
+ENV AWS_S3_BUCKET_NAME=""
+ENV AWS_ACCESS_KEY=""
+ENV AWS_SECRET_KEY=""
+ENV AWS_S3_PATH=""
+ENV AWS_REGION="us-west-2"
+ENV AWS_DISABLE_SSL="false"
+ENV AWS_FORCE_PATH_STYLE="true"
+ENV GPG_PASSPHRASE=""
+ENV SSH_USER=""
+ENV SSH_PASSWORD=""
+ENV SSH_HOST=""
+ENV SSH_IDENTIFY_FILE=""
+ENV SSH_PORT=22
+ENV REMOTE_PATH=""
+ENV FTP_HOST=""
+ENV FTP_PORT=21
+ENV FTP_USER=""
+ENV FTP_PASSWORD=""
+ENV TARGET_DB_HOST=""
+ENV TARGET_DB_PORT=3306
+ENV TARGET_DB_NAME=""
+ENV TARGET_DB_USERNAME=""
+ENV TARGET_DB_PASSWORD=""
+ENV BACKUP_CRON_EXPRESSION=""
+ENV TG_TOKEN=""
+ENV TG_CHAT_ID=""
+ENV TZ=UTC
+ARG WORKDIR="/config"
+ARG BACKUPDIR="/backup"
+ARG BACKUP_TMP_DIR="/tmp/backup"
+ARG TEMPLATES_DIR="/config/templates"
+ARG appVersion="v1.2.12"
+ENV VERSION=${appVersion}
+LABEL author="Jonas Kaninda"
+LABEL version=${appVersion}
+
+RUN apk --update add --no-cache mysql-client mariadb-connector-c tzdata
+RUN mkdir $WORKDIR
+RUN mkdir $BACKUPDIR
+RUN mkdir $TEMPLATES_DIR
+RUN mkdir -p $BACKUP_TMP_DIR
+RUN chmod 777 $WORKDIR
+RUN chmod 777 $BACKUPDIR
+RUN chmod 777 $BACKUP_TMP_DIR
+RUN chmod 777 $WORKDIR
+
+COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
+COPY ./templates/* $TEMPLATES_DIR/
+RUN chmod +x /usr/local/bin/mysql-bkup
+
+RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
+
+# Create backup script and make it executable
+RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
+    chmod +x /usr/local/bin/backup
+# Create restore script and make it executable
+RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
+    chmod +x /usr/local/bin/restore
+# Create migrate script and make it executable
+RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
+    chmod +x /usr/local/bin/migrate
+
+WORKDIR $WORKDIR
+ENTRYPOINT ["/usr/local/bin/mysql-bkup"]
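The new image keeps `mysql-bkup` as the container entrypoint while also installing `backup`, `restore` and `migrate` wrapper scripts on the `PATH`. A minimal sketch of the two invocation styles this enables; the network, volume and credential values are placeholders, not values from this changeset:

```shell
# Compose/CLI style: the command is appended to the mysql-bkup entrypoint
docker run --rm --network my_network \
  -v "$PWD/backup:/backup" \
  -e DB_HOST=mysql -e DB_USERNAME=user -e DB_PASSWORD=secret \
  jkaninda/mysql-bkup backup -d my_database

# Kubernetes style: `command:` overrides the entrypoint, so a shell
# resolves the /usr/local/bin/backup wrapper created above
docker run --rm --network my_network \
  -e DB_HOST=mysql -e DB_USERNAME=user -e DB_PASSWORD=secret \
  --entrypoint /bin/sh jkaninda/mysql-bkup -c "backup -d my_database"
```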
Makefile (deleted)
@@ -1,46 +0,0 @@
-BINARY_NAME=mysql-bkup
-include .env
-export
-run:
-	go run . backup
-
-build:
-	go build -o bin/${BINARY_NAME} .
-
-compile:
-	GOOS=darwin GOARCH=arm64 go build -o bin/${BINARY_NAME}-darwin-arm64 .
-	GOOS=darwin GOARCH=amd64 go build -o bin/${BINARY_NAME}-darwin-amd64 .
-	GOOS=linux GOARCH=arm64 go build -o bin/${BINARY_NAME}-linux-arm64 .
-	GOOS=linux GOARCH=amd64 go build -o bin/${BINARY_NAME}-linux-amd64 .
-
-docker-build:
-	docker build -f docker/Dockerfile -t jkaninda/mysql-bkup:latest .
-
-docker-run: docker-build
-	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --prune --keep-last 2
-docker-restore: docker-build
-	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup restore -f ${FILE_NAME}
-
-
-docker-run-scheduled: docker-build
-	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --mode scheduled --period "* * * * *"
-
-
-docker-run-scheduled-s3: docker-build
-	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
-
-docker-run-s3: docker-build
-	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "AWS_S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage s3 --path /custom-path
-
-
-docker-restore-s3: docker-build
-	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup restore --storage s3 -f ${FILE_NAME} --path /custom-path
-
-docker-run-ssh: docker-build
-	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage ssh
-
-docker-restore-ssh: docker-build
-	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" jkaninda/mysql-bkup bkup restore --storage ssh -f ${FILE_NAME}
-
-run-docs:
-	cd docs && bundle exec jekyll serve -H 0.0.0.0 -t
README.md
@@ -1,19 +1,23 @@
 # MySQL Backup
-mysql-bkup is a Docker container image that can be used to backup and restore Postgres database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
+MySQL Backup is a Docker container image that can be used to backup, restore and migrate MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, FTP and SSH compatible storage.
 It also supports __encrypting__ your backups using GPG.

 The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
-It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
+It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3, FTP or SSH compatible storage.

-It also supports __encrypting__ your backups using GPG.
+It also supports database __encryption__ using GPG.

 [](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
 [](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
 <a href="https://ko-fi.com/jkaninda"><img src="https://uploads-ssl.webflow.com/5c14e387dab576fe667689cf/5cbed8a4ae2b88347c06c923_BuyMeACoffee_blue.png" height="20" alt="buy ma a coffee"></a>

 Successfully tested on:
 - Docker
 - Docker in Swarm mode
 - Kubernetes
 - OpenShift

 ## Documentation is found at <https://jkaninda.github.io/mysql-bkup>

@@ -30,13 +34,13 @@ It also supports __encrypting__ your backups using GPG.
 ## Storage:
 - Local
 - AWS S3 or any S3 Alternatives for Object Storage
-- SSH
+- SSH remote server

 ## Quickstart

 ### Simple backup using Docker CLI

-To run a one time backup, bind your local volume to `/backup` in the container and run the `mysql-bkup backup` command:
+To run a one time backup, bind your local volume to `/backup` in the container and run the `backup` command:

 ```shell
 docker run --rm --network your_network_name \
@@ -44,11 +48,17 @@ To run a one time backup, bind your local volume to `/backup` in the container a
   -e "DB_HOST=dbhost" \
   -e "DB_USERNAME=username" \
   -e "DB_PASSWORD=password" \
-  jkaninda/mysql-bkup mysql-bkup backup -d database_name
+  jkaninda/mysql-bkup backup -d database_name
 ```

+Alternatively, pass a `--env-file` in order to use a full config as described below.
+
+```yaml
+docker run --rm --network your_network_name \
+  --env-file your-env-file \
+  -v $PWD/backup:/backup/ \
+  jkaninda/mysql-bkup backup -d database_name
+```

 ### Simple backup in docker compose file

@@ -61,73 +71,81 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup
+    command: backup
     volumes:
       - ./backup:/backup
     environment:
-      - DB_PORT=5432
-      - DB_HOST=postgres
+      - DB_PORT=3306
+      - DB_HOST=mysql
       - DB_NAME=foo
       - DB_USERNAME=bar
       - DB_PASSWORD=password
+      - TZ=Europe/Paris
     # mysql-bkup container must be connected to the same network with your database
     networks:
       - web
 networks:
   web:
 ```

+### Docker recurring backup
+
+```shell
+docker run --rm --network network_name \
+ -v $PWD/backup:/backup/ \
+ -e "DB_HOST=hostname" \
+ -e "DB_USERNAME=user" \
+ -e "DB_PASSWORD=password" \
+ jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1m"
+```
+See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules

 ## Deploy on Kubernetes

-For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as CronJob.
+For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as Job or CronJob.

-### Simple Kubernetes CronJob usage:
+### Simple Kubernetes backup Job :

 ```yaml
 apiVersion: batch/v1
-kind: CronJob
+kind: Job
 metadata:
-  name: bkup-job
+  name: backup-job
 spec:
-  schedule: "0 1 * * *"
-  jobTemplate:
+  ttlSecondsAfterFinished: 100
   template:
     spec:
-      template:
-        spec:
-          containers:
-            - name: mysql-bkup
-              image: jkaninda/mysql-bkup
-              command:
-                - /bin/sh
-                - -c
-                - mysql-bkup backup -s s3 --path /custom_path
-              env:
-                - name: DB_PORT
-                  value: "5432"
-                - name: DB_HOST
-                  value: ""
-                - name: DB_NAME
-                  value: ""
-                - name: DB_USERNAME
-                  value: ""
-                # Please use secret!
-                - name: DB_PASSWORD
-                  value: ""
-                - name: AWS_S3_ENDPOINT
-                  value: "https://s3.amazonaws.com"
-                - name: AWS_S3_BUCKET_NAME
-                  value: "xxx"
-                - name: AWS_REGION
-                  value: "us-west-2"
-                - name: AWS_ACCESS_KEY
-                  value: "xxxx"
-                - name: AWS_SECRET_KEY
-                  value: "xxxx"
-                - name: AWS_DISABLE_SSL
-                  value: "false"
-      restartPolicy: Never
+      containers:
+        - name: mysql-bkup
+          # In production, it is advised to lock your image tag to a proper
+          # release version instead of using `latest`.
+          # Check https://github.com/jkaninda/mysql-bkup/releases
+          # for a list of available releases.
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - backup -d dbname
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+          env:
+            - name: DB_HOST
+              value: "mysql"
+            - name: DB_USERNAME
+              value: "user"
+            - name: DB_PASSWORD
+              value: "password"
+          volumeMounts:
+            - mountPath: /backup
+              name: backup
+      volumes:
+        - name: backup
+          hostPath:
+            path: /home/toto/backup # directory location on host
+            type: Directory # this field is optional
+      restartPolicy: Never
 ```
 ## Available image registries

@@ -135,8 +153,8 @@ This Docker image is published to both Docker Hub and the GitHub container regis
 Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`:

 ```
-docker pull jkaninda/mysql-bkup:v1.0
-docker pull ghcr.io/jkaninda/mysql-bkup:v1.0
+docker pull jkaninda/mysql-bkup
+docker pull ghcr.io/jkaninda/mysql-bkup
 ```

 Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
@@ -150,7 +168,7 @@ While it may work against different implementations, there are no guarantees abo

 We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:

-- The original image is based on `ubuntu` and requires additional tools, making it heavy.
+- The original image is based on `alpine` and requires additional tools, making it heavy.
 - This image is written in Go.
 - `arm64` and `arm/v7` architectures are supported.
 - Docker in Swarm mode is supported.
cmd/backup.go
@@ -1,3 +1,9 @@
+// Package cmd /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package cmd

 import (
@@ -21,8 +27,9 @@ var BackupCmd = &cobra.Command{

 func init() {
 	//Backup
-	BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled")
-	BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time")
 	BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
 	BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
+	BackupCmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression")
 	BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
 	BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "Delete files created more than specified days ago, default 7 days")
 	BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
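The scheduling flags change here: `--mode`/`--period` give way to a single `--cron-expression` flag. A hedged sketch of the resulting CLI surface; the database name, schedule and env file are illustrative, and `DB_*` variables are assumed to be set in `db.env`:

```shell
# One-shot backup (runs once when no cron expression is given)
docker run --rm --env-file db.env jkaninda/mysql-bkup backup -d my_database

# Recurring backup driven by the new flag
docker run --rm --env-file db.env jkaninda/mysql-bkup \
  backup -d my_database --cron-expression "0 1 * * *"
```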
cmd/migrate.go (new file)
@@ -0,0 +1,27 @@
+// Package cmd /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
+package cmd
+
+import (
+	"github.com/jkaninda/mysql-bkup/pkg"
+	"github.com/jkaninda/mysql-bkup/utils"
+	"github.com/spf13/cobra"
+)
+
+var MigrateCmd = &cobra.Command{
+	Use:   "migrate",
+	Short: "Migrate database from a source database to a target database",
+	Run: func(cmd *cobra.Command, args []string) {
+		if len(args) == 0 {
+			pkg.StartMigration(cmd)
+		} else {
+			utils.Fatal("Error, no argument required")
+		}
+	},
+}
cmd/restore.go
@@ -24,5 +24,7 @@ var RestoreCmd = &cobra.Command{
 func init() {
 	//Restore
 	RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database")
+	RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
+	RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")

 }
cmd/root.go
@@ -1,7 +1,9 @@
-// Package cmd /*
-/*
-Copyright © 2024 Jonas Kaninda
-*/
+// Package cmd /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package cmd

 import (
@@ -30,13 +32,10 @@ func Execute() {
 }

 func init() {
-	rootCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
-	rootCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
-	rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name")
-	rootCmd.PersistentFlags().IntP("port", "p", 3306, "Database port")
 	rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only")

 	rootCmd.AddCommand(VersionCmd)
 	rootCmd.AddCommand(BackupCmd)
 	rootCmd.AddCommand(RestoreCmd)
+	rootCmd.AddCommand(MigrateCmd)

 }
cmd/version.go
@@ -1,9 +1,11 @@
+// Package cmd /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package cmd

-/*
-Copyright © 2024 Jonas Kaninda
-*/
-
 import (
 	"fmt"
 	"github.com/spf13/cobra"
docker/Dockerfile (deleted)
@@ -1,67 +0,0 @@
-FROM golang:1.22.5 AS build
-WORKDIR /app
-
-# Copy the source code.
-COPY . .
-# Installs Go dependencies
-RUN go mod download
-
-# Build
-RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup
-
-FROM ubuntu:24.04
-ENV DB_HOST=""
-ENV DB_NAME=""
-ENV DB_USERNAME=""
-ENV DB_PASSWORD=""
-ENV DB_PORT="3306"
-ENV STORAGE=local
-ENV AWS_S3_ENDPOINT=""
-ENV AWS_S3_BUCKET_NAME=""
-ENV AWS_ACCESS_KEY=""
-ENV AWS_SECRET_KEY=""
-ENV AWS_REGION="us-west-2"
-ENV AWS_S3_PATH=""
-ENV AWS_DISABLE_SSL="false"
-ENV GPG_PASSPHRASE=""
-ENV SSH_USER=""
-ENV SSH_REMOTE_PATH=""
-ENV SSH_PASSWORD=""
-ENV SSH_HOST_NAME=""
-ENV SSH_IDENTIFY_FILE=""
-ENV SSH_PORT="22"
-ARG DEBIAN_FRONTEND=noninteractive
-ENV VERSION="v1.0"
-ARG WORKDIR="/app"
-ARG BACKUPDIR="/backup"
-ARG BACKUP_TMP_DIR="/tmp/backup"
-ARG BACKUP_CRON="/etc/cron.d/backup_cron"
-ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
-LABEL author="Jonas Kaninda"
-
-RUN apt-get update -qq
-#RUN apt-get install build-essential libcurl4-openssl-dev libxml2-dev mime-support -y
-RUN apt install mysql-client supervisor cron gnupg -y
-
-# Clear cache
-RUN apt-get clean && rm -rf /var/lib/apt/lists/*
-
-RUN mkdir $WORKDIR
-RUN mkdir $BACKUPDIR
-RUN mkdir -p $BACKUP_TMP_DIR
-RUN chmod 777 $WORKDIR
-RUN chmod 777 $BACKUPDIR
-RUN chmod 777 $BACKUP_TMP_DIR
-RUN touch $BACKUP_CRON && \
-    touch $BACKUP_CRON_SCRIPT && \
-    chmod 777 $BACKUP_CRON && \
-    chmod 777 $BACKUP_CRON_SCRIPT
-
-COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
-RUN chmod +x /usr/local/bin/mysql-bkup
-
-RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
-
-ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
-
-WORKDIR $WORKDIR
docker/supervisord.conf (deleted)
@@ -1,13 +0,0 @@
-[supervisord]
-nodaemon=true
-user=root
-logfile=/var/log/supervisor/supervisord.log
-pidfile=/var/run/supervisord.pid
-
-[program:cron]
-command = /bin/bash -c "declare -p | grep -Ev '^declare -[[:alpha:]]*r' > /run/supervisord.env && /usr/sbin/cron -f -L 15"
-autostart=true
-autorestart=true
-user = root
-stderr_logfile=/var/log/cron.err.log
-stdout_logfile=/var/log/cron.out.log
@@ -1,12 +0,0 @@ (docs Jekyll image, deleted)
-FROM ruby:3.3.4
-
-ENV LC_ALL C.UTF-8
-ENV LANG en_US.UTF-8
-ENV LANGUAGE en_US.UTF-8
-
-WORKDIR /usr/src/app
-
-COPY . ./
-RUN gem install bundler && bundle install
-
-EXPOSE 4000
docs/_config.yml
@@ -13,10 +13,11 @@
 # you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
 # You can create any custom variable you would like, and they will be accessible
 # in the templates via {{ site.myvariable }}.
-title: MySQL database backup
+title: MySQL Backup Docker container image
 email: hi@jonaskaninda.com
 description: >- # this means to ignore newlines until "baseurl:"
-  MySQL Backup and Restore Docker container image. Backup database to AWS S3 storage or SSH remote server.
+  MySQL Backup is a Docker container image that can be used to backup and restore MySQL database.
+  It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.

 baseurl: "" # the subpath of your site, e.g. /blog
 url: "jkaninda.github.io/mysql-bkup/" # the base hostname & protocol for your site, e.g. http://example.com
@@ -1,13 +0,0 @@ (docs Jekyll compose file, deleted)
-services:
-  jekyll:
-    build:
-      context: ./
-    ports:
-      - 4000:4000
-    environment:
-      - JEKYLL_ENV=development
-    volumes:
-      - .:/usr/src/app
-    stdin_open: true
-    tty: true
-    command: bundle exec jekyll serve -H 0.0.0.0 -t
docs/favicon.ico (new binary file, 4.2 KiB; binary file not shown)
docs/how-tos/backup-to-ftp.md (new file)
@@ -0,0 +1,44 @@
+---
+title: Backup to FTP remote server
+layout: default
+parent: How Tos
+nav_order: 4
+---
+# Backup to FTP remote server
+
+As described for SSH backup section, to change the storage of your backup and use FTP Remote server as storage. You need to add `--storage ftp`.
+You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `REMOTE_PATH` environment variable.
+
+{: .note }
+These environment variables are required for SSH backup `FTP_HOST`, `FTP_USER`, `REMOTE_PATH`, `FTP_PORT` or `FTP_PASSWORD`.
+
+```yml
+services:
+  mysql-bkup:
+    # In production, it is advised to lock your image tag to a proper
+    # release version instead of using `latest`.
+    # Check https://github.com/jkaninda/mysql-bkup/releases
+    # for a list of available releases.
+    image: jkaninda/mysql-bkup
+    container_name: mysql-bkup
+    command: backup --storage ftp -d database
+    environment:
+      - DB_PORT=3306
+      - DB_HOST=postgres
+      - DB_NAME=database
+      - DB_USERNAME=username
+      - DB_PASSWORD=password
+      ## FTP config
+      - FTP_HOST="hostname"
+      - FTP_PORT=21
+      - FTP_USER=user
+      - FTP_PASSWORD=password
+      - REMOTE_PATH=/home/jkaninda/backups
+
+    # pg-bkup container must be connected to the same network with your database
+    networks:
+      - web
+networks:
+  web:
+```
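For a one-off run without compose, the same FTP variables from the page above can be passed directly on the command line; a sketch with placeholder host, network and credentials:

```shell
docker run --rm --network your_network_name \
  -e DB_HOST=mysql -e DB_USERNAME=username -e DB_PASSWORD=password \
  -e FTP_HOST=ftp.example.com -e FTP_PORT=21 \
  -e FTP_USER=user -e FTP_PASSWORD=password \
  -e REMOTE_PATH=/home/jkaninda/backups \
  jkaninda/mysql-bkup backup --storage ftp -d database
```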
docs/how-tos/backup-to-s3.md
@@ -22,10 +22,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage s3 -d database --path /my-custom-path
+    command: backup --storage s3 -d database --path /my-custom-path
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql
@@ -51,7 +48,7 @@ networks:
 ### Recurring backups to S3

 As explained above, you need just to add AWS environment variables and specify the storage type `--storage s3`.
-In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
+In case you need to use recurring backups, you can use `--cron-expression "0 1 * * *"` flag or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.

 ```yml
 services:
@@ -62,10 +59,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
+    command: backup --storage s3 -d my-database --cron-expression "0 1 * * *"
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql
@@ -78,6 +72,7 @@ services:
       - AWS_REGION="us-west-2"
       - AWS_ACCESS_KEY=xxxx
      - AWS_SECRET_KEY=xxxxx
+      # - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
       ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
       - AWS_DISABLE_SSL="false"
     # mysql-bkup container must be connected to the same network with your database
@@ -110,7 +105,7 @@ spec:
           command:
             - /bin/sh
             - -c
-            - mysql-bkup backup -s s3 --path /custom_path
+            - backup -s s3 --path /custom_path
           env:
             - name: DB_PORT
               value: "3306"
docs/how-tos/backup-to-ssh.md
@@ -7,11 +7,11 @@ nav_order: 3
 # Backup to SSH remote server

-As described for s3 backup section, to change the storage of you backup and use S3 as storage. You need to add `--storage ssh` or `--storage remote`.
-You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `SSH_REMOTE_PATH` environment variable.
+As described for s3 backup section, to change the storage of your backup and use SSH Remote server as storage. You need to add `--storage ssh` or `--storage remote`.
+You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `REMOTE_PATH` environment variable.

 {: .note }
-These environment variables are required for SSH backup `SSH_HOST_NAME`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, `SSH_PORT` or `SSH_PASSWORD` if you dont use a private key to access to your server.
+These environment variables are required for SSH backup `SSH_HOST`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, `SSH_PORT` or `SSH_PASSWORD` if you dont use a private key to access to your server.
 Accessing the remote server using password is not recommended, use private key instead.

 ```yml
@@ -23,10 +23,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage remote -d database
+    command: backup --storage remote -d database
     volumes:
       - ./id_ed25519:/tmp/id_ed25519"
     environment:
@@ -36,10 +33,10 @@ services:
       - DB_USERNAME=username
       - DB_PASSWORD=password
       ## SSH config
-      - SSH_HOST_NAME="hostname"
+      - SSH_HOST="hostname"
       - SSH_PORT=22
       - SSH_USER=user
-      - SSH_REMOTE_PATH=/home/jkaninda/backups
+      - REMOTE_PATH=/home/jkaninda/backups
       - SSH_IDENTIFY_FILE=/tmp/id_ed25519
       ## We advise you to use a private jey instead of password
       #- SSH_PASSWORD=password
@@ -55,7 +52,7 @@ networks:
 ### Recurring backups to SSH remote server

 As explained above, you need just to add required environment variables and specify the storage type `--storage ssh`.
-You can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
+You can use `--cron-expression "* * * * *"` or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.

 ```yml
 services:
@@ -66,10 +63,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup -d database --storage ssh --mode scheduled --period "0 1 * * *"
+    command: backup -d database --storage ssh --cron-expression "0 1 * * *"
     volumes:
       - ./id_ed25519:/tmp/id_ed25519"
     environment:
@@ -79,11 +73,12 @@ services:
       - DB_USERNAME=username
       - DB_PASSWORD=password
       ## SSH config
-      - SSH_HOST_NAME="hostname"
+      - SSH_HOST="hostname"
       - SSH_PORT=22
       - SSH_USER=user
-      - SSH_REMOTE_PATH=/home/jkaninda/backups
+      - REMOTE_PATH=/home/jkaninda/backups
       - SSH_IDENTIFY_FILE=/tmp/id_ed25519
+      # - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
       ## We advise you to use a private jey instead of password
       #- SSH_PASSWORD=password
     # mysql-bkup container must be connected to the same network with your database
@@ -117,7 +112,7 @@ spec:
           command:
             - /bin/sh
            - -c
-            - mysql-bkup backup -s ssh
+            - backup -s ssh
           env:
             - name: DB_PORT
               value: "3306"
@@ -130,13 +125,13 @@ spec:
             # Please use secret!
             - name: DB_PASSWORD
               value: ""
-            - name: SSH_HOST_NAME
+            - name: SSH_HOST
               value: ""
             - name: SSH_PORT
               value: "22"
             - name: SSH_USER
               value: "xxx"
-            - name: SSH_REMOTE_PATH
+            - name: REMOTE_PATH
               value: "/home/jkaninda/backups"
             - name: AWS_ACCESS_KEY
               value: "xxxx"
docs/how-tos/backup.md
@@ -7,7 +7,7 @@ nav_order: 1

 # Backup database

-To backup the database, you need to add `backup` subcommand to `mysql-bkup` or `bkup`.
+To backup the database, you need to add `backup` command.

 {: .note }
 The default storage is local storage mounted to __/backup__. The backup is compressed by default using gzip. The flag __`disable-compression`__ can be used when you need to disable backup compression.
@@ -27,10 +27,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup -d database
+    command: backup -d database
     volumes:
       - ./backup:/backup
     environment:
@@ -54,10 +51,10 @@ networks:
   -e "DB_HOST=dbhost" \
   -e "DB_USERNAME=username" \
   -e "DB_PASSWORD=password" \
-  jkaninda/mysql-bkup mysql-bkup backup -d database_name
+  jkaninda/mysql-bkup backup -d database_name
 ```

-In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
+In case you need to use recurring backups, you can use `--cron-expression "0 1 * * *"` flag or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.

 ```yml
 services:
@@ -68,10 +65,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup -d database --mode scheduled --period "0 1 * * *"
+    command: backup -d database --cron-expression "0 1 * * *"
     volumes:
       - ./backup:/backup
     environment:
@@ -80,6 +74,7 @@ services:
       - DB_NAME=database
       - DB_USERNAME=username
       - DB_PASSWORD=password
+      - BACKUP_CRON_EXPRESSION=0 1 * * *
     # mysql-bkup container must be connected to the same network with your database
     networks:
       - web
docs/how-tos/deploy-on-kubernetes.md (new file)
@@ -0,0 +1,303 @@
+---
+title: Deploy on Kubernetes
+layout: default
+parent: How Tos
+nav_order: 9
+---
+
+## Deploy on Kubernetes
+
+To deploy MySQL Backup on Kubernetes, you can use Job to backup or Restore your database.
+For recurring backup you can use CronJob, you don't need to run it in scheduled mode. as described bellow.
+
+## Backup to S3 storage
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: backup
+spec:
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          # In production, it is advised to lock your image tag to a proper
+          # release version instead of using `latest`.
+          # Check https://github.com/jkaninda/mysql-bkup/releases
+          # for a list of available releases.
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - backup --storage s3
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+          env:
+            - name: DB_PORT
+              value: "3306"
+            - name: DB_HOST
+              value: ""
+            - name: DB_NAME
+              value: "dbname"
+            - name: DB_USERNAME
+              value: "username"
+            # Please use secret!
+            - name: DB_PASSWORD
+              value: ""
+            - name: AWS_S3_ENDPOINT
+              value: "https://s3.amazonaws.com"
+            - name: AWS_S3_BUCKET_NAME
+              value: "xxx"
+            - name: AWS_REGION
+              value: "us-west-2"
+            - name: AWS_ACCESS_KEY
+              value: "xxxx"
+            - name: AWS_SECRET_KEY
+              value: "xxxx"
+            - name: AWS_DISABLE_SSL
+              value: "false"
+      restartPolicy: Never
+```
+
+## Backup Job to SSH remote server
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: backup
+spec:
+  ttlSecondsAfterFinished: 100
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          # In production, it is advised to lock your image tag to a proper
+          # release version instead of using `latest`.
+          # Check https://github.com/jkaninda/mysql-bkup/releases
+          # for a list of available releases.
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - bkup
+            - backup
+            - --storage
+            - ssh
+            - --disable-compression
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+          env:
+            - name: DB_PORT
+              value: "3306"
+            - name: DB_HOST
+              value: ""
+            - name: DB_NAME
+              value: "dbname"
+            - name: DB_USERNAME
+              value: "username"
+            # Please use secret!
+            - name: DB_PASSWORD
+              value: ""
+            - name: SSH_HOST_NAME
+              value: "xxx"
+            - name: SSH_PORT
+              value: "22"
+            - name: SSH_USER
+              value: "xxx"
+            - name: SSH_PASSWORD
+              value: "xxxx"
+            - name: SSH_REMOTE_PATH
+              value: "/home/toto/backup"
+            # Optional, required if you want to encrypt your backup
+            - name: GPG_PASSPHRASE
+              value: "xxxx"
+      restartPolicy: Never
+```
+
+## Restore Job
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: restore-job
+spec:
+  ttlSecondsAfterFinished: 100
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          # In production, it is advised to lock your image tag to a proper
+          # release version instead of using `latest`.
+          # Check https://github.com/jkaninda/mysql-bkup/releases
+          # for a list of available releases.
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - bkup
+            - restore
+            - --storage
+            - ssh
+            - --file store_20231219_022941.sql.gz
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+          env:
+            - name: DB_PORT
+              value: "3306"
+            - name: DB_HOST
+              value: ""
+            - name: DB_NAME
+              value: "dbname"
+            - name: DB_USERNAME
+              value: "username"
+            # Please use secret!
+            - name: DB_PASSWORD
+              value: ""
+            - name: SSH_HOST_NAME
+              value: "xxx"
+            - name: SSH_PORT
+              value: "22"
+            - name: SSH_USER
+              value: "xxx"
+            - name: SSH_PASSWORD
+              value: "xxxx"
+            - name: SSH_REMOTE_PATH
+              value: "/home/xxxx/backup"
+            # Optional, required if your backup was encrypted
+            #- name: GPG_PASSPHRASE
+            #  value: "xxxx"
+      restartPolicy: Never
+```
+
+## Recurring backup
+
+```yaml
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: backup-job
+spec:
+  schedule: "* * * * *"
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          containers:
+            - name: mysql-bkup
+              image: jkaninda/mysql-bkup
+              command:
+                - /bin/sh
+                - -c
+                - bkup
+                - backup
+                - --storage
+                - ssh
+                - --disable-compression
+              resources:
+                limits:
+                  memory: "128Mi"
+                  cpu: "500m"
+              env:
+                - name: DB_PORT
+                  value: "3306"
+                - name: DB_HOST
+                  value: ""
+                - name: DB_NAME
+                  value: "username"
+                - name: DB_USERNAME
+                  value: "username"
+                # Please use secret!
+                - name: DB_PASSWORD
+                  value: ""
+                - name: SSH_HOST_NAME
+                  value: "xxx"
+                - name: SSH_PORT
+                  value: "xxx"
+                - name: SSH_USER
+                  value: "jkaninda"
+                - name: SSH_REMOTE_PATH
+                  value: "/home/jkaninda/backup"
+                - name: SSH_PASSWORD
+                  value: "password"
+                # Optional, required if you want to encrypt your backup
+                #- name: GPG_PASSPHRASE
+                #  value: "xxx"
+          restartPolicy: Never
+```
+
+## Kubernetes Rootless
+
+This image also supports Kubernetes security context, you can run it in Rootless environment.
+It has been tested on Openshift, it works well.
+Deployment on OpenShift is supported, you need to remove `securityContext` section on your yaml file.
+
+```yaml
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: backup-job
+spec:
+  schedule: "* * * * *"
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          securityContext:
+            runAsUser: 1000
+            runAsGroup: 3000
+            fsGroup: 2000
+          containers:
+            # In production, it is advised to lock your image tag to a proper
+            # release version instead of using `latest`.
+            # Check https://github.com/jkaninda/mysql-bkup/releases
+            # for a list of available releases.
+            - name: mysql-bkup
+              image: jkaninda/mysql-bkup
+              command:
+                - /bin/sh
+                - -c
+                - bkup
+                - backup
+                - --storage
+                - ssh
+                - --disable-compression
+              resources:
+                limits:
+                  memory: "128Mi"
+                  cpu: "500m"
+              env:
+                - name: DB_PORT
+                  value: "3306"
+                - name: DB_HOST
+                  value: ""
+                - name: DB_NAME
+                  value: "xxx"
+                - name: DB_USERNAME
+                  value: "xxx"
+                # Please use secret!
+                - name: DB_PASSWORD
+                  value: ""
+                - name: SSH_HOST_NAME
+                  value: "xxx"
+                - name: SSH_PORT
+                  value: "22"
+                - name: SSH_USER
+                  value: "jkaninda"
+                - name: SSH_REMOTE_PATH
+                  value: "/home/jkaninda/backup"
+                - name: SSH_PASSWORD
+                  value: "password"
+                # Optional, required if you want to encrypt your backup
+                #- name: GPG_PASSPHRASE
+                #  value: "xxx"
+          restartPolicy: OnFailure
+```
docs/how-tos/deprecated-configs.md (new file)
@@ -0,0 +1,6 @@
+---
+title: Update deprecated configurations
+layout: default
+parent: How Tos
+nav_order: 11
+---
@@ -1,27 +1,39 @@ (encrypt backups how-to)
 ---
-title: Encrypt backups using GPG
+title: Encrypt backups
 layout: default
 parent: How Tos
-nav_order: 7
+nav_order: 8
 ---
 # Encrypt backup

-The image supports encrypting backups using GPG out of the box. In case a `GPG_PASSPHRASE` environment variable is set, the backup archive will be encrypted using the given key and saved as a sql.gpg file instead or sql.gz.gpg.
+The image supports encrypting backups using one of two available methods: GPG with passphrase or GPG with a public key.
+
+The image supports encrypting backups using GPG out of the box. In case a `GPG_PASSPHRASE` or `GPG_PUBLIC_KEY` environment variable is set, the backup archive will be encrypted using the given key and saved as a sql.gpg file instead or sql.gz.gpg.

 {: .warning }
 To restore an encrypted backup, you need to provide the same GPG passphrase used during backup process.

+- GPG home directory `/config/gnupg`
+- Cipher algorithm `aes256`
+
+### Decrypt backup
+{: .note }
+The backup encrypted using `GPG passphrase` method can be restored automatically, no need to decrypt it before restoration.
+Suppose you used a GPG public key during the backup process. In that case, you need to decrypt your backup before restoration because decryption using a `GPG private` key is not fully supported.
+
-To decrypt manually, you need to install gnupg
+To decrypt manually, you need to install `gnupg`

 ```shell
 gpg --batch --passphrase "my-passphrase" \
 --output database_20240730_044201.sql.gz \
 --decrypt database_20240730_044201.sql.gz.gpg
 ```
+Using your private key
+
+```shell
+gpg --output database_20240730_044201.sql.gz --decrypt database_20240730_044201.sql.gz.gpg
+```
-### Backup
+## Using GPG passphrase

 ```yml
 services:
@@ -32,10 +44,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup -d database
+    command: backup -d database
     volumes:
       - ./backup:/backup
     environment:
@@ -51,4 +60,32 @@ services:
       - web
 networks:
   web:
 ```
+## Using GPG Public Key
+
+```yml
+services:
+  mysql-bkup:
+    # In production, it is advised to lock your image tag to a proper
+    # release version instead of using `latest`.
+    # Check https://github.com/jkaninda/mysql-bkup/releases
+    # for a list of available releases.
+    image: jkaninda/mysql-bkup
+    container_name: mysql-bkup
+    command: backup -d database
+    volumes:
+      - ./backup:/backup
+    environment:
+      - DB_PORT=3306
+      - DB_HOST=mysql
+      - DB_NAME=database
+      - DB_USERNAME=username
+      - DB_PASSWORD=password
+      ## Required to encrypt backup
+      - GPG_PUBLIC_KEY=/config/public_key.asc
+    # mysql-bkup container must be connected to the same network with your database
+    networks:
+      - web
+networks:
+  web:
+```
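Given the documented passphrase method and `aes256` cipher, manually encrypting an existing dump the same way would look roughly like this; the file name is illustrative, and recent gnupg releases may additionally require `--pinentry-mode loopback`:

```shell
# Symmetric (passphrase) encryption matching the documented cipher
gpg --batch --symmetric --cipher-algo aes256 \
    --passphrase "my-passphrase" \
    --output database_20240730_044201.sql.gz.gpg \
    database_20240730_044201.sql.gz
```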
docs/how-tos/migrate.md (new file)
@@ -0,0 +1,131 @@
+---
+title: Migrate database
+layout: default
+parent: How Tos
+nav_order: 10
+---
+
+# Migrate database
+
+To migrate the database, you need to add `migrate` command.
+
+{: .note }
+The Mysql backup has another great feature: migrating your database from a source database to a target.
+
+As you know, to restore a database from a source to a target database, you need 2 operations: which is to start by backing up the source database and then restoring the source backed database to the target database.
+Instead of proceeding like that, you can use the integrated feature `(migrate)`, which will help you migrate your database by doing only one operation.
+
+{: .warning }
+The `migrate` operation is irreversible, please backup your target database before this action.
+
+### Docker compose
+```yml
+services:
+  mysql-bkup:
+    # In production, it is advised to lock your image tag to a proper
+    # release version instead of using `latest`.
+    # Check https://github.com/jkaninda/mysql-bkup/releases
+    # for a list of available releases.
+    image: jkaninda/mysql-bkup
+    container_name: mysql-bkup
+    command: migrate
+    volumes:
+      - ./backup:/backup
+    environment:
+      ## Source database
+      - DB_PORT=3306
+      - DB_HOST=mysql
+      - DB_NAME=database
+      - DB_USERNAME=username
+      - DB_PASSWORD=password
+      ## Target database
+      - TARGET_DB_HOST=target-mysql
+      - TARGET_DB_PORT=3306
+      - TARGET_DB_NAME=dbname
+      - TARGET_DB_USERNAME=username
+      - TARGET_DB_PASSWORD=password
+    # mysql-bkup container must be connected to the same network with your database
+    networks:
+      - web
+networks:
+  web:
+```
+
+### Migrate database using Docker CLI
+
+```
+## Source database
+DB_HOST=mysql
+DB_PORT=3306
+DB_NAME=dbname
+DB_USERNAME=username
+DB_PASSWORD=password
+
+## Taget database
+TARGET_DB_HOST=target-mysql
+TARGET_DB_PORT=3306
+TARGET_DB_NAME=dbname
+TARGET_DB_USERNAME=username
+TARGET_DB_PASSWORD=password
+```
+
+```shell
+docker run --rm --network your_network_name \
+ --env-file your-env
+ -v $PWD/backup:/backup/ \
+ jkaninda/mysql-bkup migrate
+```
+
+## Kubernetes
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: migrate-db
+spec:
+  ttlSecondsAfterFinished: 100
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          # In production, it is advised to lock your image tag to a proper
+          # release version instead of using `latest`.
+          # Check https://github.com/jkaninda/mysql-bkup/releases
+          # for a list of available releases.
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - migrate
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+          env:
+            ## Source Database
+            - name: DB_HOST
+              value: "mysql"
+            - name: DB_PORT
+              value: "3306"
+            - name: DB_NAME
+              value: "dbname"
+            - name: DB_USERNAME
+              value: "username"
+            - name: DB_PASSWORD
+              value: "password"
+            ## Target Database
+            - name: TARGET_DB_HOST
+              value: "target-mysql"
+            - name: TARGET_DB_PORT
+              value: "3306"
+            - name: TARGET_DB_NAME
+              value: "dbname"
+            - name: TARGET_DB_USERNAME
+              value: "username"
+            - name: TARGET_DB_PASSWORD
+              value: "password"
+      restartPolicy: Never
+```
docs/how-tos/mutli-backup.md (new file)
@@ -0,0 +1,63 @@
+---
+title: Run multiple backup schedules in the same container
+layout: default
+parent: How Tos
+nav_order: 11
+---
+
+Multiple backup schedules with different configuration can be configured by mounting a configuration file into `/config/config.yaml` `/config/config.yml` or by defining an environment variable `BACKUP_CONFIG_FILE=/backup/config.yaml`.
+
+## Configuration file
+
+```yaml
+#cronExpression: "@every 20m" //Optional for scheduled backups
+cronExpression: ""
+databases:
+  - host: mysql1
+    port: 3306
+    name: database1
+    user: database1
+    password: password
+    path: /s3-path/database1 #For SSH or FTP you need to define the full path (/home/toto/backup/)
+  - host: mysql2
+    port: 3306
+    name: lldap
+    user: lldap
+    password: password
+    path: /s3-path/lldap #For SSH or FTP you need to define the full path (/home/toto/backup/)
+  - host: mysql3
+    port: 3306
+    name: keycloak
+    user: keycloak
+    password: password
+    path: /s3-path/keycloak #For SSH or FTP you need to define the full path (/home/toto/backup/)
+  - host: mysql4
+    port: 3306
+    name: joplin
+    user: joplin
+    password: password
+    path: /s3-path/joplin #For SSH or FTP you need to define the full path (/home/toto/backup/)
+```
+## Docker compose file
+
+```yaml
+services:
+  mysql-bkup:
+    # In production, it is advised to lock your image tag to a proper
+    # release version instead of using `latest`.
+    # Check https://github.com/jkaninda/mysql-bkup/releases
+    # for a list of available releases.
+    image: jkaninda/mysql-bkup
+    container_name: mysql-bkup
+    command: backup
+    volumes:
+      - ./backup:/backup
+    environment:
+      ## Multi backup config file
+      - BACKUP_CONFIG_FILE=/backup/config.yaml
+    # mysql-bkup container must be connected to the same network with your database
+    networks:
+      - web
+networks:
+  web:
+```
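The compose file above relies on `BACKUP_CONFIG_FILE`; an equivalent one-off CLI run would mount the config file next to the backups, roughly (network and paths are placeholders):

```shell
docker run --rm --network web \
  -v "$PWD/backup:/backup" \
  -v "$PWD/config.yaml:/backup/config.yaml" \
  -e BACKUP_CONFIG_FILE=/backup/config.yaml \
  jkaninda/mysql-bkup backup
```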
170
docs/how-tos/receive-notification.md
Normal file
170
docs/how-tos/receive-notification.md
Normal file
@@ -0,0 +1,170 @@
|
||||
---
|
||||
title: Receive notifications
|
||||
layout: default
|
||||
parent: How Tos
|
||||
nav_order: 12
|
||||
---
|
||||
Send Email or Telegram notifications on successfully or failed backup.
|
||||
|
||||
### Email
|
||||
To send out email notifications on failed or successfully backup runs, provide SMTP credentials, a sender and a recipient:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
mysql-bkup:
|
||||
image: jkaninda/mysql-bkup
|
||||
container_name: mysql-bkup
|
||||
command: backup
|
||||
volumes:
|
||||
- ./backup:/backup
|
||||
environment:
|
||||
- DB_PORT=3306
|
||||
- DB_HOST=mysql
|
||||
- DB_NAME=database
|
||||
- DB_USERNAME=username
|
||||
- DB_PASSWORD=password
|
||||
- MAIL_HOST=
|
||||
- MAIL_PORT=587
|
||||
- MAIL_USERNAME=
|
||||
- MAIL_PASSWORD=!
|
||||
- MAIL_FROM=
|
||||
- MAIL_TO=me@example.com,team@example.com,manager@example.com
|
||||
- MAIL_SKIP_TLS=false
|
||||
## Time format for notification
|
||||
- TIME_FORMAT=2006-01-02 at 15:04:05
|
||||
## Backup reference, in case you want to identifier every backup instance
|
||||
- BACKUP_REFERENCE=database/Paris cluster
|
||||
networks:
|
||||
- web
|
||||
networks:
|
||||
web:
|
||||
```
|
||||
|
||||
### Telegram
|
||||
|
||||
```yaml
|
||||
services:
|
||||
mysql-bkup:
|
||||
image: jkaninda/mysql-bkup
|
||||
container_name: mysql-bkup
|
||||
command: backup
|
||||
volumes:
|
||||
- ./backup:/backup
|
||||
environment:
|
||||
- DB_PORT=3306
|
||||
- DB_HOST=mysql
|
||||
- DB_NAME=database
|
||||
- DB_USERNAME=username
|
||||
- DB_PASSWORD=password
|
||||
- TG_TOKEN=[BOT ID]:[BOT TOKEN]
|
||||
- TG_CHAT_ID=
|
||||
## Time format for notification
|
||||
- TIME_FORMAT=2006-01-02 at 15:04:05
|
||||
## Backup reference, in case you want to identifier every backup instance
|
||||
- BACKUP_REFERENCE=database/Paris cluster
|
||||
networks:
|
||||
- web
|
||||
networks:
|
||||
web:
|
||||
```
|
||||
|
||||
### Customize notifications

The title and body of the notifications can be tailored to your needs using Go templates.
Template sources must be mounted inside the container in `/config/templates`:

- email.template: Email notification template
- telegram.template: Telegram notification template
- email-error.template: Error notification template (email)
- telegram-error.template: Error notification template (Telegram)

### Data

Here is a list of all data passed to the template:

- `Database`: Database name
- `StartTime`: Time the backup process started
- `EndTime`: Time the backup process ended
- `Storage`: Backup storage
- `BackupLocation`: Backup location
- `BackupSize`: Backup size
- `BackupReference`: Backup reference (e.g. database/cluster name or server name)

> email.template:
```html
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>✅ Database Backup Notification – {{.Database}}</title>
</head>
<body>
<h2>Hi,</h2>
<p>Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.</p>
<h3>Backup Details:</h3>
<ul>
    <li>Database Name: {{.Database}}</li>
    <li>Backup Start Time: {{.StartTime}}</li>
    <li>Backup End Time: {{.EndTime}}</li>
    <li>Backup Storage: {{.Storage}}</li>
    <li>Backup Location: {{.BackupLocation}}</li>
    <li>Backup Size: {{.BackupSize}} bytes</li>
    <li>Backup Reference: {{.BackupReference}}</li>
</ul>
<p>Best regards,</p>
</body>
</html>
```

> telegram.template

```html
✅ Database Backup Notification – {{.Database}}
Hi,
Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.

Backup Details:
- Database Name: {{.Database}}
- Backup Start Time: {{.StartTime}}
- Backup End Time: {{.EndTime}}
- Backup Storage: {{.Storage}}
- Backup Location: {{.BackupLocation}}
- Backup Size: {{.BackupSize}} bytes
- Backup Reference: {{.BackupReference}}
```

> email-error.template

```html
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>🔴 Urgent: Database Backup Failure Notification</title>
</head>
<body>
<h2>Hi,</h2>
<p>An error occurred during database backup.</p>
<h3>Failure Details:</h3>
<ul>
    <li>Error Message: {{.Error}}</li>
    <li>Date: {{.EndTime}}</li>
    <li>Backup Reference: {{.BackupReference}}</li>
</ul>
</body>
</html>
```

> telegram-error.template

```html
🔴 Urgent: Database Backup Failure Notification

An error occurred during database backup.
Failure Details:

Error Message: {{.Error}}
Date: {{.EndTime}}
```
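To preview a custom template outside the container, you can render it with Go's standard `text/template` package, feeding it the fields listed above. This is a minimal sketch with made-up sample values; the tool's real notification type lives in its `utils` package and may differ in detail.

```go
package main

import (
	"os"
	"text/template"
)

// NotificationData carries the fields documented above; the names match the
// placeholders used in the templates ({{.Database}}, {{.EndTime}}, ...).
type NotificationData struct {
	Database        string
	StartTime       string
	EndTime         string
	Storage         string
	BackupLocation  string
	BackupSize      int64
	BackupReference string
}

func main() {
	// Parse the mounted template and render it with sample data.
	tmpl, err := template.ParseFiles("/config/templates/telegram.template")
	if err != nil {
		panic(err)
	}
	data := NotificationData{
		Database:        "store",
		StartTime:       "2024-01-02 at 01:00:00",
		EndTime:         "2024-01-02 at 01:02:11",
		Storage:         "s3",
		BackupLocation:  "/custom_path/store_20240102_010000.sql.gz",
		BackupSize:      1048576,
		BackupReference: "database/Paris cluster",
	}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```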
@@ -2,12 +2,12 @@
 title: Restore database from AWS S3
 layout: default
 parent: How Tos
-nav_order: 5
+nav_order: 6
 ---

 # Restore database from S3 storage

-To restore the database, you need to add `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
+To restore the database, you need to add the `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.

 {: .note }
 It supports __.sql__ and __.sql.gz__ compressed files.
@@ -23,10 +23,7 @@ services:
       # for a list of available releases.
       image: jkaninda/mysql-bkup
       container_name: mysql-bkup
-      command:
-        - /bin/sh
-        - -c
-        - mysql-bkup restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
+      command: restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
       volumes:
         - ./backup:/backup
       environment:
@@ -52,50 +49,47 @@ networks:

 ## Restore on Kubernetes

-### Simple Kubernetes CronJob usage:
+Simple Kubernetes restore Job:

 ```yaml
 apiVersion: batch/v1
-kind: CronJob
+kind: Job
 metadata:
-  name: bkup-job
+  name: restore-db
 spec:
-  schedule: "0 1 * * *"
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-            - name: mysql-bkup
-              image: jkaninda/mysql-bkup
-              command:
-                - /bin/sh
-                - -c
-                - mysql-bkup restore -s s3 --path /custom_path -f store_20231219_022941.sql.gz
-              env:
-                - name: DB_PORT
-                  value: "3306"
-                - name: DB_HOST
-                  value: ""
-                - name: DB_NAME
-                  value: ""
-                - name: DB_USERNAME
-                  value: ""
-                # Please use secret!
-                - name: DB_PASSWORD
-                  value: ""
-                - name: AWS_S3_ENDPOINT
-                  value: "https://s3.amazonaws.com"
-                - name: AWS_S3_BUCKET_NAME
-                  value: "xxx"
-                - name: AWS_REGION
-                  value: "us-west-2"
-                - name: AWS_ACCESS_KEY
-                  value: "xxxx"
-                - name: AWS_SECRET_KEY
-                  value: "xxxx"
-                - name: AWS_DISABLE_SSL
-                  value: "false"
-          restartPolicy: OnFailure
-```
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - restore -s s3 --path /custom_path -f store_20231219_022941.sql.gz
+          env:
+            - name: DB_PORT
+              value: "3306"
+            - name: DB_HOST
+              value: ""
+            - name: DB_NAME
+              value: ""
+            - name: DB_USERNAME
+              value: ""
+            # Please use secret!
+            - name: DB_PASSWORD
+              value: ""
+            - name: AWS_S3_ENDPOINT
+              value: "https://s3.amazonaws.com"
+            - name: AWS_S3_BUCKET_NAME
+              value: "xxx"
+            - name: AWS_REGION
+              value: "us-west-2"
+            - name: AWS_ACCESS_KEY
+              value: "xxxx"
+            - name: AWS_SECRET_KEY
+              value: "xxxx"
+            - name: AWS_DISABLE_SSL
+              value: "false"
+      restartPolicy: Never
+  backoffLimit: 4
+```
@@ -2,11 +2,11 @@
 title: Restore database from SSH
 layout: default
 parent: How Tos
-nav_order: 6
+nav_order: 7
 ---
 # Restore database from SSH remote server

-To restore the database from your remote server, you need to add `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
+To restore the database from your remote server, you need to add the `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.

 {: .note }
 It supports __.sql__ and __.sql.gz__ compressed files.
@@ -22,10 +22,7 @@ services:
       # for a list of available releases.
       image: jkaninda/mysql-bkup
       container_name: mysql-bkup
-      command:
-        - /bin/sh
-        - -c
-        - mysql-bkup restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
+      command: restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
       volumes:
         - ./backup:/backup
       environment:
@@ -50,49 +47,47 @@ networks:
 ```
 ## Restore on Kubernetes

-Simple Kubernetes CronJob usage:
+Simple Kubernetes restore Job:

 ```yaml
 apiVersion: batch/v1
-kind: CronJob
+kind: Job
 metadata:
-  name: bkup-job
+  name: restore-db
 spec:
-  schedule: "0 1 * * *"
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-            - name: mysql-bkup
-              image: jkaninda/mysql-bkup
-              command:
-                - /bin/sh
-                - -c
-                - mysql-bkup restore -s ssh -f store_20231219_022941.sql.gz
-              env:
-                - name: DB_PORT
-                  value: "3306"
-                - name: DB_HOST
-                  value: ""
-                - name: DB_NAME
-                  value: ""
-                - name: DB_USERNAME
-                  value: ""
-                # Please use secret!
-                - name: DB_PASSWORD
-                  value: ""
-                - name: SSH_HOST_NAME
-                  value: ""
-                - name: SSH_PORT
-                  value: "22"
-                - name: SSH_USER
-                  value: "xxx"
-                - name: SSH_REMOTE_PATH
-                  value: "/home/jkaninda/backups"
-                - name: AWS_ACCESS_KEY
-                  value: "xxxx"
-                - name: SSH_IDENTIFY_FILE
-                  value: "/tmp/id_ed25519"
-          restartPolicy: Never
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - restore -s ssh -f store_20231219_022941.sql.gz
+          env:
+            - name: DB_PORT
+              value: "3306"
+            - name: DB_HOST
+              value: ""
+            - name: DB_NAME
+              value: ""
+            - name: DB_USERNAME
+              value: ""
+            # Please use secret!
+            - name: DB_PASSWORD
+              value: ""
+            - name: SSH_HOST_NAME
+              value: ""
+            - name: SSH_PORT
+              value: "22"
+            - name: SSH_USER
+              value: "xxx"
+            - name: SSH_REMOTE_PATH
+              value: "/home/jkaninda/backups"
+            - name: AWS_ACCESS_KEY
+              value: "xxxx"
+            - name: SSH_IDENTIFY_FILE
+              value: "/tmp/id_ed25519"
+      restartPolicy: Never
+  backoffLimit: 4
+```
@@ -2,12 +2,12 @@
 title: Restore database
 layout: default
 parent: How Tos
-nav_order: 4
+nav_order: 5
 ---

 # Restore database

-To restore the database, you need to add `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
+To restore the database, you need to add the `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.

 {: .note }
 It supports __.sql__ and __.sql.gz__ compressed files.
@@ -23,10 +23,7 @@ services:
       # for a list of available releases.
       image: jkaninda/mysql-bkup
       container_name: mysql-bkup
-      command:
-        - /bin/sh
-        - -c
-        - mysql-bkup restore -d database -f store_20231219_022941.sql.gz
+      command: restore -d database -f store_20231219_022941.sql.gz
       volumes:
         - ./backup:/backup
       environment:
@@ -6,7 +6,7 @@ nav_order: 1

 # About mysql-bkup
 {:.no_toc}
-mysql-bkup is a Docker container image that can be used to backup and restore MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
+MySQL Backup is a Docker container image that can be used to backup, restore and migrate MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, FTP and SSH remote storage.
 It also supports __encrypting__ your backups using GPG.

 We are open to receiving stars, PRs, and issues!
@@ -19,7 +19,8 @@ We are open to receiving stars, PRs, and issues!
 The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
 It handles __recurring__ backups of MySQL database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.

-It also supports __encrypting__ your backups using GPG.
+It also supports database __encryption__ using GPG.

 {: .note }
 Code and documentation for `v1` version on [this branch][v1-branch].
@@ -32,7 +33,7 @@ Code and documentation for `v1` version on [this branch][v1-branch].

 ### Simple backup using Docker CLI

-To run a one time backup, bind your local volume to `/backup` in the container and run the `mysql-bkup backup` command:
+To run a one time backup, bind your local volume to `/backup` in the container and run the `backup` command:

 ```shell
 docker run --rm --network your_network_name \
@@ -40,11 +41,18 @@ To run a one time backup, bind your local volume to `/backup` in the container and run the `backup` command:
   -e "DB_HOST=dbhost" \
   -e "DB_USERNAME=username" \
   -e "DB_PASSWORD=password" \
-  jkaninda/mysql-bkup mysql-bkup backup -d database_name
+  jkaninda/mysql-bkup backup -d database_name
 ```

+Alternatively, pass a `--env-file` in order to use a full config as described below.
+
+```shell
+docker run --rm --network your_network_name \
+  --env-file your-env-file \
+  -v $PWD/backup:/backup/ \
+  jkaninda/mysql-bkup backup -d database_name
+```
+
 ### Simple backup in docker compose file

 ```yaml
@@ -56,24 +64,77 @@ services:
       # for a list of available releases.
       image: jkaninda/mysql-bkup
       container_name: mysql-bkup
-      command:
-        - /bin/sh
-        - -c
-        - mysql-bkup backup
+      command: backup
       volumes:
         - ./backup:/backup
       environment:
         - DB_PORT=3306
-        - DB_HOST=postgres
+        - DB_HOST=mysql
         - DB_NAME=foo
         - DB_USERNAME=bar
         - DB_PASSWORD=password
         - TZ=Europe/Paris
     # The mysql-bkup container must be connected to the same network as your database
     networks:
       - web
 networks:
   web:
 ```
+### Docker recurring backup
+
+```shell
+docker run --rm --network network_name \
+  -v $PWD/backup:/backup/ \
+  -e "DB_HOST=hostname" \
+  -e "DB_USERNAME=user" \
+  -e "DB_PASSWORD=password" \
+  jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1m"
+```
+See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
+
+## Kubernetes
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: backup-job
+spec:
+  ttlSecondsAfterFinished: 100
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          # In production, it is advised to lock your image tag to a proper
+          # release version instead of using `latest`.
+          # Check https://github.com/jkaninda/mysql-bkup/releases
+          # for a list of available releases.
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - backup -d dbname
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+          env:
+            - name: DB_HOST
+              value: "mysql"
+            - name: DB_USERNAME
+              value: "user"
+            - name: DB_PASSWORD
+              value: "password"
+          volumeMounts:
+            - mountPath: /backup
+              name: backup
+      volumes:
+        - name: backup
+          hostPath:
+            path: /home/toto/backup # directory location on host
+            type: Directory # this field is optional
+      restartPolicy: Never
+```
## Available image registries

@@ -81,8 +142,8 @@ This Docker image is published to both Docker Hub and the GitHub container registry.
 Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`:

 ```
-docker pull jkaninda/mysql-bkup:v1.0
-docker pull ghcr.io/jkaninda/mysql-bkup:v1.0
+docker pull jkaninda/mysql-bkup
+docker pull ghcr.io/jkaninda/mysql-bkup
 ```

 Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
@@ -96,7 +157,7 @@ While it may work against different implementations, there are no guarantees about its behavior.

 We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:

-- The original image is based on `ubuntu` and requires additional tools, making it heavy.
+- The original image is based on `alpine` and requires additional tools, making it heavy.
 - This image is written in Go.
 - `arm64` and `arm/v7` architectures are supported.
 - Docker in Swarm mode is supported.
@@ -21,7 +21,7 @@ In the old version, S3 storage was mounted using s3fs, so we decided to migrate

-| Options    | Shorts | Usage                      |
+| Options    | Shorts | Usage                      |
 | mysql-bkup | bkup   | CLI utility                |
 | backup     |        | Backup database operation  |
 | restore    |        | Restore database operation |
 | history    |        | Show the history of backup |
@@ -6,7 +6,7 @@ nav_order: 2

 # Configuration reference

-Backup and restore targets, schedule and retention are configured using environment variables or flags.
+Backup, restore and migrate targets, schedule and retention are configured using environment variables or flags.
@@ -19,48 +19,63 @@ Backup, restore and migrate targets, schedule and retention are configured using environment variables or flags.
 | mysql-bkup            | bkup | CLI utility                                                                            |
 | backup                |      | Backup database operation                                                              |
 | restore               |      | Restore database operation                                                             |
 | migrate               |      | Migrate database from one instance to another one                                      |
 | --storage             | -s   | Storage. local or s3 (default: local)                                                  |
 | --file                | -f   | File name for restoration                                                              |
 | --path                |      | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`  |
 | --dbname              | -d   | Database name                                                                          |
 | --port                | -p   | Database port (default: 3306)                                                          |
 | --mode                | -m   | Execution mode. default or scheduled (default: default)                                |
 | --disable-compression |      | Disable database backup compression                                                    |
 | --prune               |      | Delete old backup, default disabled                                                    |
 | --keep-last           |      | Delete old backup created more than specified days ago, default 7 days                 |
 | --period              |      | Crontab period for scheduled mode only. (default: "0 1 * * *")                         |
 | --cron-expression     |      | Backup cron expression, eg: (* * * * *) or @daily                                      |
 | --help                | -h   | Print this help message and exit                                                       |
 | --version             | -V   | Print version information and exit                                                     |
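For orientation, these flags correspond to an ordinary `spf13/cobra` command (the CLI library this project depends on per its go.mod). The following is only a stripped-down sketch of how such flags could be wired up; the real wiring lives in the project's `cmd` package and will differ:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// Minimal `backup` command mirroring a few flags from the table above.
	backupCmd := &cobra.Command{
		Use: "backup",
		Run: func(cmd *cobra.Command, args []string) {
			storage, _ := cmd.Flags().GetString("storage")
			dbname, _ := cmd.Flags().GetString("dbname")
			fmt.Printf("backing up %s to %s storage\n", dbname, storage)
		},
	}
	backupCmd.Flags().StringP("storage", "s", "local", "Storage. local or s3")
	backupCmd.Flags().StringP("dbname", "d", "", "Database name")
	backupCmd.Flags().StringP("port", "p", "3306", "Database port")
	if err := backupCmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
```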
## Environment variables

-| Name              | Requirement                                      | Description                                          |
-|-------------------|--------------------------------------------------|------------------------------------------------------|
-| DB_PORT           | Optional, default 3306                           | Database port number                                 |
-| DB_HOST           | Required                                         | Database host                                        |
-| DB_NAME           | Optional if it was provided from the -d flag     | Database name                                        |
-| DB_USERNAME       | Required                                         | Database user name                                   |
-| DB_PASSWORD       | Required                                         | Database password                                    |
-| AWS_ACCESS_KEY    | Optional, required for S3 storage                | AWS S3 Access Key                                    |
-| AWS_SECRET_KEY    | Optional, required for S3 storage                | AWS S3 Secret Key                                    |
-| AWS_BUCKET_NAME   | Optional, required for S3 storage                | AWS S3 Bucket Name                                   |
-| AWS_REGION        | Optional, required for S3 storage                | AWS Region                                           |
-| AWS_DISABLE_SSL   | Optional, required for S3 storage                | Disable SSL                                          |
-| FILE_NAME         | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
-| Gmysql_PASSPHRASE | Optional, required to encrypt and restore backup | Gmysql passphrase                                    |
-| SSH_HOST_NAME     | Optional, required for SSH storage               | ssh remote hostname or ip                            |
-| SSH_USER          | Optional, required for SSH storage               | ssh remote user                                      |
-| SSH_PASSWORD      | Optional, required for SSH storage               | ssh remote user's password                           |
-| SSH_IDENTIFY_FILE | Optional, required for SSH storage               | ssh remote user's private key                        |
-| SSH_PORT          | Optional, required for SSH storage               | ssh remote server port                               |
-| SSH_REMOTE_PATH   | Optional, required for SSH storage               | ssh remote path (/home/toto/backup)                  |
+| Name                   | Requirement                                                   | Description                                                     |
+|------------------------|---------------------------------------------------------------|-----------------------------------------------------------------|
+| DB_PORT                | Optional, default 3306                                        | Database port number                                            |
+| DB_HOST                | Required                                                      | Database host                                                   |
+| DB_NAME                | Optional if it was provided from the -d flag                  | Database name                                                   |
+| DB_USERNAME            | Required                                                      | Database user name                                              |
+| DB_PASSWORD            | Required                                                      | Database password                                               |
+| AWS_ACCESS_KEY         | Optional, required for S3 storage                             | AWS S3 Access Key                                               |
+| AWS_SECRET_KEY         | Optional, required for S3 storage                             | AWS S3 Secret Key                                               |
+| AWS_BUCKET_NAME        | Optional, required for S3 storage                             | AWS S3 Bucket Name                                              |
+| AWS_REGION             | Optional, required for S3 storage                             | AWS Region                                                      |
+| AWS_DISABLE_SSL        | Optional, required for S3 storage                             | Disable SSL                                                     |
+| AWS_FORCE_PATH_STYLE   | Optional, required for S3 storage                             | Force path style                                                |
+| FILE_NAME              | Optional if it was provided from the --file flag              | Database file to restore (extensions: .sql, .sql.gz)            |
+| GPG_PASSPHRASE         | Optional, required to encrypt and restore backup              | GPG passphrase                                                  |
+| GPG_PUBLIC_KEY         | Optional, required to encrypt backup                          | GPG public key, used to encrypt backup (/config/public_key.asc) |
+| BACKUP_CRON_EXPRESSION | Optional if it was provided from the `--cron-expression` flag | Backup cron expression for docker in scheduled mode             |
+| SSH_HOST               | Optional, required for SSH storage                            | ssh remote hostname or ip                                       |
+| SSH_USER               | Optional, required for SSH storage                            | ssh remote user                                                 |
+| SSH_PASSWORD           | Optional, required for SSH storage                            | ssh remote user's password                                      |
+| SSH_IDENTIFY_FILE      | Optional, required for SSH storage                            | ssh remote user's private key                                   |
+| SSH_PORT               | Optional, required for SSH storage                            | ssh remote server port                                          |
+| REMOTE_PATH            | Optional, required for SSH or FTP storage                     | remote path (/home/toto/backup)                                 |
+| FTP_HOST               | Optional, required for FTP storage                            | FTP host name                                                   |
+| FTP_PORT               | Optional, required for FTP storage                            | FTP server port number                                          |
+| FTP_USER               | Optional, required for FTP storage                            | FTP user                                                        |
+| FTP_PASSWORD           | Optional, required for FTP storage                            | FTP user password                                               |
+| TARGET_DB_HOST         | Optional, required for database migration                     | Target database host                                            |
+| TARGET_DB_PORT         | Optional, required for database migration                     | Target database port                                            |
+| TARGET_DB_NAME         | Optional, required for database migration                     | Target database name                                            |
+| TARGET_DB_USERNAME     | Optional, required for database migration                     | Target database username                                        |
+| TARGET_DB_PASSWORD     | Optional, required for database migration                     | Target database password                                        |
+| TG_TOKEN               | Optional, required for Telegram notification                  | Telegram token (`BOT-ID:BOT-TOKEN`)                             |
+| TG_CHAT_ID             | Optional, required for Telegram notification                  | Telegram Chat ID                                                |
+| TZ                     | Optional                                                      | Time Zone                                                       |
---
## Run in Scheduled mode

 This image can be run as a CronJob in Kubernetes for regular backups, which makes deployment on Kubernetes easy since Kubernetes already provides a CronJob resource.
-For Docker, you need to run it in scheduled mode by adding the `--mode scheduled` flag and specifying the periodical backup time by adding the `--period "0 1 * * *"` flag.
+For Docker, you need to run it in scheduled mode by adding the `--cron-expression "* * * * *"` flag or by defining the `BACKUP_CRON_EXPRESSION=0 1 * * *` environment variable.
## Syntax of crontab (field description)

@@ -102,4 +117,22 @@ Easy to remember format:

 ```conf
 0 1 * * *
 ```
+## Predefined schedules
+You may use one of several pre-defined schedules in place of a cron expression.
+
+| Entry                  | Description                                | Equivalent To |
+|------------------------|--------------------------------------------|---------------|
+| @yearly (or @annually) | Run once a year, midnight, Jan. 1st        | 0 0 1 1 *     |
+| @monthly               | Run once a month, midnight, first of month | 0 0 1 * *     |
+| @weekly                | Run once a week, midnight between Sat/Sun  | 0 0 * * 0     |
+| @daily (or @midnight)  | Run once a day, midnight                   | 0 0 * * *     |
+| @hourly                | Run once an hour, beginning of hour        | 0 * * * *     |
+
+### Intervals
+You may also schedule backup tasks at fixed intervals, starting at the time the schedule is added or cron is run. This is supported by formatting the cron spec like this:
+
+    @every <duration>
+
+where "duration" is a string accepted by Go's `time.ParseDuration`.
+
+For example, "@every 1h30m10s" would indicate a schedule that activates after 1 hour, 30 minutes, 10 seconds, and then every interval after that.
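All of these schedule formats (standard five-field specs, the `@` descriptors, and `@every` intervals) are understood by the `robfig/cron/v3` library this project depends on. A small sketch, assuming validation via `cron.ParseStandard` (the project's own `IsValidCronExpression` helper may be implemented differently):

```go
package main

import (
	"fmt"

	"github.com/robfig/cron/v3"
)

func main() {
	// Standard 5-field expressions, predefined descriptors, and @every
	// intervals are all accepted by the same standard parser.
	exprs := []string{"0 1 * * *", "@daily", "@every 1h30m10s", "not-a-schedule"}
	for _, expr := range exprs {
		if _, err := cron.ParseStandard(expr); err != nil {
			fmt.Printf("invalid %q: %v\n", expr, err)
			continue
		}
		fmt.Printf("valid   %q\n", expr)
	}
}
```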
@@ -6,10 +6,7 @@ services:
       # for a list of available releases.
       image: jkaninda/mysql-bkup
       container_name: mysql-bkup
-      command:
-        - /bin/sh
-        - -c
-        - mysql-bkup backup --storage s3 -d my-database
+      command: backup --storage s3 -d my-database
       environment:
         - DB_PORT=3306
         - DB_HOST=mysql
@@ -1,16 +1,17 @@
 version: "3"
 services:
   mysql-bkup:
     # In production, it is advised to lock your image tag to a proper
     # release version instead of using `latest`.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --dbname database_name --mode scheduled --period "0 1 * * *"
+    command: backup --dbname database_name
     volumes:
       - ./backup:/backup
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql
       - DB_USERNAME=userName
       - DB_PASSWORD=${DB_PASSWORD}
+      # See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
+      - BACKUP_CRON_EXPRESSION=@daily # @every 5m | @weekly | @monthly | 0 1 * * *
@@ -6,10 +6,7 @@ services:
       # for a list of available releases.
       image: jkaninda/mysql-bkup
       container_name: mysql-bkup
-      command:
-        - /bin/sh
-        - -c
-        - mysql-bkup backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
+      command: backup --storage s3 -d my-database
       environment:
         - DB_PORT=3306
         - DB_HOST=mysql
@@ -24,6 +21,8 @@ services:
         - AWS_SECRET_KEY=xxxxx
         ## If you are using an S3 alternative such as Minio and your Minio instance is not secured, change this to true
         - AWS_DISABLE_SSL="false"
+        # See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
+        - BACKUP_CRON_EXPRESSION=@daily # @every 5m | @weekly | @monthly | 0 1 * * *
       # The mysql-bkup container must be connected to the same network as your database
       networks:
         - web
@@ -3,10 +3,7 @@ services:
   mysql-bkup:
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --dbname database_name
+    command: backup --dbname database_name
     volumes:
       - ./backup:/backup
     environment:
@@ -1,44 +1,47 @@
-piVersion: batch/v1
-kind: CronJob
+apiVersion: batch/v1
+kind: Job
 metadata:
-  name: bkup-job
+  name: backup
 spec:
-  schedule: "0 1 * * *"
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-            - name: mysql-bkup
-              image: jkaninda/mysql-bkup
-              command:
-                - /bin/sh
-                - -c
-                - mysql-bkup backup -s s3 --path /custom_path
-              env:
-                - name: DB_PORT
-                  value: "3306"
-                - name: DB_HOST
-                  value: ""
-                - name: DB_NAME
-                  value: ""
-                - name: DB_USERNAME
-                  value: ""
-                # Please use secret!
-                - name: DB_PASSWORD
-                  value: ""
-                - name: ACCESS_KEY
-                  value: ""
-                - name: AWS_S3_ENDPOINT
-                  value: "https://s3.amazonaws.com"
-                - name: AWS_S3_BUCKET_NAME
-                  value: "xxx"
-                - name: AWS_REGION
-                  value: "us-west-2"
-                - name: AWS_ACCESS_KEY
-                  value: "xxxx"
-                - name: AWS_SECRET_KEY
-                  value: "xxxx"
-                - name: AWS_DISABLE_SSL
-                  value: "false"
-          restartPolicy: OnFailure
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          # In production, it is advised to lock your image tag to a proper
+          # release version instead of using `latest`.
+          # Check https://github.com/jkaninda/mysql-bkup/releases
+          # for a list of available releases.
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - backup --storage s3
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+          env:
+            - name: DB_PORT
+              value: "3306"
+            - name: DB_HOST
+              value: ""
+            - name: DB_NAME
+              value: "dbname"
+            - name: DB_USERNAME
+              value: "username"
+            # Please use secret!
+            - name: DB_PASSWORD
+              value: ""
+            - name: AWS_S3_ENDPOINT
+              value: "https://s3.amazonaws.com"
+            - name: AWS_S3_BUCKET_NAME
+              value: "xxx"
+            - name: AWS_REGION
+              value: "us-west-2"
+            - name: AWS_ACCESS_KEY
+              value: "xxxx"
+            - name: AWS_SECRET_KEY
+              value: "xxxx"
+            - name: AWS_DISABLE_SSL
+              value: "false"
+      restartPolicy: Never
go.mod
@@ -5,17 +5,30 @@ go 1.22.5
require github.com/spf13/pflag v1.0.5

require (
	github.com/ProtonMail/gopenpgp/v2 v2.7.5
	github.com/aws/aws-sdk-go v1.55.3
	github.com/bramvdbogaerde/go-scp v1.5.0
	github.com/hpcloud/tail v1.0.0
	github.com/jlaffaye/ftp v0.2.0
	github.com/robfig/cron/v3 v3.0.1
	github.com/spf13/cobra v1.8.0
	golang.org/x/crypto v0.18.0
	gopkg.in/yaml.v3 v3.0.1
)

require (
	github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 // indirect
	github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
	github.com/cloudflare/circl v1.3.3 // indirect
	github.com/go-mail/mail v2.3.1+incompatible // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-multierror v1.1.1 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/jmespath/go-jmespath v0.4.0 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	golang.org/x/sys v0.22.0 // indirect
	golang.org/x/text v0.14.0 // indirect
	gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
	gopkg.in/fsnotify.v1 v1.4.7 // indirect
	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
)
go.sum
@@ -1,20 +1,43 @@
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs=
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
github.com/ProtonMail/gopenpgp/v2 v2.7.5 h1:STOY3vgES59gNgoOt2w0nyHBjKViB/qSg7NjbQWPJkA=
github.com/ProtonMail/gopenpgp/v2 v2.7.5/go.mod h1:IhkNEDaxec6NyzSI0PlxapinnwPVIESk8/76da3Ct3g=
github.com/aws/aws-sdk-go v1.55.3 h1:0B5hOX+mIx7I5XPOrjrHlKSDQV/+ypFZpIHOx5LOk3E=
github.com/aws/aws-sdk-go v1.55.3/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/bramvdbogaerde/go-scp v1.5.0 h1:a9BinAjTfQh273eh7vd3qUgmBC+bx+3TRDtkZWmIpzM=
github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-mail/mail v2.3.1+incompatible h1:UzNOn0k5lpfVtO31cK3hn6I4VEVGhe3lX8AJBAxXExM=
github.com/go-mail/mail v2.3.1+incompatible/go.mod h1:VPWjmmNyRsWXQZHVHT3g0YbIINUkSmuKOiLIDkWbL6M=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
@@ -23,16 +46,64 @@ github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3k
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
main.go
@@ -1,12 +1,11 @@
// Package main /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package main

//main
/*****
* MySQL Backup & Restore
* @author Jonas Kaninda
* @license MIT License <https://opensource.org/licenses/MIT>
* @link https://github.com/jkaninda/mysql-bkup
**/
import "github.com/jkaninda/mysql-bkup/cmd"

func main() {
pkg/backup.go
@@ -1,13 +1,15 @@
-// Package pkg /*
-/*
-Copyright © 2024 Jonas Kaninda
-*/
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg

 import (
 	"fmt"
-	"github.com/hpcloud/tail"
 	"github.com/jkaninda/mysql-bkup/utils"
+	"github.com/robfig/cron/v3"
 	"github.com/spf13/cobra"
 	"log"
 	"os"
@@ -17,134 +19,159 @@ import (
 )
 func StartBackup(cmd *cobra.Command) {
-	_, _ = cmd.Flags().GetString("operation")
-	//Set env
-	utils.SetEnv("STORAGE_PATH", storagePath)
-	utils.GetEnv(cmd, "dbname", "DB_NAME")
-	utils.GetEnv(cmd, "port", "DB_PORT")
-	utils.GetEnv(cmd, "period", "SCHEDULE_PERIOD")
-
-	//Get flag value and set env
-	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
-	storage = utils.GetEnv(cmd, "storage", "STORAGE")
-	file = utils.GetEnv(cmd, "file", "FILE_NAME")
-	backupRetention, _ := cmd.Flags().GetInt("keep-last")
-	prune, _ := cmd.Flags().GetBool("prune")
-	disableCompression, _ = cmd.Flags().GetBool("disable-compression")
-	executionMode, _ = cmd.Flags().GetString("mode")
-	dbName = os.Getenv("DB_NAME")
-	gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
-	_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
-
-	//
-	if gpqPassphrase != "" {
-		encryption = true
-	}
-
-	//Generate file name
-	backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
-	if disableCompression {
-		backupFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
-	}
-
-	if executionMode == "default" {
-		switch storage {
-		case "s3":
-			s3Backup(backupFileName, disableCompression, prune, backupRetention, encryption)
-		case "local":
-			localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
-		case "ssh", "remote":
-			sshBackup(backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
-		case "ftp":
-			utils.Fatal("Not supported storage type: %s", storage)
-		default:
-			localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
+	intro()
+	//Initialize backup configs
+	config := initBackupConfig(cmd)
+	//Load backup configuration file
+	configFile, err := loadConfigFile()
+	if err != nil {
+		dbConf = initDbConfig(cmd)
+		if config.cronExpression == "" {
+			BackupTask(dbConf, config)
+		} else {
+			if utils.IsValidCronExpression(config.cronExpression) {
+				scheduledMode(dbConf, config)
+			} else {
+				utils.Fatal("Cron expression is not valid: %s", config.cronExpression)
+			}
 		}
-
-	} else if executionMode == "scheduled" {
-		scheduledMode(storage)
 	} else {
-		utils.Fatal("Error, unknown execution mode!")
+		startMultiBackup(config, configFile)
 	}

 }

 // Run in scheduled mode
-func scheduledMode(storage string) {
-
-	fmt.Println()
-	fmt.Println("**********************************")
-	fmt.Println("     Starting MySQL Bkup...       ")
-	fmt.Println("***********************************")
+func scheduledMode(db *dbConfig, config *BackupConfig) {
 	utils.Info("Running in Scheduled mode")
-	utils.Info("Execution period %s", os.Getenv("SCHEDULE_PERIOD"))
-	utils.Info("Storage type %s ", storage)
-
-	//Test database connexion
-	utils.TestDatabaseConnection()
+	utils.Info("Backup cron expression: %s", config.cronExpression)
+	utils.Info("Storage type %s ", config.storage)

 	//Test backup
 	utils.Info("Testing backup configurations...")
+	BackupTask(db, config)
 	utils.Info("Testing backup configurations...done")
 	utils.Info("Creating backup job...")
-	CreateCrontabScript(disableCompression, storage)
+	// Create a new cron instance
+	c := cron.New()

-	supervisorConfig := "/etc/supervisor/supervisord.conf"
-
-	// Start Supervisor
-	cmd := exec.Command("supervisord", "-c", supervisorConfig)
-	err := cmd.Start()
+	_, err := c.AddFunc(config.cronExpression, func() {
+		BackupTask(db, config)
+	})
 	if err != nil {
-		utils.Fatal(fmt.Sprintf("Failed to start supervisord: %v", err))
 		return
 	}
+	// Start the cron scheduler
+	c.Start()
+	utils.Info("Creating backup job...done")
+	utils.Info("Backup job started")
-	defer func() {
-		if err := cmd.Process.Kill(); err != nil {
-			utils.Info("Failed to kill supervisord process: %v", err)
-		} else {
-			utils.Info("Supervisor stopped.")
-		}
-	}()
-	if _, err := os.Stat(cronLogFile); os.IsNotExist(err) {
-		utils.Fatal(fmt.Sprintf("Log file %s does not exist.", cronLogFile))
+	defer c.Stop()
+	select {}
 }
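// Note on the flow above: the rewritten scheduledMode runs BackupTask once up
// front as a configuration smoke test, registers the task with an in-process
// robfig/cron scheduler, and parks the goroutine on `select {}` so the
// container stays alive between runs, replacing the old supervisord/crontab
// machinery.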
+func BackupTask(db *dbConfig, config *BackupConfig) {
+	utils.Info("Starting backup task...")
+	//Generate file name
+	backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
+	if config.disableCompression {
+		backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20060102_150405"))
+	}
-	t, err := tail.TailFile(cronLogFile, tail.Config{Follow: true})
+	config.backupFileName = backupFileName
+	switch config.storage {
+	case "local":
+		localBackup(db, config)
+	case "s3", "S3":
+		s3Backup(db, config)
+	case "ssh", "SSH", "remote":
+		sshBackup(db, config)
+	case "ftp", "FTP":
+		ftpBackup(db, config)
+		//utils.Fatal("Not supported storage type: %s", config.storage)
+	default:
+		localBackup(db, config)
+	}
+}
+func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
+	for _, db := range databases {
+		//Check if path is defined in config file
+		if db.Path != "" {
+			bkConfig.remotePath = db.Path
+		}
+		BackupTask(getDatabase(db), bkConfig)
+	}
+}
+func startMultiBackup(bkConfig *BackupConfig, configFile string) {
+	utils.Info("Starting multiple backup jobs...")
+	var conf = &Config{}
+	conf, err := readConf(configFile)
 	if err != nil {
-		utils.Fatal("Failed to tail file: %v", err)
+		utils.Fatal("Error reading config file: %s", err)
 	}
+	//Check if cronExpression is defined in config file
+	if conf.CronExpression != "" {
+		bkConfig.cronExpression = conf.CronExpression
+	}
+	// Check if cronExpression is defined
+	if bkConfig.cronExpression == "" {
+		multiBackupTask(conf.Databases, bkConfig)
+	} else {
+		// Check if cronExpression is valid
+		if utils.IsValidCronExpression(bkConfig.cronExpression) {
+			utils.Info("Running MultiBackup in Scheduled mode")
+			utils.Info("Backup cron expression: %s", bkConfig.cronExpression)
+			utils.Info("Storage type %s ", bkConfig.storage)
+
+			//Test backup
+			utils.Info("Testing backup configurations...")
+			multiBackupTask(conf.Databases, bkConfig)
+			utils.Info("Testing backup configurations...done")
+			utils.Info("Creating multi backup job...")
+			// Create a new cron instance
+			c := cron.New()
+
+			_, err := c.AddFunc(bkConfig.cronExpression, func() {
+				// Create a channel
+				multiBackupTask(conf.Databases, bkConfig)
+			})
+			if err != nil {
+				return
+			}
+			// Start the cron scheduler
+			c.Start()
+			utils.Info("Creating multi backup job...done")
+			utils.Info("Backup job started")
+			defer c.Stop()
+			select {}
+
+		} else {
+			utils.Fatal("Cron expression is not valid: %s", bkConfig.cronExpression)
+		}
+	}
+
-	// Read and print new lines from the log file
-	for line := range t.Lines {
-		fmt.Println(line.Text)
-	}
 }

 // BackupDatabase backup database
-func BackupDatabase(backupFileName string, disableCompression bool) {
-	dbHost = os.Getenv("DB_HOST")
-	dbPassword = os.Getenv("DB_PASSWORD")
-	dbUserName = os.Getenv("DB_USERNAME")
-	dbName = os.Getenv("DB_NAME")
-	dbPort = os.Getenv("DB_PORT")
+func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {

-	storagePath = os.Getenv("STORAGE_PATH")
-
-	err := utils.CheckEnvVars(dbHVars)
-	if err != nil {
-		utils.Error("Please make sure all required environment variables for database are set")
-		utils.Fatal("Error checking environment variables: %s", err)
-	}
-
-	utils.Info("Starting database backup...")
-	utils.TestDatabaseConnection()
+	err := os.Setenv("MYSQL_PWD", db.dbPassword)
+	if err != nil {
+		return
+	}
+	testDatabaseConnection(db)
 	// Backup Database database
 	utils.Info("Backing up database...")

 	// Verify is compression is disabled
 	if disableCompression {
 		// Execute mysqldump
 		cmd := exec.Command("mysqldump",
-			"-h", dbHost,
-			"-P", dbPort,
-			"-u", dbUserName,
-			"--password="+dbPassword,
-			dbName,
+			"-h", db.dbHost,
+			"-P", db.dbPort,
+			"-u", db.dbUserName,
+			db.dbName,
 		)
 		output, err := cmd.Output()
 		if err != nil {
@@ -152,7 +179,7 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
 	}

 	// save output
-	file, err := os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
+	file, err := os.Create(filepath.Join(tmpPath, backupFileName))
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -166,14 +193,14 @@ func BackupDatabase(backupFileName string, disableCompression bool) {

 	} else {
 		// Execute mysqldump
-		cmd := exec.Command("mysqldump", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName)
+		cmd := exec.Command("mysqldump", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName)
 		stdout, err := cmd.StdoutPipe()
 		if err != nil {
 			log.Fatal(err)
 		}
 		gzipCmd := exec.Command("gzip")
 		gzipCmd.Stdin = stdout
-		gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
+		gzipCmd.Stdout, err = os.Create(filepath.Join(tmpPath, backupFileName))
 		gzipCmd.Start()
 		if err != nil {
 			log.Fatal(err)
@@ -189,93 +216,215 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
 	}

 }
-func localBackup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func localBackup(db *dbConfig, config *BackupConfig) {
 	utils.Info("Backup database to local storage")
-	BackupDatabase(backupFileName, disableCompression)
-	finalFileName := backupFileName
-	if encrypt {
-		encryptBackup(backupFileName)
-		finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension)
+	startTime = time.Now().Format(utils.TimeFormat())
+	BackupDatabase(db, config.backupFileName, disableCompression)
+	finalFileName := config.backupFileName
+	if config.encryption {
+		encryptBackup(config)
+		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, gpgExtension)
 	}
+	fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
+	if err != nil {
+		utils.Error("Error:", err)
+	}
+	//Get backup info
+	backupSize = fileInfo.Size()
 	utils.Info("Backup name is %s", finalFileName)
 	moveToBackup(finalFileName, storagePath)
+	//Send notification
+	utils.NotifySuccess(&utils.NotificationData{
+		File:           finalFileName,
+		BackupSize:     backupSize,
+		Database:       db.dbName,
+		Storage:        config.storage,
+		BackupLocation: filepath.Join(config.remotePath, finalFileName),
+		StartTime:      startTime,
+		EndTime:        time.Now().Format(utils.TimeFormat()),
+	})
 	//Delete old backup
-	if prune {
-		deleteOldBackup(backupRetention)
+	if config.prune {
+		deleteOldBackup(config.backupRetention)
 	}
+	//Delete temp
+	deleteTemp()
+	utils.Info("Backup completed successfully")
 }

-func s3Backup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func s3Backup(db *dbConfig, config *BackupConfig) {
 	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
 	s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
 	utils.Info("Backup database to s3 storage")
+	startTime = time.Now().Format(utils.TimeFormat())

 	//Backup database
-	BackupDatabase(backupFileName, disableCompression)
-	finalFileName := backupFileName
-	if encrypt {
-		encryptBackup(backupFileName)
-		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
+	BackupDatabase(db, config.backupFileName, disableCompression)
+	finalFileName := config.backupFileName
+	if config.encryption {
+		encryptBackup(config)
+		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
 	}
-	utils.Info("Uploading backup file to S3 storage...")
+	utils.Info("Uploading backup archive to remote storage S3 ... ")

 	utils.Info("Backup name is %s", finalFileName)
-	err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
+	err := UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
 	if err != nil {
-		utils.Fatal("Error uploading file to S3: %s ", err)
+		utils.Fatal("Error uploading backup archive to S3: %s ", err)

 	}

+	//Get backup info
+	fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
+	if err != nil {
+		utils.Error("Error:", err)
+	}
+	backupSize = fileInfo.Size()
 	//Delete backup file from tmp folder
-	err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
+	err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
 	if err != nil {
 		fmt.Println("Error deleting file: ", err)

 	}
 	// Delete old backup
-	if prune {
-		err := utils.DeleteOldBackup(bucket, s3Path, backupRetention)
+	if config.prune {
+		err := DeleteOldBackup(bucket, s3Path, config.backupRetention)
 		if err != nil {
 			utils.Fatal("Error deleting old backup from S3: %s ", err)
 		}
 	}
-	utils.Done("Database has been backed up and uploaded to s3 ")
+	utils.Done("Uploading backup archive to remote storage S3 ... done ")
+	//Send notification
+	utils.NotifySuccess(&utils.NotificationData{
+		File:           finalFileName,
+		BackupSize:     backupSize,
+		Database:       db.dbName,
+		Storage:        config.storage,
+		BackupLocation: filepath.Join(config.remotePath, finalFileName),
+		StartTime:      startTime,
+		EndTime:        time.Now().Format(utils.TimeFormat()),
+	})
+	//Delete temp
+	deleteTemp()
+	utils.Info("Backup completed successfully")

 }
-func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func sshBackup(db *dbConfig, config *BackupConfig) {
 	utils.Info("Backup database to Remote server")
+	startTime = time.Now().Format(utils.TimeFormat())

 	//Backup database
-	BackupDatabase(backupFileName, disableCompression)
-	finalFileName := backupFileName
-	if encrypt {
-		encryptBackup(backupFileName)
-		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
+	BackupDatabase(db, config.backupFileName, disableCompression)
+	finalFileName := config.backupFileName
+	if config.encryption {
+		encryptBackup(config)
+		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
 	}
-	utils.Info("Uploading backup file to remote server...")
+	utils.Info("Uploading backup archive to remote storage ... ")
 	utils.Info("Backup name is %s", finalFileName)
-	err := CopyToRemote(finalFileName, remotePath)
+	err := CopyToRemote(finalFileName, config.remotePath)
 	if err != nil {
 		utils.Fatal("Error uploading file to the remote server: %s ", err)

 	}

+	//Get backup info
+	fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
+	if err != nil {
+		utils.Error("Error:", err)
+	}
+	backupSize = fileInfo.Size()
 	//Delete backup file from tmp folder
 	err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
 	if err != nil {
-		fmt.Println("Error deleting file: ", err)
+		utils.Error("Error deleting file: %v", err)

 	}
-	if prune {
+	if config.prune {
 		//TODO: Delete old backup from remote server
 		utils.Info("Deleting old backup from a remote server is not implemented yet")

 	}

-	utils.Done("Database has been backed up and uploaded to remote server ")
+	utils.Done("Uploading backup archive to remote storage ... done ")
+	//Send notification
+	utils.NotifySuccess(&utils.NotificationData{
+		File:           finalFileName,
+		BackupSize:     backupSize,
+		Database:       db.dbName,
+		Storage:        config.storage,
+		BackupLocation: filepath.Join(config.remotePath, finalFileName),
+		StartTime:      startTime,
+		EndTime:        time.Now().Format(utils.TimeFormat()),
+	})
+	//Delete temp
+	deleteTemp()
+	utils.Info("Backup completed successfully")
|
||||
|
||||
}
|
||||
func ftpBackup(db *dbConfig, config *BackupConfig) {
|
||||
utils.Info("Backup database to the remote FTP server")
|
||||
startTime = time.Now().Format(utils.TimeFormat())
|
||||
|
||||
//Backup database
|
||||
BackupDatabase(db, config.backupFileName, disableCompression)
|
||||
finalFileName := config.backupFileName
|
||||
if config.encryption {
|
||||
encryptBackup(config)
|
||||
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
|
||||
}
|
||||
utils.Info("Uploading backup archive to the remote FTP server ... ")
|
||||
utils.Info("Backup name is %s", finalFileName)
|
||||
err := CopyToFTP(finalFileName, config.remotePath)
|
||||
if err != nil {
|
||||
utils.Fatal("Error uploading file to the remote FTP server: %s ", err)
|
||||
|
||||
}
|
||||
//Get backup info
|
||||
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
|
||||
if err != nil {
|
||||
utils.Error("Error:", err)
|
||||
}
|
||||
backupSize = fileInfo.Size()
|
||||
//Delete backup file from tmp folder
|
||||
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
|
||||
if err != nil {
|
||||
utils.Error("Error deleting file: %v", err)
|
||||
|
||||
}
|
||||
if config.prune {
|
||||
//TODO: Delete old backup from remote server
|
||||
utils.Info("Deleting old backup from a remote server is not implemented yet")
|
||||
|
||||
}
|
||||
|
||||
utils.Done("Uploading backup archive to the remote FTP server ... done ")
|
||||
//Send notification
|
||||
utils.NotifySuccess(&utils.NotificationData{
|
||||
File: finalFileName,
|
||||
BackupSize: backupSize,
|
||||
Database: db.dbName,
|
||||
Storage: config.storage,
|
||||
BackupLocation: filepath.Join(config.remotePath, finalFileName),
|
||||
StartTime: startTime,
|
||||
EndTime: time.Now().Format(utils.TimeFormat()),
|
||||
})
|
||||
//Delete temp
|
||||
deleteTemp()
|
||||
utils.Info("Backup completed successfully")
|
||||
|
||||
}
|
||||
|
||||
func encryptBackup(backupFileName string) {
|
||||
gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
|
||||
err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
|
||||
if err != nil {
|
||||
utils.Fatal("Error during encrypting backup %s", err)
|
||||
func encryptBackup(config *BackupConfig) {
|
||||
if config.usingKey {
|
||||
err := encryptWithGPGPublicKey(filepath.Join(tmpPath, config.backupFileName), config.publicKey)
|
||||
if err != nil {
|
||||
utils.Fatal("error during encrypting backup %v", err)
|
||||
}
|
||||
} else if config.passphrase != "" {
|
||||
err := encryptWithGPG(filepath.Join(tmpPath, config.backupFileName), config.passphrase)
|
||||
if err != nil {
|
||||
utils.Fatal("error during encrypting backup %v", err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
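After this refactor the four storage backends (local, S3, SSH and FTP) share one pipeline: dump, optionally encrypt, upload, record the size, notify, prune, clean up. A minimal sketch of that shared shape, written as if inside package pkg and reusing its existing imports; uploadFn is an illustrative stand-in for moveToBackup, UploadFileToS3, CopyToRemote or CopyToFTP, not a function in the repo:

// Sketch only: the common flow each concrete backend above follows.
func runBackup(db *dbConfig, config *BackupConfig, uploadFn func(file string) error) {
	startTime = time.Now().Format(utils.TimeFormat())
	BackupDatabase(db, config.backupFileName, disableCompression)
	finalFileName := config.backupFileName
	if config.encryption {
		encryptBackup(config)
		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, gpgExtension)
	}
	if err := uploadFn(finalFileName); err != nil {
		utils.Fatal("upload failed: %v", err)
	}
	// size lookup, utils.NotifySuccess, pruning and deleteTemp()
	// follow exactly as in the concrete functions above
}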
272  pkg/config.go
@@ -1,4 +1,274 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg

-type Config struct {
+import (
+	"fmt"
+	"github.com/jkaninda/mysql-bkup/utils"
+	"github.com/spf13/cobra"
+	"os"
+	"strconv"
+)
+
+type Database struct {
+	Host     string `yaml:"host"`
+	Port     string `yaml:"port"`
+	Name     string `yaml:"name"`
+	User     string `yaml:"user"`
+	Password string `yaml:"password"`
+	Path     string `yaml:"path"`
+}
+type Config struct {
+	Databases      []Database `yaml:"databases"`
+	CronExpression string     `yaml:"cronExpression"`
+}
+
+type dbConfig struct {
+	dbHost     string
+	dbPort     string
+	dbName     string
+	dbUserName string
+	dbPassword string
+}
+type targetDbConfig struct {
+	targetDbHost     string
+	targetDbPort     string
+	targetDbUserName string
+	targetDbPassword string
+	targetDbName     string
+}
+type TgConfig struct {
+	Token  string
+	ChatId string
+}
+type BackupConfig struct {
+	backupFileName     string
+	backupRetention    int
+	disableCompression bool
+	prune              bool
+	remotePath         string
+	encryption         bool
+	usingKey           bool
+	passphrase         string
+	publicKey          string
+	storage            string
+	cronExpression     string
+}
+type FTPConfig struct {
+	host       string
+	user       string
+	password   string
+	port       string
+	remotePath string
+}
+
+// SSHConfig holds the SSH connection details
+type SSHConfig struct {
+	user         string
+	password     string
+	hostName     string
+	port         string
+	identifyFile string
+}
+type AWSConfig struct {
+	endpoint       string
+	bucket         string
+	accessKey      string
+	secretKey      string
+	region         string
+	disableSsl     bool
+	forcePathStyle bool
+}
+
+func initDbConfig(cmd *cobra.Command) *dbConfig {
+	//Set env
+	utils.GetEnv(cmd, "dbname", "DB_NAME")
+	dConf := dbConfig{}
+	dConf.dbHost = os.Getenv("DB_HOST")
+	dConf.dbPort = os.Getenv("DB_PORT")
+	dConf.dbName = os.Getenv("DB_NAME")
+	dConf.dbUserName = os.Getenv("DB_USERNAME")
+	dConf.dbPassword = os.Getenv("DB_PASSWORD")
+
+	err := utils.CheckEnvVars(dbHVars)
+	if err != nil {
+		utils.Error("Please make sure all required environment variables for database are set")
+		utils.Fatal("Error checking environment variables: %s", err)
+	}
+	return &dConf
+}
+
+func getDatabase(database Database) *dbConfig {
+	return &dbConfig{
+		dbHost:     database.Host,
+		dbPort:     database.Port,
+		dbName:     database.Name,
+		dbUserName: database.User,
+		dbPassword: database.Password,
+	}
+}
+
+// loadSSHConfig loads the SSH configuration from environment variables
+func loadSSHConfig() (*SSHConfig, error) {
+	utils.GetEnvVariable("SSH_HOST", "SSH_HOST_NAME")
+	sshVars := []string{"SSH_USER", "SSH_HOST", "SSH_PORT", "REMOTE_PATH"}
+	err := utils.CheckEnvVars(sshVars)
+	if err != nil {
+		return nil, fmt.Errorf("error missing environment variables: %w", err)
+	}
+
+	return &SSHConfig{
+		user:         os.Getenv("SSH_USER"),
+		password:     os.Getenv("SSH_PASSWORD"),
+		hostName:     os.Getenv("SSH_HOST"),
+		port:         os.Getenv("SSH_PORT"),
+		identifyFile: os.Getenv("SSH_IDENTIFY_FILE"),
+	}, nil
+}
+func initFtpConfig() *FTPConfig {
+	//Initialize data configs
+	fConfig := FTPConfig{}
+	fConfig.host = utils.GetEnvVariable("FTP_HOST", "FTP_HOST_NAME")
+	fConfig.user = os.Getenv("FTP_USER")
+	fConfig.password = os.Getenv("FTP_PASSWORD")
+	fConfig.port = os.Getenv("FTP_PORT")
+	fConfig.remotePath = os.Getenv("REMOTE_PATH")
+	err := utils.CheckEnvVars(ftpVars)
+	if err != nil {
+		utils.Error("Please make sure all required environment variables for FTP are set")
+		utils.Fatal("Error missing environment variables: %s", err)
+	}
+	return &fConfig
+}
+func initAWSConfig() *AWSConfig {
+	//Initialize AWS configs
+	aConfig := AWSConfig{}
+	aConfig.endpoint = utils.GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
+	aConfig.accessKey = utils.GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
+	aConfig.secretKey = utils.GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
+	aConfig.bucket = utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
+	aConfig.region = os.Getenv("AWS_REGION")
+	disableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
+	if err != nil {
+		utils.Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
+	}
+	forcePathStyle, err := strconv.ParseBool(os.Getenv("AWS_FORCE_PATH_STYLE"))
+	if err != nil {
+		utils.Fatal("Unable to parse AWS_FORCE_PATH_STYLE env var: %s", err)
+	}
+	aConfig.disableSsl = disableSsl
+	aConfig.forcePathStyle = forcePathStyle
+	err = utils.CheckEnvVars(awsVars)
+	if err != nil {
+		utils.Error("Please make sure all required environment variables for AWS S3 are set")
+		utils.Fatal("Error checking environment variables: %s", err)
+	}
+	return &aConfig
+}
+func initBackupConfig(cmd *cobra.Command) *BackupConfig {
+	utils.SetEnv("STORAGE_PATH", storagePath)
+	utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
+	utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
+	utils.GetEnv(cmd, "path", "REMOTE_PATH")
+	//Get flag value and set env
+	remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
+	storage = utils.GetEnv(cmd, "storage", "STORAGE")
+	backupRetention, _ := cmd.Flags().GetInt("keep-last")
+	prune, _ := cmd.Flags().GetBool("prune")
+	disableCompression, _ = cmd.Flags().GetBool("disable-compression")
+	_, _ = cmd.Flags().GetString("mode")
+	passphrase := os.Getenv("GPG_PASSPHRASE")
+	_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
+	cronExpression := os.Getenv("BACKUP_CRON_EXPRESSION")
+
+	publicKeyFile, err := checkPubKeyFile(os.Getenv("GPG_PUBLIC_KEY"))
+	if err == nil {
+		encryption = true
+		usingKey = true
+	} else if passphrase != "" {
+		encryption = true
+		usingKey = false
+	}
+	//Initialize backup configs
+	config := BackupConfig{}
+	config.backupRetention = backupRetention
+	config.disableCompression = disableCompression
+	config.prune = prune
+	config.storage = storage
+	config.encryption = encryption
+	config.remotePath = remotePath
+	config.passphrase = passphrase
+	config.publicKey = publicKeyFile
+	config.usingKey = usingKey
+	config.cronExpression = cronExpression
+	return &config
+}
+
+type RestoreConfig struct {
+	s3Path     string
+	remotePath string
+	storage    string
+	file       string
+	bucket     string
+	usingKey   bool
+	passphrase string
+	privateKey string
+}
+
+func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
+	utils.SetEnv("STORAGE_PATH", storagePath)
+	utils.GetEnv(cmd, "path", "REMOTE_PATH")
+
+	//Get flag value and set env
+	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
+	remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
+	storage = utils.GetEnv(cmd, "storage", "STORAGE")
+	file = utils.GetEnv(cmd, "file", "FILE_NAME")
+	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
+	passphrase := os.Getenv("GPG_PASSPHRASE")
+	privateKeyFile, err := checkPrKeyFile(os.Getenv("GPG_PRIVATE_KEY"))
+	if err == nil {
+		usingKey = true
+	} else if passphrase != "" {
+		usingKey = false
+	}
+
+	//Initialize restore configs
+	rConfig := RestoreConfig{}
+	rConfig.s3Path = s3Path
+	rConfig.remotePath = remotePath
+	rConfig.storage = storage
+	rConfig.bucket = bucket
+	rConfig.file = file
+	rConfig.storage = storage
+	rConfig.passphrase = passphrase
+	rConfig.usingKey = usingKey
+	rConfig.privateKey = privateKeyFile
+	return &rConfig
+}
+func initTargetDbConfig() *targetDbConfig {
+	tdbConfig := targetDbConfig{}
+	tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
+	tdbConfig.targetDbPort = os.Getenv("TARGET_DB_PORT")
+	tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME")
+	tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME")
+	tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD")
+
+	err := utils.CheckEnvVars(tdbRVars)
+	if err != nil {
+		utils.Error("Please make sure all required environment variables for the target database are set")
+		utils.Fatal("Error checking target database environment variables: %s", err)
+	}
+	return &tdbConfig
+}
+func loadConfigFile() (string, error) {
+	backupConfigFile, err := checkConfigFile(os.Getenv("BACKUP_CONFIG_FILE"))
+	if err == nil {
+		return backupConfigFile, nil
+	}
+	return "", fmt.Errorf("backup config file not found")
+}
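The yaml tags on Database and Config above define the multi-database config file that readConf (pkg/helper.go) parses. A hedged sketch of a file it would accept, wrapped in Go so it can be checked with yaml.Unmarshal; host names and credentials are placeholders, as if written inside package pkg:

// Sketch only: a config shape readConf would accept; values are illustrative.
const sampleConfig = `
databases:
  - host: mysql.example.com
    port: "3306"
    name: appdb
    user: backup
    password: secret
    path: ""
cronExpression: "0 2 * * *"
`

func parseSampleConfig() (*Config, error) {
	c := &Config{}
	if err := yaml.Unmarshal([]byte(sampleConfig), c); err != nil {
		return nil, err
	}
	return c, nil
}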
171  pkg/encrypt.go
@@ -1,42 +1,179 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg

 import (
+	"errors"
 	"fmt"
+	"github.com/ProtonMail/gopenpgp/v2/crypto"
 	"github.com/jkaninda/mysql-bkup/utils"
 	"os"
-	"os/exec"
 	"strings"
 )

-func Decrypt(inputFile string, passphrase string) error {
-	utils.Info("Decrypting backup file: " + inputFile + " ...")
-	cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
-	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stderr
-
-	err := cmd.Run()
+// decryptWithGPG decrypts backup file using a passphrase
+func decryptWithGPG(inputFile string, passphrase string) error {
+	utils.Info("Decrypting backup using passphrase...")
+	// Read the encrypted file
+	encFileContent, err := os.ReadFile(inputFile)
 	if err != nil {
-		return err
+		return errors.New(fmt.Sprintf("Error reading encrypted file: %s", err))
 	}
+	// Define the passphrase used to encrypt the file
+	_passphrase := []byte(passphrase)
+	// Create a PGP message object from the encrypted file content
+	encryptedMessage := crypto.NewPGPMessage(encFileContent)
+	// Decrypt the message using the passphrase
+	plainMessage, err := crypto.DecryptMessageWithPassword(encryptedMessage, _passphrase)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error decrypting file: %s", err))
+	}
+
+	// Save the decrypted file (restore it)
+	err = os.WriteFile(RemoveLastExtension(inputFile), plainMessage.GetBinary(), 0644)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error saving decrypted file: %s", err))
+	}
+	utils.Info("Decrypting backup using passphrase...done")
+	utils.Info("Backup file decrypted successful!")
 	return nil
 }

-func Encrypt(inputFile string, passphrase string) error {
-	utils.Info("Encrypting backup...")
-	cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile)
-	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stderr
-
-	err := cmd.Run()
+// encryptWithGPG encrypts backup using a passphrase
+func encryptWithGPG(inputFile string, passphrase string) error {
+	utils.Info("Encrypting backup using passphrase...")
+	// Read the file to be encrypted
+	plainFileContent, err := os.ReadFile(inputFile)
 	if err != nil {
-		return err
+		return errors.New(fmt.Sprintf("Error reading file: %s", err))
 	}
+	// Define the passphrase to encrypt the file
+	_passphrase := []byte(passphrase)
+
+	// Create a message object from the file content
+	message := crypto.NewPlainMessage(plainFileContent)
+	// Encrypt the message using the passphrase
+	encryptedMessage, err := crypto.EncryptMessageWithPassword(message, _passphrase)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error encrypting backup file: %s", err))
+	}
+	// Save the encrypted .tar file
+	err = os.WriteFile(fmt.Sprintf("%s.%s", inputFile, gpgExtension), encryptedMessage.GetBinary(), 0644)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error saving encrypted filee: %s", err))
+	}
+	utils.Info("Encrypting backup using passphrase...done")
+	utils.Info("Backup file encrypted successful!")
 	return nil
 }

+// encryptWithGPGPublicKey encrypts backup using a public key
+func encryptWithGPGPublicKey(inputFile string, publicKey string) error {
+	utils.Info("Encrypting backup using public key...")
+	// Read the public key
+	pubKeyBytes, err := os.ReadFile(publicKey)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error reading public key: %s", err))
+	}
+	// Create a new keyring with the public key
+	publicKeyObj, err := crypto.NewKeyFromArmored(string(pubKeyBytes))
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error parsing public key: %s", err))
+	}
+
+	keyRing, err := crypto.NewKeyRing(publicKeyObj)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error creating key ring: %v", err))
+	}
+
+	// Read the file to encryptWithGPGPublicKey
+	fileContent, err := os.ReadFile(inputFile)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error reading file: %v", err))
+	}
+
+	// encryptWithGPG the file
+	message := crypto.NewPlainMessage(fileContent)
+	encMessage, err := keyRing.Encrypt(message, nil)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error encrypting file: %v", err))
+	}
+
+	// Save the encrypted file
+	err = os.WriteFile(fmt.Sprintf("%s.%s", inputFile, gpgExtension), encMessage.GetBinary(), 0644)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error saving encrypted file: %v", err))
+	}
+	utils.Info("Encrypting backup using public key...done")
+	utils.Info("Backup file encrypted successful!")
+	return nil
+}
+
+// decryptWithGPGPrivateKey decrypts backup file using a private key and passphrase.
+// privateKey GPG private key
+// passphrase GPG passphrase
+func decryptWithGPGPrivateKey(inputFile, privateKey, passphrase string) error {
+	utils.Info("Encrypting backup using private key...")
+
+	// Read the private key
+	priKeyBytes, err := os.ReadFile(privateKey)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error reading private key: %s", err))
+	}
+
+	// Read the password for the private key (if it’s password-protected)
+	password := []byte(passphrase)
+
+	// Create a key object from the armored private key
+	privateKeyObj, err := crypto.NewKeyFromArmored(string(priKeyBytes))
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error parsing private key: %s", err))
+	}
+
+	// Unlock the private key with the password
+	if passphrase != "" {
+		// Unlock the private key with the password
+		_, err = privateKeyObj.Unlock(password)
+		if err != nil {
+			return errors.New(fmt.Sprintf("Error unlocking private key: %s", err))
+		}
+	}
+
+	// Create a new keyring with the private key
+	keyRing, err := crypto.NewKeyRing(privateKeyObj)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error creating key ring: %v", err))
+	}
+
+	// Read the encrypted file
+	encFileContent, err := os.ReadFile(inputFile)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error reading encrypted file: %s", err))
+	}
+
+	// decryptWithGPG the file
+	encryptedMessage := crypto.NewPGPMessage(encFileContent)
+	message, err := keyRing.Decrypt(encryptedMessage, nil, 0)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error decrypting file: %s", err))
+	}
+
+	// Save the decrypted file
+	err = os.WriteFile(RemoveLastExtension(inputFile), message.GetBinary(), 0644)
+	if err != nil {
+		return errors.New(fmt.Sprintf("Error saving decrypted file: %s", err))
+	}
+	utils.Info("Encrypting backup using public key...done")
+	fmt.Println("File successfully decrypted!")
+	return nil
+}
 func RemoveLastExtension(filename string) string {
 	if idx := strings.LastIndex(filename, "."); idx != -1 {
 		return filename[:idx]
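A short round-trip sketch for the passphrase helpers above, as if written inside package pkg (both functions are unexported); the file name and passphrase are placeholders. encryptWithGPG writes <input>.gpg next to the input, and decryptWithGPG strips that last extension again:

// Sketch only: symmetric encrypt/decrypt round trip with the helpers above.
func gpgRoundTrip() error {
	in := filepath.Join(tmpPath, "mydb_20240101_120000.sql.gz")
	if err := encryptWithGPG(in, "my-passphrase"); err != nil {
		return err // would have produced in + ".gpg"
	}
	// restores the original file name by removing the ".gpg" suffix
	return decryptWithGPG(fmt.Sprintf("%s.%s", in, gpgExtension), "my-passphrase")
}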
81  pkg/ftp.go (Normal file)
@@ -0,0 +1,81 @@
package pkg

import (
	"fmt"
	"github.com/jlaffaye/ftp"
	"io"
	"os"
	"path/filepath"
	"time"
)

// initFtpClient initializes and authenticates an FTP client
func initFtpClient() (*ftp.ServerConn, error) {
	ftpConfig := initFtpConfig()
	ftpClient, err := ftp.Dial(fmt.Sprintf("%s:%s", ftpConfig.host, ftpConfig.port), ftp.DialWithTimeout(5*time.Second))
	if err != nil {
		return nil, fmt.Errorf("failed to connect to FTP: %w", err)
	}

	err = ftpClient.Login(ftpConfig.user, ftpConfig.password)
	if err != nil {
		return nil, fmt.Errorf("failed to log in to FTP: %w", err)
	}

	return ftpClient, nil
}

// CopyToFTP uploads a file to the remote FTP server
func CopyToFTP(fileName, remotePath string) (err error) {
	ftpConfig := initFtpConfig()
	ftpClient, err := initFtpClient()
	if err != nil {
		return err
	}
	defer ftpClient.Quit()

	filePath := filepath.Join(tmpPath, fileName)
	file, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("failed to open file %s: %w", fileName, err)
	}
	defer file.Close()

	remoteFilePath := filepath.Join(ftpConfig.remotePath, fileName)
	err = ftpClient.Stor(remoteFilePath, file)
	if err != nil {
		return fmt.Errorf("failed to upload file %s: %w", fileName, err)
	}

	return nil
}

// CopyFromFTP downloads a file from the remote FTP server
func CopyFromFTP(fileName, remotePath string) (err error) {
	ftpClient, err := initFtpClient()
	if err != nil {
		return err
	}
	defer ftpClient.Quit()

	remoteFilePath := filepath.Join(remotePath, fileName)
	r, err := ftpClient.Retr(remoteFilePath)
	if err != nil {
		return fmt.Errorf("failed to retrieve file %s: %w", fileName, err)
	}
	defer r.Close()

	localFilePath := filepath.Join(tmpPath, fileName)
	outFile, err := os.Create(localFilePath)
	if err != nil {
		return fmt.Errorf("failed to create local file %s: %w", fileName, err)
	}
	defer outFile.Close()

	_, err = io.Copy(outFile, r)
	if err != nil {
		return fmt.Errorf("failed to copy data to local file %s: %w", fileName, err)
	}

	return nil
}
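Usage sketch for the FTP helpers, as if inside package pkg: the connection is configured entirely from environment variables (see initFtpConfig in pkg/config.go), and every value below is a placeholder:

// Sketch only: initFtpClient reads these variables at call time.
func ftpUploadExample() error {
	os.Setenv("FTP_HOST", "ftp.example.com")
	os.Setenv("FTP_PORT", "21")
	os.Setenv("FTP_USER", "backup")
	os.Setenv("FTP_PASSWORD", "secret")
	os.Setenv("REMOTE_PATH", "/backups")
	// uploads /tmp/backup/mydb.sql.gz to /backups/mydb.sql.gz
	return CopyToFTP("mydb.sql.gz", os.Getenv("REMOTE_PATH"))
}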
139  pkg/helper.go
@@ -1,9 +1,18 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg

 import (
+	"bytes"
 	"fmt"
 	"github.com/jkaninda/mysql-bkup/utils"
+	"gopkg.in/yaml.v3"
 	"os"
+	"os/exec"
 	"path/filepath"
 	"time"
 )
@@ -71,4 +80,134 @@ func deleteOldBackup(retentionDays int) {
 		utils.Fatal(fmt.Sprintf("Error: %s", err))
 		return
 	}
 	utils.Done("Deleting old backups...done")
 }
+func deleteTemp() {
+	utils.Info("Deleting %s ...", tmpPath)
+	err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		// Check if the current item is a file
+		if !info.IsDir() {
+			// Delete the file
+			err = os.Remove(path)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		utils.Error("Error deleting files: %v", err)
+	} else {
+		utils.Info("Deleting %s ... done", tmpPath)
+	}
+}
+
+// TestDatabaseConnection tests the database connection
+func testDatabaseConnection(db *dbConfig) {
+	err := os.Setenv("MYSQL_PWD", db.dbPassword)
+	if err != nil {
+		return
+	}
+	utils.Info("Connecting to %s database ...", db.dbName)
+	cmd := exec.Command("mysql", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName, "-e", "quit")
+	// Capture the output
+	var out bytes.Buffer
+	cmd.Stdout = &out
+	cmd.Stderr = &out
+	err = cmd.Run()
+	if err != nil {
+		utils.Fatal("Error testing database connection: %v\nOutput: %s", err, out.String())
+	}
+	utils.Info("Successfully connected to %s database", db.dbName)
+}
+func intro() {
+	utils.Info("Starting MySQL Backup...")
+	utils.Info("Copyright (c) 2024 Jonas Kaninda ")
+}
+func checkPubKeyFile(pubKey string) (string, error) {
+	// Define possible key file names
+	keyFiles := []string{filepath.Join(gpgHome, "public_key.asc"), filepath.Join(gpgHome, "public_key.gpg"), pubKey}
+
+	// Loop through key file names and check if they exist
+	for _, keyFile := range keyFiles {
+		if _, err := os.Stat(keyFile); err == nil {
+			// File exists
+			return keyFile, nil
+		} else if os.IsNotExist(err) {
+			// File does not exist, continue to the next one
+			continue
+		} else {
+			// An unexpected error occurred
+			return "", err
+		}
+	}
+
+	// Return an error if neither file exists
+	return "", fmt.Errorf("no public key file found")
+}
+func checkPrKeyFile(prKey string) (string, error) {
+	// Define possible key file names
+	keyFiles := []string{filepath.Join(gpgHome, "private_key.asc"), filepath.Join(gpgHome, "private_key.gpg"), prKey}
+
+	// Loop through key file names and check if they exist
+	for _, keyFile := range keyFiles {
+		if _, err := os.Stat(keyFile); err == nil {
+			// File exists
+			return keyFile, nil
+		} else if os.IsNotExist(err) {
+			// File does not exist, continue to the next one
+			continue
+		} else {
+			// An unexpected error occurred
+			return "", err
+		}
+	}
+
+	// Return an error if neither file exists
+	return "", fmt.Errorf("no public key file found")
+}
+func readConf(configFile string) (*Config, error) {
+	//configFile := filepath.Join("./", filename)
+	if utils.FileExists(configFile) {
+		buf, err := os.ReadFile(configFile)
+		if err != nil {
+			return nil, err
+		}
+
+		c := &Config{}
+		err = yaml.Unmarshal(buf, c)
+		if err != nil {
+			return nil, fmt.Errorf("in file %q: %w", configFile, err)
+		}
+
+		return c, err
+	}
+	return nil, fmt.Errorf("config file %q not found", configFile)
+}
+func checkConfigFile(filePath string) (string, error) {
+	// Define possible config file names
+	configFiles := []string{filepath.Join(workingDir, "config.yaml"), filepath.Join(workingDir, "config.yml"), filePath}
+
+	// Loop through config file names and check if they exist
+	for _, configFile := range configFiles {
+		if _, err := os.Stat(configFile); err == nil {
+			// File exists
+			return configFile, nil
+		} else if os.IsNotExist(err) {
+			// File does not exist, continue to the next one
+			continue
+		} else {
+			// An unexpected error occurred
+			return "", err
+		}
+	}
+
+	// Return an error if neither file exists
+	return "", fmt.Errorf("no config file found")
+}
42  pkg/migrate.go (Normal file)
@@ -0,0 +1,42 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg

import (
	"fmt"
	"github.com/jkaninda/mysql-bkup/utils"
	"github.com/spf13/cobra"
	"time"
)

func StartMigration(cmd *cobra.Command) {
	intro()
	utils.Info("Starting database migration...")
	//Get DB config
	dbConf = initDbConfig(cmd)
	targetDbConf = initTargetDbConfig()

	//Defining the target database variables
	newDbConfig := dbConfig{}
	newDbConfig.dbHost = targetDbConf.targetDbHost
	newDbConfig.dbPort = targetDbConf.targetDbPort
	newDbConfig.dbName = targetDbConf.targetDbName
	newDbConfig.dbUserName = targetDbConf.targetDbUserName
	newDbConfig.dbPassword = targetDbConf.targetDbPassword

	//Generate file name
	backupFileName := fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
	conf := &RestoreConfig{}
	conf.file = backupFileName
	//Backup source Database
	BackupDatabase(dbConf, backupFileName, true)
	//Restore source database into target database
	utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
	RestoreDatabase(&newDbConfig, conf)
	utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
	utils.Info("Database migration completed.")
}
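Migration, then, is a backup piped straight into a restore: StartMigration dumps the source database described by the DB_* variables and replays it into the target described by TARGET_DB_HOST, TARGET_DB_PORT, TARGET_DB_NAME, TARGET_DB_USERNAME and TARGET_DB_PASSWORD (all required; see tdbRVars in pkg/var.go). Compression is disabled for the intermediate dump so the restore path can replay it directly as a plain .sql file before the temp directory is cleared.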
142  pkg/restore.go
@@ -1,7 +1,12 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg

 import (
 	"fmt"
 	"github.com/jkaninda/mysql-bkup/utils"
 	"github.com/spf13/cobra"
 	"os"
@@ -10,114 +15,121 @@ import (
 )

 func StartRestore(cmd *cobra.Command) {
+	intro()
+	dbConf = initDbConfig(cmd)
+	restoreConf := initRestoreConfig(cmd)

-	//Set env
-	utils.SetEnv("STORAGE_PATH", storagePath)
-	utils.GetEnv(cmd, "dbname", "DB_NAME")
-	utils.GetEnv(cmd, "port", "DB_PORT")
-
-	//Get flag value and set env
-	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
-	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
-	storage = utils.GetEnv(cmd, "storage", "STORAGE")
-	file = utils.GetEnv(cmd, "file", "FILE_NAME")
-	executionMode, _ = cmd.Flags().GetString("mode")
-	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
-	switch storage {
-	case "s3":
-		restoreFromS3(file, bucket, s3Path)
+	switch restoreConf.storage {
 	case "local":
 		utils.Info("Restore database from local")
-		copyToTmp(storagePath, file)
-		RestoreDatabase(file)
-	case "ssh":
-		restoreFromRemote(file, remotePath)
-	case "ftp":
-		utils.Fatal("Restore from FTP is not yet supported")
+		copyToTmp(storagePath, restoreConf.file)
+		RestoreDatabase(dbConf, restoreConf)
+	case "s3", "S3":
+		restoreFromS3(dbConf, restoreConf)
+	case "ssh", "SSH", "remote":
+		restoreFromRemote(dbConf, restoreConf)
+	case "ftp", "FTP":
+		restoreFromFTP(dbConf, restoreConf)
 	default:
 		utils.Info("Restore database from local")
-		RestoreDatabase(file)
+		copyToTmp(storagePath, restoreConf.file)
+		RestoreDatabase(dbConf, restoreConf)
 	}
 }

-func restoreFromS3(file, bucket, s3Path string) {
+func restoreFromS3(db *dbConfig, conf *RestoreConfig) {
 	utils.Info("Restore database from s3")
-	err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
+	err := DownloadFile(tmpPath, conf.file, conf.bucket, conf.s3Path)
 	if err != nil {
-		utils.Fatal(fmt.Sprintf("Error download file from s3 %s %s", file, err))
+		utils.Fatal("Error download file from s3 %s %v ", conf.file, err)
 	}
-	RestoreDatabase(file)
+	RestoreDatabase(db, conf)
 }
-func restoreFromRemote(file, remotePath string) {
+func restoreFromRemote(db *dbConfig, conf *RestoreConfig) {
 	utils.Info("Restore database from remote server")
-	err := CopyFromRemote(file, remotePath)
+	err := CopyFromRemote(conf.file, conf.remotePath)
 	if err != nil {
-		utils.Fatal(fmt.Sprintf("Error download file from remote server: ", filepath.Join(remotePath, file), err))
+		utils.Fatal("Error download file from remote server: %s %v", filepath.Join(conf.remotePath, conf.file), err)
 	}
-	RestoreDatabase(file)
+	RestoreDatabase(db, conf)
 }
+func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
+	utils.Info("Restore database from FTP server")
+	err := CopyFromFTP(conf.file, conf.remotePath)
+	if err != nil {
+		utils.Fatal("Error download file from FTP server: %s %v", filepath.Join(conf.remotePath, conf.file), err)
+	}
+	RestoreDatabase(db, conf)
+}

 // RestoreDatabase restore database
-func RestoreDatabase(file string) {
-	dbHost = os.Getenv("DB_HOST")
-	dbPassword = os.Getenv("DB_PASSWORD")
-	dbUserName = os.Getenv("DB_USERNAME")
-	dbName = os.Getenv("DB_NAME")
-	dbPort = os.Getenv("DB_PORT")
-	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
-	if file == "" {
+func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
+	if conf.file == "" {
 		utils.Fatal("Error, file required")
 	}

-	err := utils.CheckEnvVars(dbHVars)
-	if err != nil {
-		utils.Error("Please make sure all required environment variables for database are set")
-		utils.Fatal("Error checking environment variables: %s", err)
-	}
-
-	extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
+	extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
 	if extension == ".gpg" {
-		if gpgPassphrase == "" {
-			utils.Fatal("Error: GPG passphrase is required, your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE environment variable is required.")
-		} else {
-			//Decrypt file
-			err := Decrypt(filepath.Join(tmpPath, file), gpgPassphrase)
+		if conf.usingKey {
+			utils.Warn("Backup decryption using a private key is not fully supported")
+			err := decryptWithGPGPrivateKey(filepath.Join(tmpPath, conf.file), conf.privateKey, conf.passphrase)
 			if err != nil {
-				utils.Fatal("Error decrypting file %s %v", file, err)
+				utils.Fatal("error during decrypting backup %v", err)
 			}
+		} else {
+			if conf.passphrase == "" {
+				utils.Error("Error, passphrase or private key required")
+				utils.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.")
+			} else {
+				//decryptWithGPG file
+				err := decryptWithGPG(filepath.Join(tmpPath, conf.file), conf.passphrase)
+				if err != nil {
+					utils.Fatal("Error decrypting file %s %v", file, err)
+				}
+				//Update file name
+				conf.file = RemoveLastExtension(file)
+			}
 		}
-		//Update file name
-		file = RemoveLastExtension(file)
 	}

-	if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
-		utils.TestDatabaseConnection()
+	if utils.FileExists(filepath.Join(tmpPath, conf.file)) {
+		err := os.Setenv("MYSQL_PWD", db.dbPassword)
+		if err != nil {
+			return
+		}
+		testDatabaseConnection(db)
 		utils.Info("Restoring database...")

-		extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
+		extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
 		// Restore from compressed file / .sql.gz
 		if extension == ".gz" {
-			str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
-			_, err := exec.Command("bash", "-c", str).Output()
+			str := "zcat " + filepath.Join(tmpPath, conf.file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
+			_, err := exec.Command("sh", "-c", str).Output()
 			if err != nil {
 				utils.Fatal("Error, in restoring the database %v", err)
 			}
 			utils.Info("Restoring database... done")
 			utils.Done("Database has been restored")
 			//Delete temp
 			deleteTemp()
 		} else if extension == ".sql" {
 			//Restore from sql file
-			str := "cat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
-			_, err := exec.Command("bash", "-c", str).Output()
+			str := "cat " + filepath.Join(tmpPath, conf.file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
+			_, err := exec.Command("sh", "-c", str).Output()
 			if err != nil {
-				utils.Fatal(fmt.Sprintf("Error in restoring the database %s", err))
+				utils.Fatal("Error in restoring the database %v", err)
 			}
 			utils.Info("Restoring database... done")
 			utils.Done("Database has been restored")
 			//Delete temp
 			deleteTemp()
 		} else {
-			utils.Fatal(fmt.Sprintf("Unknown file extension %s", extension))
+			utils.Fatal("Unknown file extension %s", extension)
 		}
 	} else {
-		utils.Fatal(fmt.Sprintf("File not found in %s", fmt.Sprintf("%s/%s", tmpPath, file)))
+		utils.Fatal("File not found in %s", filepath.Join(tmpPath, conf.file))
 	}
 }
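Two quiet but notable changes in the restore path: the password now travels through the MYSQL_PWD environment variable instead of a --password= flag, so it no longer shows up in the process list, and the shell pipelines run under sh -c rather than bash -c, which presumably keeps them working on minimal images that ship without bash.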
@@ -1,56 +1,35 @@
-package utils
+// Package pkg
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
+package pkg

 import (
 	"bytes"
 	"fmt"
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
-	"log"
+	"github.com/jkaninda/mysql-bkup/utils"
 	"net/http"
 	"os"
 	"path/filepath"
 	"strconv"
 	"time"
 )

 // CreateSession creates a new AWS session
 func CreateSession() (*session.Session, error) {
-	// AwsVars Required environment variables for AWS S3 storage
-	var awsVars = []string{
-		"AWS_S3_ENDPOINT",
-		"AWS_S3_BUCKET_NAME",
-		"AWS_ACCESS_KEY",
-		"AWS_SECRET_KEY",
-		"AWS_REGION",
-	}
-
-	endPoint := GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
-	accessKey := GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
-	secretKey := GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
-	_ = GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
-
-	region := os.Getenv("AWS_REGION")
-	awsDisableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
-	if err != nil {
-		Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
-	}
-
-	err = CheckEnvVars(awsVars)
-	if err != nil {
-		Fatal("Error checking environment variables\n: %s", err)
-	}
 	// S3 Config
+	awsConfig := initAWSConfig()
 	// Configure to use MinIO Server
 	s3Config := &aws.Config{
-		Credentials:      credentials.NewStaticCredentials(accessKey, secretKey, ""),
-		Endpoint:         aws.String(endPoint),
-		Region:           aws.String(region),
-		DisableSSL:       aws.Bool(awsDisableSsl),
-		S3ForcePathStyle: aws.Bool(true),
+		Credentials:      credentials.NewStaticCredentials(awsConfig.accessKey, awsConfig.secretKey, ""),
+		Endpoint:         aws.String(awsConfig.endpoint),
+		Region:           aws.String(awsConfig.region),
+		DisableSSL:       aws.Bool(awsConfig.disableSsl),
+		S3ForcePathStyle: aws.Bool(awsConfig.forcePathStyle),
 	}
 	return session.NewSession(s3Config)
@@ -102,10 +81,10 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
 	if err != nil {
 		return err
 	}
-	Info("Download backup from S3 storage...")
+	utils.Info("Download data from S3 storage...")
 	file, err := os.Create(filepath.Join(destinationPath, key))
 	if err != nil {
-		fmt.Println("Failed to create file", err)
+		utils.Error("Failed to create file", err)
 		return err
 	}
 	defer file.Close()
@@ -119,10 +98,10 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
 		Key:    aws.String(objectKey),
 	})
 	if err != nil {
-		fmt.Println("Failed to download file", err)
+		utils.Error("Failed to download file %s", key)
 		return err
 	}
-	Info(fmt.Sprintf("Backup downloaded: ", file.Name(), " bytes size ", numBytes))
+	utils.Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)

 	return nil
 }
@@ -152,18 +131,18 @@ func DeleteOldBackup(bucket, prefix string, retention int) error {
 				Key: object.Key,
 			})
 			if err != nil {
-				log.Printf("Failed to delete object %s: %v", *object.Key, err)
+				utils.Info("Failed to delete object %s: %v", *object.Key, err)
 			} else {
-				fmt.Printf("Deleted object %s\n", *object.Key)
+				utils.Info("Deleted object %s\n", *object.Key)
 			}
 		}
 	}
 		return !lastPage
 	})
 	if err != nil {
-		log.Fatalf("Failed to list objects: %v", err)
+		utils.Error("Failed to list objects: %v", err)
 	}

-	fmt.Println("Finished deleting old files.")
+	utils.Info("Finished deleting old files.")
 	return nil
 }
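With CreateSession now driven by initAWSConfig, any aws-sdk-go client can be built on top of it. A hedged sketch, as if inside the same package and using only standard aws-sdk-go calls, that lists stored backups; bucket and prefix would be whatever AWS_S3_BUCKET_NAME and AWS_S3_PATH name:

// Sketch only: enumerate objects under the backup prefix.
func listBackups(bucket, prefix string) error {
	sess, err := CreateSession()
	if err != nil {
		return err
	}
	svc := s3.New(sess)
	out, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket: aws.String(bucket),
		Prefix: aws.String(prefix),
	})
	if err != nil {
		return err
	}
	for _, obj := range out.Contents {
		utils.Info("found %s (%d bytes)", *obj.Key, *obj.Size)
	}
	return nil
}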
106  pkg/scp.go
@@ -1,3 +1,9 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg

 import (
@@ -12,83 +18,73 @@ import (
 	"path/filepath"
 )

-func CopyToRemote(fileName, remotePath string) error {
-	sshUser := os.Getenv("SSH_USER")
-	sshPassword := os.Getenv("SSH_PASSWORD")
-	sshHostName := os.Getenv("SSH_HOST_NAME")
-	sshPort := os.Getenv("SSH_PORT")
-	sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
-
-	err := utils.CheckEnvVars(sshHVars)
-	if err != nil {
-		utils.Error("Error checking environment variables: %s", err)
-		os.Exit(1)
-	}
-
-	clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
-	if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
-		clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
+// createSSHClientConfig sets up the SSH client configuration based on the provided SSHConfig
+func createSSHClientConfig(sshConfig *SSHConfig) (ssh.ClientConfig, error) {
+	if sshConfig.identifyFile != "" && utils.FileExists(sshConfig.identifyFile) {
+		return auth.PrivateKey(sshConfig.user, sshConfig.identifyFile, ssh.InsecureIgnoreHostKey())
 	} else {
-		if sshPassword == "" {
-			return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty")
+		if sshConfig.password == "" {
+			return ssh.ClientConfig{}, errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty")
 		}
-		utils.Warn("Accessing the remote server using password, password is not recommended")
-		clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
+		utils.Warn("Accessing the remote server using password, which is not recommended.")
+		return auth.PasswordKey(sshConfig.user, sshConfig.password, ssh.InsecureIgnoreHostKey())
 	}
 }

+// CopyToRemote copies a file to a remote server via SCP
+func CopyToRemote(fileName, remotePath string) error {
+	// Load environment variables
+	sshConfig, err := loadSSHConfig()
+	if err != nil {
+		return fmt.Errorf("failed to load SSH configuration: %w", err)
+	}
+
+	// Initialize SSH client config
+	clientConfig, err := createSSHClientConfig(sshConfig)
+	if err != nil {
+		return fmt.Errorf("failed to create SSH client config: %w", err)
+	}
+
 	// Create a new SCP client
-	client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
+	client := scp.NewClient(fmt.Sprintf("%s:%s", sshConfig.hostName, sshConfig.port), &clientConfig)

 	// Connect to the remote server
 	err = client.Connect()
 	if err != nil {
-		return errors.New("Couldn't establish a connection to the remote server")
+		return errors.New("Couldn't establish a connection to the remote server\n")
 	}

-	// Open a file
-	file, _ := os.Open(filepath.Join(tmpPath, fileName))
-
-	// Close client connection after the file has been copied
+	// Open the local file
+	filePath := filepath.Join(tmpPath, fileName)
+	file, err := os.Open(filePath)
+	if err != nil {
+		return fmt.Errorf("failed to open file %s: %w", filePath, err)
+	}
 	defer client.Close()
+	// Close the file after it has been copied
 	defer file.Close()
-	// the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
+	// Copy file to the remote server
 	err = client.CopyFromFile(context.Background(), *file, filepath.Join(remotePath, fileName), "0655")
 	if err != nil {
-		fmt.Println("Error while copying file ")
-		return err
+		return fmt.Errorf("failed to copy file to remote server: %w", err)
 	}

 	return nil
 }

 func CopyFromRemote(fileName, remotePath string) error {
-	sshUser := os.Getenv("SSH_USER")
-	sshPassword := os.Getenv("SSH_PASSWORD")
-	sshHostName := os.Getenv("SSH_HOST_NAME")
-	sshPort := os.Getenv("SSH_PORT")
-	sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
-
-	err := utils.CheckEnvVars(sshHVars)
+	// Load environment variables
+	sshConfig, err := loadSSHConfig()
 	if err != nil {
-		utils.Error("Error checking environment variables\n: %s", err)
-		os.Exit(1)
+		return fmt.Errorf("failed to load SSH configuration: %w", err)
 	}

-	clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
-	if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
-		clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
-	} else {
-		if sshPassword == "" {
-			return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n")
-		}
-		utils.Warn("Accessing the remote server using password, password is not recommended")
-		clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
-	}
+	// Initialize SSH client config
+	clientConfig, err := createSSHClientConfig(sshConfig)
+	if err != nil {
+		return fmt.Errorf("failed to create SSH client config: %w", err)
+	}

 	// Create a new SCP client
-	client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
+	client := scp.NewClient(fmt.Sprintf("%s:%s", sshConfig.hostName, sshConfig.port), &clientConfig)

 	// Connect to the remote server
 	err = client.Connect()
@@ -107,7 +103,7 @@ func CopyFromRemote(fileName, remotePath string) error {
 	err = client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName))

 	if err != nil {
-		fmt.Println("Error while copying file ", err)
+		utils.Error("Error while copying file %s ", err)
 		return err
 	}
 	return nil
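Usage sketch for the SCP path, as if inside package pkg: loadSSHConfig (pkg/config.go) reads the SSH_* variables, and an identity file takes precedence over a password in createSSHClientConfig. All values below are placeholders:

// Sketch only: environment-driven SCP upload.
func scpUploadExample() error {
	os.Setenv("SSH_HOST", "backup.example.com")
	os.Setenv("SSH_PORT", "22")
	os.Setenv("SSH_USER", "backup")
	// if this file is unset or missing, SSH_PASSWORD is required instead
	os.Setenv("SSH_IDENTIFY_FILE", "/config/id_ed25519")
	os.Setenv("REMOTE_PATH", "/srv/backups")
	return CopyToRemote("mydb.sql.gz", os.Getenv("REMOTE_PATH"))
}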
@@ -1,69 +0,0 @@
-package pkg
-
-// Package pkg /*
-/*
-Copyright © 2024 Jonas Kaninda
-*/
-import (
-	"fmt"
-	"github.com/jkaninda/mysql-bkup/utils"
-	"os"
-	"os/exec"
-)
-
-func CreateCrontabScript(disableCompression bool, storage string) {
-	//task := "/usr/local/bin/backup_cron.sh"
-	touchCmd := exec.Command("touch", backupCronFile)
-	if err := touchCmd.Run(); err != nil {
-		utils.Fatal("Error creating file %s: %v\n", backupCronFile, err)
-	}
-	var disableC = ""
-	if disableCompression {
-		disableC = "--disable-compression"
-	}
-
-	scriptContent := fmt.Sprintf(`#!/usr/bin/env bash
-set -e
-bkup backup --dbname %s --port %s --storage %s %v
-`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), storage, disableC)
-
-	if err := utils.WriteToFile(backupCronFile, scriptContent); err != nil {
-		utils.Fatal("Error writing to %s: %v\n", backupCronFile, err)
-	}
-
-	chmodCmd := exec.Command("chmod", "+x", "/usr/local/bin/backup_cron.sh")
-	if err := chmodCmd.Run(); err != nil {
-		utils.Fatal("Error changing permissions of %s: %v\n", backupCronFile, err)
-	}
-
-	lnCmd := exec.Command("ln", "-s", "/usr/local/bin/backup_cron.sh", "/usr/local/bin/backup_cron")
-	if err := lnCmd.Run(); err != nil {
-		utils.Fatal("Error creating symbolic link: %v\n", err)
-	}
-
-	touchLogCmd := exec.Command("touch", cronLogFile)
-	if err := touchLogCmd.Run(); err != nil {
-		utils.Fatal("Error creating file %s: %v\n", cronLogFile, err)
-	}
-
-	cronJob := "/etc/cron.d/backup_cron"
-	touchCronCmd := exec.Command("touch", cronJob)
-	if err := touchCronCmd.Run(); err != nil {
-		utils.Fatal("Error creating file %s: %v\n", cronJob, err)
-	}
-
-	cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
-`, os.Getenv("SCHEDULE_PERIOD"), cronLogFile)
-
-	if err := utils.WriteToFile(cronJob, cronContent); err != nil {
-		utils.Fatal("Error writing to %s: %v\n", cronJob, err)
-	}
-	utils.ChangePermission("/etc/cron.d/backup_cron", 0644)
-
-	crontabCmd := exec.Command("crontab", "/etc/cron.d/backup_cron")
-	if err := crontabCmd.Run(); err != nil {
-		utils.Fatal("Error updating crontab: ", err)
-	}
-	utils.Info("Backup job created.")
-}
55  pkg/var.go
@@ -1,23 +1,27 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg

 const cronLogFile = "/var/log/mysql-bkup.log"
 const tmpPath = "/tmp/backup"
 const backupCronFile = "/usr/local/bin/backup_cron.sh"
 const algorithm = "aes256"
+const gpgHome = "/config/gnupg"
 const gpgExtension = "gpg"
+const workingDir = "/config"

 var (
-	storage            = "local"
-	file               = ""
-	dbPassword         = ""
-	dbUserName         = ""
-	dbName             = ""
-	dbHost             = ""
-	dbPort             = "3306"
-	executionMode      = "default"
-	storagePath        = "/backup"
-	disableCompression = false
-	encryption         = false
+	storage                  = "local"
+	file                     = ""
+	storagePath              = "/backup"
+	disableCompression       = false
+	encryption               = false
+	usingKey                 = false
+	backupSize         int64 = 0
+	startTime          string
 )

 // dbHVars Required environment variables for database
@@ -27,11 +31,36 @@ var dbHVars = []string{
 	"DB_USERNAME",
 	"DB_NAME",
 }
+var tdbRVars = []string{
+	"TARGET_DB_HOST",
+	"TARGET_DB_PORT",
+	"TARGET_DB_NAME",
+	"TARGET_DB_USERNAME",
+	"TARGET_DB_PASSWORD",
+}
+
+var dbConf *dbConfig
+var targetDbConf *targetDbConfig

 // sshHVars Required environment variables for SSH remote server storage
 var sshHVars = []string{
 	"SSH_USER",
-	"SSH_REMOTE_PATH",
+	"REMOTE_PATH",
 	"SSH_HOST_NAME",
 	"SSH_PORT",
 }
+var ftpVars = []string{
+	"FTP_HOST_NAME",
+	"FTP_USER",
+	"FTP_PASSWORD",
+	"FTP_PORT",
+}

+// AwsVars Required environment variables for AWS S3 storage
+var awsVars = []string{
+	"AWS_S3_ENDPOINT",
+	"AWS_S3_BUCKET_NAME",
+	"AWS_ACCESS_KEY",
+	"AWS_SECRET_KEY",
+	"AWS_REGION",
+}
@@ -1,8 +0,0 @@
-#!/bin/sh
-DB_USERNAME='db_username'
-DB_PASSWORD='password'
-DB_HOST='db_hostname'
-DB_NAME='db_name'
-BACKUP_DIR="$PWD/backup"
-
-docker run --rm --name mysql-bkup -v $BACKUP_DIR:/backup/ -e "DB_HOST=$DB_HOST" -e "DB_USERNAME=$DB_USERNAME" -e "DB_PASSWORD=$DB_PASSWORD" jkaninda/mysql-bkup:latest backup -d $DB_NAME
18
templates/email-error.template
Normal file
@@ -0,0 +1,18 @@
<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <title>🔴 Urgent: Database Backup Failure Notification</title>
</head>
<body>
<h2>Hi,</h2>
<p>An error occurred during database backup.</p>
<h3>Failure Details:</h3>
<ul>
    <li>Error Message: {{.Error}}</li>
    <li>Date: {{.EndTime}}</li>
    <li>Backup Reference: {{.BackupReference}}</li>
</ul>
<p>©2024 <a href="github.com/jkaninda/mysql-bkup">mysql-bkup</a></p>
</body>
</html>
24
templates/email.template
Normal file
@@ -0,0 +1,24 @@
<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <title>✅ Database Backup Notification – {{.Database}}</title>
</head>
<body>
<h2>Hi,</h2>
<p>Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.</p>
<h3>Backup Details:</h3>
<ul>
    <li>Database Name: {{.Database}}</li>
    <li>Backup Start Time: {{.StartTime}}</li>
    <li>Backup End Time: {{.EndTime}}</li>
    <li>Backup Storage: {{.Storage}}</li>
    <li>Backup Location: {{.BackupLocation}}</li>
    <li>Backup Size: {{.BackupSize}} bytes</li>
    <li>Backup Reference: {{.BackupReference}}</li>
</ul>
<p>Best regards,</p>
<p>©2024 <a href="github.com/jkaninda/mysql-bkup">mysql-bkup</a></p>
</body>
</html>
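Both HTML templates are rendered through Go's html/template, driven by the NotificationData and ErrorMessage structs defined in utils/config.go below (the repository routes this through the generic parseTemplate helper in utils/notification.go). A self-contained sketch of the same rendering step, with sample data; the struct is trimmed to the fields the template references, and the path is the repository's templatePath constant:

	package main

	import (
		"html/template"
		"os"
	)

	// Trimmed version of NotificationData from utils/config.go.
	type NotificationData struct {
		Database, StartTime, EndTime, Storage, BackupLocation, BackupReference string
		BackupSize                                                             int64
	}

	func main() {
		// Parse the success template and execute it against sample data.
		tmpl := template.Must(template.ParseFiles("/config/templates/email.template"))
		data := NotificationData{Database: "demo", Storage: "local", BackupSize: 1024}
		if err := tmpl.Execute(os.Stdout, data); err != nil {
			panic(err)
		}
	}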
8
templates/telegram-error.template
Normal file
@@ -0,0 +1,8 @@
🔴 Urgent: Database Backup Failure Notification
Hi,
An error occurred during database backup.
Failure Details:
- Date: {{.EndTime}}
- Backup Reference: {{.BackupReference}}
- Error Message: {{.Error}}
12
templates/telegram.template
Normal file
@@ -0,0 +1,12 @@
✅ Database Backup Notification – {{.Database}}
Hi,
Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.

Backup Details:
- Database Name: {{.Database}}
- Backup Start Time: {{.StartTime}}
- Backup End Time: {{.EndTime}}
- Backup Storage: {{.Storage}}
- Backup Location: {{.BackupLocation}}
- Backup Size: {{.BackupSize}} bytes
- Backup Reference: {{.BackupReference}}
59
utils/config.go
Normal file
@@ -0,0 +1,59 @@
package utils

import "os"

type MailConfig struct {
	MailHost     string
	MailPort     int
	MailUserName string
	MailPassword string
	MailTo       string
	MailFrom     string
	SkipTls      bool
}
type NotificationData struct {
	File            string
	BackupSize      int64
	Database        string
	StartTime       string
	EndTime         string
	Storage         string
	BackupLocation  string
	BackupReference string
}
type ErrorMessage struct {
	Database        string
	EndTime         string
	Error           string
	BackupReference string
}

// loadMailConfig gets mail environment variables and returns MailConfig
func loadMailConfig() *MailConfig {
	return &MailConfig{
		MailHost:     os.Getenv("MAIL_HOST"),
		MailPort:     GetIntEnv("MAIL_PORT"),
		MailUserName: os.Getenv("MAIL_USERNAME"),
		MailPassword: os.Getenv("MAIL_PASSWORD"),
		MailTo:       os.Getenv("MAIL_TO"),
		MailFrom:     os.Getenv("MAIL_FROM"),
		// Skip TLS verification only when MAIL_SKIP_TLS is explicitly "true"
		SkipTls: os.Getenv("MAIL_SKIP_TLS") == "true",
	}
}

// TimeFormat returns the time layout used in notifications; TIME_FORMAT overrides the default
func TimeFormat() string {
	format := os.Getenv("TIME_FORMAT")
	if format == "" {
		return "2006-01-02 at 15:04:05"
	}
	return format
}

func backupReference() string {
	return os.Getenv("BACKUP_REFERENCE")
}

const templatePath = "/config/templates"
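The default layout uses Go's reference-time convention: a format string is written by rendering the canonical instant Mon Jan 2 15:04:05 MST 2006 in the desired shape. A minimal self-contained illustration:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		// Prints something like "2024-08-15 at 09:30:00".
		fmt.Println(time.Now().Format("2006-01-02 at 15:04:05"))
	}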
@@ -1,10 +1,16 @@
// Package utils /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package utils

const RestoreExample = "mysql-bkup restore --dbname database --file db_20231219_022941.sql.gz\n" +
	"bkup restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
const BackupExample = "mysql-bkup backup --dbname database --disable-compression\n" +
	"mysql-bkup backup --dbname database --storage s3 --path /custom-path --disable-compression"
const RestoreExample = "restore --dbname database --file db_20231219_022941.sql.gz\n" +
	"restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
const BackupExample = "backup --dbname database --disable-compression\n" +
	"backup --dbname database --storage s3 --path /custom-path --disable-compression"

const MainExample = "mysql-bkup backup --dbname database --disable-compression\n" +
	"mysql-bkup backup --dbname database --storage s3 --path /custom-path\n" +
	"mysql-bkup restore --dbname database --file db_20231219_022941.sql.gz"
	"backup --dbname database --storage s3 --path /custom-path\n" +
	"restore --dbname database --file db_20231219_022941.sql.gz"
@@ -1,3 +1,9 @@
// Package utils /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package utils

import (
@@ -6,9 +12,8 @@ import (
	"time"
)

var currentTime = time.Now().Format("2006/01/02 15:04:05")

func Info(msg string, args ...any) {
	var currentTime = time.Now().Format("2006/01/02 15:04:05")
	formattedMessage := fmt.Sprintf(msg, args...)
	if len(args) == 0 {
		fmt.Printf("%s INFO: %s\n", currentTime, msg)
@@ -19,6 +24,7 @@ func Info(msg string, args ...any) {

// Warn warning message
func Warn(msg string, args ...any) {
	var currentTime = time.Now().Format("2006/01/02 15:04:05")
	formattedMessage := fmt.Sprintf(msg, args...)
	if len(args) == 0 {
		fmt.Printf("%s WARN: %s\n", currentTime, msg)
@@ -27,6 +33,7 @@ func Warn(msg string, args ...any) {
	}
}
func Error(msg string, args ...any) {
	var currentTime = time.Now().Format("2006/01/02 15:04:05")
	formattedMessage := fmt.Sprintf(msg, args...)
	if len(args) == 0 {
		fmt.Printf("%s ERROR: %s\n", currentTime, msg)
@@ -35,6 +42,7 @@ func Error(msg string, args ...any) {
	}
}
func Done(msg string, args ...any) {
	var currentTime = time.Now().Format("2006/01/02 15:04:05")
	formattedMessage := fmt.Sprintf(msg, args...)
	if len(args) == 0 {
		fmt.Printf("%s INFO: %s\n", currentTime, msg)
@@ -45,12 +53,17 @@ func Done(msg string, args ...any) {

// Fatal logs an error message and exits the program
func Fatal(msg string, args ...any) {
	var currentTime = time.Now().Format("2006/01/02 15:04:05")
	formattedMessage := fmt.Sprintf(msg, args...)
	if len(args) == 0 {
		fmt.Printf("%s ERROR: %s\n", currentTime, msg)
		NotifyError(msg)
	} else {
		fmt.Printf("%s ERROR: %s\n", currentTime, formattedMessage)
		NotifyError(formattedMessage)
	}

	os.Exit(1)
}
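One detail worth spelling out: each logger branches on len(args) because passing a message that happens to contain a bare % through Sprintf with no arguments would inject %!-style noise, so argument-free messages are printed verbatim. A self-contained demonstration of the difference:

	package main

	import "fmt"

	func main() {
		msg := "restored dump at 100%"
		// Sprintf with no arguments mangles the trailing % verb:
		fmt.Println(fmt.Sprintf(msg)) // restored dump at 100%!(NOVERB)
		// Printing the message verbatim leaves it intact:
		fmt.Println(msg) // restored dump at 100%
	}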
183
utils/notification.go
Normal file
@@ -0,0 +1,183 @@
package utils

import (
	"bytes"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"github.com/go-mail/mail"
	"github.com/robfig/cron/v3"
	"html/template"
	"io/ioutil"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"
)

func parseTemplate[T any](data T, fileName string) (string, error) {
	// Open the template file
	tmpl, err := template.ParseFiles(filepath.Join(templatePath, fileName))
	if err != nil {
		return "", err
	}

	var buf bytes.Buffer
	if err = tmpl.Execute(&buf, data); err != nil {
		return "", err
	}

	return buf.String(), nil
}

func SendEmail(subject, body string) error {
	Info("Sending email notification...")
	config := loadMailConfig()
	emails := strings.Split(config.MailTo, ",")
	m := mail.NewMessage()
	m.SetHeader("From", config.MailFrom)
	m.SetHeader("To", emails...)
	m.SetHeader("Subject", subject)
	m.SetBody("text/html", body)
	d := mail.NewDialer(config.MailHost, config.MailPort, config.MailUserName, config.MailPassword)
	d.TLSConfig = &tls.Config{InsecureSkipVerify: config.SkipTls}

	if err := d.DialAndSend(m); err != nil {
		Error("Could not send email: %v", err)
		return err
	}
	Info("Email notification has been sent")
	return nil
}
func sendMessage(msg string) error {
	Info("Sending Telegram notification...")
	chatId := os.Getenv("TG_CHAT_ID")
	body, _ := json.Marshal(map[string]string{
		"chat_id": chatId,
		"text":    msg,
	})
	url := fmt.Sprintf("%s/sendMessage", getTgUrl())
	// Create an HTTP POST request
	request, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
	if err != nil {
		return err
	}
	request.Header.Add("Content-Type", "application/json")
	client := &http.Client{}
	response, err := client.Do(request)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	code := response.StatusCode
	if code == 200 {
		Info("Telegram notification has been sent")
		return nil
	} else {
		body, _ := ioutil.ReadAll(response.Body)
		Error("Could not send message, error: %s", string(body))
		return fmt.Errorf("error could not send message %s", string(body))
	}
}
func NotifySuccess(notificationData *NotificationData) {
	notificationData.BackupReference = backupReference()
	var vars = []string{
		"TG_TOKEN",
		"TG_CHAT_ID",
	}
	var mailVars = []string{
		"MAIL_HOST",
		"MAIL_PORT",
		"MAIL_USERNAME",
		"MAIL_PASSWORD",
		"MAIL_FROM",
		"MAIL_TO",
	}

	// Email notification
	err := CheckEnvVars(mailVars)
	if err == nil {
		body, err := parseTemplate(*notificationData, "email.template")
		if err != nil {
			Error("Could not parse email template: %v", err)
		}
		err = SendEmail(fmt.Sprintf("✅ Database Backup Notification – %s", notificationData.Database), body)
		if err != nil {
			Error("Could not send email: %v", err)
		}
	}
	// Telegram notification
	err = CheckEnvVars(vars)
	if err == nil {
		message, err := parseTemplate(*notificationData, "telegram.template")
		if err != nil {
			Error("Could not parse telegram template: %v", err)
		}

		err = sendMessage(message)
		if err != nil {
			Error("Could not send Telegram message: %v", err)
		}
	}
}
func NotifyError(error string) {
	var vars = []string{
		"TG_TOKEN",
		"TG_CHAT_ID",
	}
	var mailVars = []string{
		"MAIL_HOST",
		"MAIL_PORT",
		"MAIL_USERNAME",
		"MAIL_PASSWORD",
		"MAIL_FROM",
		"MAIL_TO",
	}

	// Email notification
	err := CheckEnvVars(mailVars)
	if err == nil {
		body, err := parseTemplate(ErrorMessage{
			Error:           error,
			EndTime:         time.Now().Format(TimeFormat()),
			BackupReference: os.Getenv("BACKUP_REFERENCE"),
		}, "email-error.template")
		if err != nil {
			Error("Could not parse error template: %v", err)
		}
		err = SendEmail("🔴 Urgent: Database Backup Failure Notification", body)
		if err != nil {
			Error("Could not send email: %v", err)
		}
	}
	// Telegram notification
	err = CheckEnvVars(vars)
	if err == nil {
		message, err := parseTemplate(ErrorMessage{
			Error:           error,
			EndTime:         time.Now().Format(TimeFormat()),
			BackupReference: os.Getenv("BACKUP_REFERENCE"),
		}, "telegram-error.template")
		if err != nil {
			Error("Could not parse error template: %v", err)
		}

		err = sendMessage(message)
		if err != nil {
			Error("Could not send telegram message: %v", err)
		}
	}
}

func getTgUrl() string {
	return fmt.Sprintf("https://api.telegram.org/bot%s", os.Getenv("TG_TOKEN"))
}
func IsValidCronExpression(cronExpr string) bool {
	_, err := cron.ParseStandard(cronExpr)
	return err == nil
}
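cron.ParseStandard from robfig/cron/v3 accepts the standard five-field crontab syntax as well as descriptors such as @daily, so the helper doubles as a validator for SCHEDULE_PERIOD values. A small self-contained usage sketch:

	package main

	import (
		"fmt"

		"github.com/robfig/cron/v3"
	)

	func main() {
		for _, expr := range []string{"0 1 * * *", "@daily", "not a cron"} {
			// Parse succeeds => the expression is a valid standard cron spec.
			_, err := cron.ParseStandard(expr)
			fmt.Printf("%-12q valid: %v\n", expr, err == nil)
		}
	}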
@@ -1,21 +1,21 @@
// Package utils /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package utils

/*****
* MySQL Backup & Restore
* @author Jonas Kaninda
* @license MIT License <https://opensource.org/licenses/MIT>
* @link https://github.com/jkaninda/mysql-bkup
**/
import (
	"bytes"
	"fmt"
	"github.com/spf13/cobra"
	"io"
	"io/fs"
	"os"
	"os/exec"
	"strconv"
)
// FileExists checks if the file does exist
func FileExists(filename string) bool {
	info, err := os.Stat(filename)
	if os.IsNotExist(err) {
@@ -90,34 +90,6 @@ func IsDirEmpty(name string) (bool, error) {
	return true, nil
}

// TestDatabaseConnection tests the database connection
func TestDatabaseConnection() {
	dbHost := os.Getenv("DB_HOST")
	dbPassword := os.Getenv("DB_PASSWORD")
	dbUserName := os.Getenv("DB_USERNAME")
	dbName := os.Getenv("DB_NAME")
	dbPort := os.Getenv("DB_PORT")

	if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
		Fatal("Please make sure all required database environment variables are set")
	} else {
		Info("Connecting to database ...")

		cmd := exec.Command("mysql", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName, "-e", "quit")

		// Capture the output
		var out bytes.Buffer
		cmd.Stdout = &out
		cmd.Stderr = &out
		err := cmd.Run()
		if err != nil {
			Error("Error testing database connection: %v\nOutput: %s", err, out.String())
			os.Exit(1)
		}
		Info("Successfully connected to database")
	}
}
func GetEnv(cmd *cobra.Command, flagName, envName string) string {
	value, _ := cmd.Flags().GetString(flagName)
	if value != "" {
@@ -157,14 +129,11 @@ func GetEnvVariable(envName, oldEnvName string) string {
		if err != nil {
			return value
		}
		Warn("%s is deprecated, please use %s instead!", oldEnvName, envName)
		}
	}
	return value
}
func ShowHistory() {
}

// CheckEnvVars checks if all the specified environment variables are set
func CheckEnvVars(vars []string) error {
@@ -182,3 +151,32 @@ func CheckEnvVars(vars []string) error {
	return nil
}

// MakeDir create directory
func MakeDir(dirPath string) error {
	err := os.Mkdir(dirPath, 0700)
	if err != nil {
		return err
	}
	return nil
}

// MakeDirAll create directory and all missing parents
func MakeDirAll(dirPath string) error {
	err := os.MkdirAll(dirPath, 0700)
	if err != nil {
		return err
	}
	return nil
}
func GetIntEnv(envName string) int {
	val := os.Getenv(envName)
	if val == "" {
		return 0
	}
	ret, err := strconv.Atoi(val)
	if err != nil {
		Error("Error: %v", err)
	}
	return ret
}
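A caveat on GetIntEnv: a malformed value logs an error but still returns Atoi's zero result, so e.g. an unparsable MAIL_PORT quietly becomes 0 in loadMailConfig. A self-contained sketch of that behavior; the mirrored helper below is illustrative, not repository code:

	package main

	import (
		"fmt"
		"os"
		"strconv"
	)

	// getIntEnv mirrors GetIntEnv above: unset or unparsable values yield 0.
	func getIntEnv(name string) int {
		val := os.Getenv(name)
		if val == "" {
			return 0
		}
		n, err := strconv.Atoi(val)
		if err != nil {
			fmt.Printf("ERROR: %v\n", err)
		}
		return n
	}

	func main() {
		os.Setenv("MAIL_PORT", "587")
		fmt.Println(getIntEnv("MAIL_PORT")) // 587
		os.Setenv("MAIL_PORT", "five87")
		fmt.Println(getIntEnv("MAIL_PORT")) // 0, after logging the parse error
	}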