mirror of https://github.com/jkaninda/mysql-bkup.git (synced 2025-12-06 13:39:41 +01:00)

Compare commits (73 commits)
| SHA1 |
|---|
| 041e0a07e9 |
| 9daac9c654 |
| f6098769cd |
| 5cdfaa4d94 |
| b205cd61ea |
| e1307250e8 |
| 17ac951deb |
| 6e2e08224d |
| 570b775f48 |
| e38e106983 |
| 3040420a09 |
| eac5f70408 |
| 3476c6f529 |
| 1a9c8483f8 |
| f8722f7ae4 |
| 421bf12910 |
| 3da4a27baa |
| 0881f075ef |
| 066e73f8e4 |
| 645243ff77 |
| 9384998127 |
| 390e7dad0c |
| 67ea22385f |
| cde82d8cfc |
| 4808f093e5 |
| c7a03861fe |
| 36ec63d522 |
| 0f07de1d83 |
| ae55839996 |
| a7f7e57a0d |
| b2ddaec93b |
| b3570d774c |
| 38f7e91c03 |
| 07c2935925 |
| f3c5585051 |
| 7163d030a5 |
| a2cec86e73 |
| 662b73579d |
| c9f8a32de1 |
| 8fb008151c |
| 113c84c885 |
| 58deb92953 |
| c41afb8b57 |
| 02e51a3933 |
| db4061b64b |
| 9467b157aa |
| c229ebdc9d |
| 7b701d1740 |
| ad6f190bad |
| de4dcaaeca |
| 17c0a99bda |
| b1c9abf931 |
| a70a893c11 |
| 243e25f4fb |
| cb0dcf4104 |
| d26d8d31c9 |
| 71d438ba76 |
| a3fc58af96 |
| 08ca6d4a39 |
| 27b9ab5f36 |
| 6d6db7061b |
| d90647aae7 |
| 5c2c05499f |
| 88ada6fefd |
| e6c8b0923d |
| 59a136039c |
| db835e81c4 |
| 5b05bcbf0c |
| b8277c8464 |
| 70338b6ae6 |
| 33b1acf7c0 |
| 9a4d02f648 |
| 1e06600c43 |
.github/FUNDING.yml (vendored, new file, +3)
@@ -0,0 +1,3 @@
# These are supported funding model platforms

ko_fi: jkaninda
.github/workflows/build.yml (vendored, new file, +32)
@@ -0,0 +1,32 @@
name: Build
on:
  push:
    branches: ['develop']
env:
  BUILDKIT_IMAGE: jkaninda/mysql-bkup
jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      -
        name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      -
        name: Build and push
        uses: docker/build-push-action@v3
        with:
          push: true
          file: "./docker/Dockerfile"
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          tags: |
            "${{env.BUILDKIT_IMAGE}}:develop-${{ github.sha }}"
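Given the `tags` pattern above, every push to `develop` publishes a multi-arch image tagged with the commit SHA; pulling one would look like this (the SHA below is a placeholder, not a real published tag):

```shell
# Pull a development build produced by this workflow (hypothetical SHA)
docker pull jkaninda/mysql-bkup:develop-0123456789abcdef0123456789abcdef01234567
```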
.github/workflows/release.yml (vendored, 2 changed lines)
@@ -1,4 +1,4 @@
-name: Release
+name: CI
 on:
   push:
     tags:
.gitignore (vendored, 3 changed lines)
@@ -8,4 +8,5 @@ test.md
 mysql-bkup
 /.DS_Store
 /.idea
-bin
+bin
+Makefile
Makefile (deleted, -46)
@@ -1,46 +0,0 @@
BINARY_NAME=mysql-bkup
include .env
export
run:
	go run . backup

build:
	go build -o bin/${BINARY_NAME} .

compile:
	GOOS=darwin GOARCH=arm64 go build -o bin/${BINARY_NAME}-darwin-arm64 .
	GOOS=darwin GOARCH=amd64 go build -o bin/${BINARY_NAME}-darwin-amd64 .
	GOOS=linux GOARCH=arm64 go build -o bin/${BINARY_NAME}-linux-arm64 .
	GOOS=linux GOARCH=amd64 go build -o bin/${BINARY_NAME}-linux-amd64 .

docker-build:
	docker build -f docker/Dockerfile -t jkaninda/mysql-bkup:latest .

docker-run: docker-build
	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --prune --keep-last 2

docker-restore: docker-build
	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup restore -f ${FILE_NAME}

docker-run-scheduled: docker-build
	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --mode scheduled --period "* * * * *"

docker-run-scheduled-s3: docker-build
	docker run --rm --network web --user 1000:1000 --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"

docker-run-s3: docker-build
	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "AWS_S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage s3 --path /custom-path

docker-restore-s3: docker-build
	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup restore --storage s3 -f ${FILE_NAME} --path /custom-path

docker-run-ssh: docker-build
	docker run --rm --network web -v "${SSH_IDENTIFY_FILE_LOCAL}:" --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage ssh

docker-restore-ssh: docker-build
	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" jkaninda/mysql-bkup bkup restore --storage ssh -f ${FILE_NAME}

run-docs:
	cd docs && bundle exec jekyll serve -H 0.0.0.0 -t
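This Makefile began with `include .env`; judging from the variables its targets reference, that file would have looked something like this (a sketch with placeholder values, not a file from the repository):

```shell
# .env consumed by the deleted Makefile (hypothetical values)
DB_HOST=mysql
DB_NAME=mydb
DB_USERNAME=user
DB_PASSWORD=secret
GPG_PASSPHRASE=passphrase
ACCESS_KEY=s3-access-key
SECRET_KEY=s3-secret-key
BUCKET_NAME=my-bucket
S3_ENDPOINT=https://s3.example.com
AWS_S3_BUCKET_NAME=my-bucket
AWS_S3_ENDPOINT=https://s3.example.com
FILE_NAME=mydb_20240101_000000.sql.gz
SSH_USER=jkaninda
SSH_HOST_NAME=remote-host
SSH_PORT=22
SSH_PASSWORD=secret
SSH_REMOTE_PATH=/home/jkaninda/backup
SSH_IDENTIFY_FILE=/tmp/id_ed25519
SSH_IDENTIFY_FILE_LOCAL=./id_ed25519
```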
README.md (106 changed lines)
@@ -1,19 +1,23 @@
 # MySQL Backup
-mysql-bkup is a Docker container image that can be used to backup and restore Postgres database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
+MySQL Backup is a Docker container image that can be used to backup, restore and migrate MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
 It also supports __encrypting__ your backups using GPG.

 The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
 It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.

-It also supports __encrypting__ your backups using GPG.
+It also supports database __encryption__ using GPG.

 [](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
 [](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
 <a href="https://ko-fi.com/jkaninda"><img src="https://uploads-ssl.webflow.com/5c14e387dab576fe667689cf/5cbed8a4ae2b88347c06c923_BuyMeACoffee_blue.png" height="20" alt="buy ma a coffee"></a>

 Successfully tested on:
 - Docker
 - Docker in Swarm mode
 - Kubernetes
 - OpenShift

 ## Documentation is found at <https://jkaninda.github.io/mysql-bkup>

@@ -30,13 +34,13 @@ It also supports __encrypting__ your backups using GPG.
 ## Storage:
 - Local
 - AWS S3 or any S3 Alternatives for Object Storage
-- SSH
+- SSH remote server

 ## Quickstart

 ### Simple backup using Docker CLI

-To run a one time backup, bind your local volume to `/backup` in the container and run the `mysql-bkup backup` command:
+To run a one time backup, bind your local volume to `/backup` in the container and run the `backup` command:

 ```shell
 docker run --rm --network your_network_name \
@@ -44,11 +48,17 @@ To run a one time backup, bind your local volume to `/backup` in the container a
   -e "DB_HOST=dbhost" \
   -e "DB_USERNAME=username" \
   -e "DB_PASSWORD=password" \
-  jkaninda/mysql-bkup mysql-bkup backup -d database_name
+  jkaninda/mysql-bkup backup -d database_name
 ```

+Alternatively, pass a `--env-file` in order to use a full config as described below.
+
+```yaml
+docker run --rm --network your_network_name \
+  --env-file your-env-file \
+  -v $PWD/backup:/backup/ \
+  jkaninda/mysql-bkup backup -d database_name
+```
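The `--env-file` contents are not spelled out at this point in the README; based on the configuration reference further down in this diff, a minimal file could be (values are placeholders):

```shell
# your-env-file (sketch; variable names taken from the configuration reference)
DB_HOST=dbhost
DB_PORT=3306
DB_NAME=database_name
DB_USERNAME=username
DB_PASSWORD=password
```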

 ### Simple backup in docker compose file

@@ -61,15 +71,12 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup
+    command: backup
     volumes:
       - ./backup:/backup
     environment:
-      - DB_PORT=5432
-      - DB_HOST=postgres
+      - DB_PORT=3306
+      - DB_HOST=mysql
       - DB_NAME=foo
       - DB_USERNAME=bar
       - DB_PASSWORD=password
@@ -81,53 +88,50 @@ networks:
 ```
 ## Deploy on Kubernetes

-For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as CronJob.
+For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as Job or CronJob.

-### Simple Kubernetes CronJob usage:
+### Simple Kubernetes backup Job :

 ```yaml
 apiVersion: batch/v1
-kind: CronJob
+kind: Job
 metadata:
-  name: bkup-job
+  name: backup-job
 spec:
-  schedule: "0 1 * * *"
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-          - name: mysql-bkup
-            image: jkaninda/mysql-bkup
-            command:
+  ttlSecondsAfterFinished: 100
+  template:
+    spec:
+      containers:
+        - name: pg-bkup
+          # In production, it is advised to lock your image tag to a proper
+          # release version instead of using `latest`.
+          # Check https://github.com/jkaninda/mysql-bkup/releases
+          # for a list of available releases.
+          image: jkaninda/mysql-bkup
+          command:
           - /bin/sh
           - -c
-          - mysql-bkup backup -s s3 --path /custom_path
-          env:
-            - name: DB_PORT
-              value: "5432"
-            - name: DB_HOST
-              value: ""
-            - name: DB_NAME
-              value: ""
-            - name: DB_USERNAME
-              value: ""
-            # Please use secret!
-            - name: DB_PASSWORD
-              value: ""
-            - name: AWS_S3_ENDPOINT
-              value: "https://s3.amazonaws.com"
-            - name: AWS_S3_BUCKET_NAME
-              value: "xxx"
-            - name: AWS_REGION
-              value: "us-west-2"
-            - name: AWS_ACCESS_KEY
-              value: "xxxx"
-            - name: AWS_SECRET_KEY
-              value: "xxxx"
-            - name: AWS_DISABLE_SSL
-              value: "false"
-          restartPolicy: Never
+          - backup -d dbname
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+          env:
+            - name: DB_HOST
+              value: "mysql"
+            - name: DB_USERNAME
+              value: "user"
+            - name: DB_PASSWORD
+              value: "password"
+          volumeMounts:
+            - mountPath: /backup
+              name: backup
+      volumes:
+        - name: backup
+          hostPath:
+            path: /home/toto/backup # directory location on host
+            type: Directory # this field is optional
+      restartPolicy: Never
 ```
 ## Available image registries
@@ -135,8 +139,8 @@ This Docker image is published to both Docker Hub and the GitHub container regis
 Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`:

 ```
-docker pull jkaninda/mysql-bkup:v1.0
-docker pull ghcr.io/jkaninda/mysql-bkup:v1.0
+docker pull jkaninda/mysql-bkup
+docker pull ghcr.io/jkaninda/mysql-bkup
 ```

 Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
cmd/backup.go
@@ -1,3 +1,9 @@
+// Package cmd /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package cmd

 import (
@@ -21,6 +27,8 @@ var BackupCmd = &cobra.Command{

 func init() {
 	//Backup
 	BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
+	BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
 	BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled")
 	BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time")
 	BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
cmd/migrate.go (new file, +27)
@@ -0,0 +1,27 @@
// Package cmd /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package cmd

import (
	"github.com/jkaninda/mysql-bkup/pkg"
	"github.com/jkaninda/mysql-bkup/utils"
	"github.com/spf13/cobra"
)

var MigrateCmd = &cobra.Command{
	Use:   "migrate",
	Short: "Migrate database from a source database to a target database",
	Run: func(cmd *cobra.Command, args []string) {
		if len(args) == 0 {
			pkg.StartMigration(cmd)
		} else {
			utils.Fatal("Error, no argument required")
		}
	},
}
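Because the Run function rejects positional arguments, `migrate` is driven entirely by flags and environment variables; an invocation therefore looks like this (the env file contents are shown in the migrate documentation later in this diff):

```shell
docker run --rm --network your_network_name \
  --env-file your-env \
  jkaninda/mysql-bkup migrate
```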
cmd/restore.go
@@ -24,5 +24,7 @@ var RestoreCmd = &cobra.Command{
 func init() {
 	//Restore
 	RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database")
+	RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
+	RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")

 }
cmd/root.go (17 changed lines)
@@ -1,7 +1,9 @@
-// Package cmd /*
-/*
-Copyright © 2024 Jonas Kaninda
-*/
+// Package cmd /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package cmd

 import (
@@ -30,13 +32,10 @@ func Execute() {
 }

 func init() {
-	rootCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
-	rootCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
-	rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name")
-	rootCmd.PersistentFlags().IntP("port", "p", 3306, "Database port")
 	rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only")

 	rootCmd.AddCommand(VersionCmd)
 	rootCmd.AddCommand(BackupCmd)
 	rootCmd.AddCommand(RestoreCmd)
+	rootCmd.AddCommand(MigrateCmd)

 }
@@ -1,9 +1,11 @@
+// Package cmd /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package cmd

-/*
-Copyright © 2024 Jonas Kaninda
-*/
-
 import (
 	"fmt"
 	"github.com/spf13/cobra"
docker/Dockerfile
@@ -10,17 +10,18 @@ RUN go mod download
 RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup

 FROM ubuntu:24.04
-ENV DB_HOST=""
+ENV DB_HOST="localhost"
 ENV DB_NAME=""
 ENV DB_USERNAME=""
 ENV DB_PASSWORD=""
-ENV DB_PORT="3306"
+ENV DB_PORT=3306
 ENV STORAGE=local
 ENV AWS_S3_ENDPOINT=""
 ENV AWS_S3_BUCKET_NAME=""
 ENV AWS_ACCESS_KEY=""
 ENV AWS_SECRET_KEY=""
 ENV AWS_REGION="us-west-2"
 ENV AWS_S3_PATH=""
 ENV AWS_DISABLE_SSL="false"
 ENV GPG_PASSPHRASE=""
 ENV SSH_USER=""
@@ -29,9 +30,17 @@ ENV SSH_PASSWORD=""
 ENV SSH_HOST_NAME=""
 ENV SSH_IDENTIFY_FILE=""
 ENV SSH_PORT="22"
+ENV TARGET_DB_HOST=""
+ENV TARGET_DB_PORT=3306
+ENV TARGET_DB_NAME="localhost"
+ENV TARGET_DB_USERNAME=""
+ENV TARGET_DB_PASSWORD=""
 ARG DEBIAN_FRONTEND=noninteractive
-ENV VERSION="v1.0"
-ARG WORKDIR="/app"
+ENV VERSION="v1.2.7"
+ENV BACKUP_CRON_EXPRESSION=""
+ENV TG_TOKEN=""
+ENV TG_CHAT_ID=""
+ARG WORKDIR="/config"
 ARG BACKUPDIR="/backup"
 ARG BACKUP_TMP_DIR="/tmp/backup"
 ARG BACKUP_CRON="/etc/cron.d/backup_cron"
@@ -39,7 +48,6 @@ ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
 LABEL author="Jonas Kaninda"

 RUN apt-get update -qq
-#RUN apt-get install build-essential libcurl4-openssl-dev libxml2-dev mime-support -y
 RUN apt install mysql-client supervisor cron gnupg -y

 # Clear cache
@@ -63,4 +71,15 @@ RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup

 ADD docker/supervisord.conf /etc/supervisor/supervisord.conf

-WORKDIR $WORKDIR
+# Create backup script and make it executable
+RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
+    chmod +x /usr/local/bin/backup
+# Create restore script and make it executable
+RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
+    chmod +x /usr/local/bin/restore
+# Create migrate script and make it executable
+RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
+    chmod +x /usr/local/bin/migrate
+
+WORKDIR $WORKDIR
 ENTRYPOINT ["/usr/local/bin/mysql-bkup"]
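These wrappers let callers that override the entrypoint (such as the Kubernetes examples in this diff, which run `/bin/sh -c`) invoke the subcommand by name; both forms below should be equivalent (database connection env vars omitted for brevity):

```shell
# Via the image entrypoint: arguments are passed straight to mysql-bkup.
docker run --rm jkaninda/mysql-bkup backup -d dbname
# Via the generated wrapper script on PATH, as the Kubernetes examples do.
docker run --rm --entrypoint /bin/sh jkaninda/mysql-bkup -c "backup -d dbname"
```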
docs/Dockerfile (deleted, -12)
@@ -1,12 +0,0 @@
FROM ruby:3.3.4

ENV LC_ALL C.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8

WORKDIR /usr/src/app

COPY . ./
RUN gem install bundler && bundle install

EXPOSE 4000
docs/_config.yml
@@ -13,10 +13,11 @@
 # you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
 # You can create any custom variable you would like, and they will be accessible
 # in the templates via {{ site.myvariable }}.
-title: MySQL database backup
+title: MySQL Backup Docker container image
 email: hi@jonaskaninda.com
 description: >- # this means to ignore newlines until "baseurl:"
-  MySQL Backup and Restore Docker container image. Backup database to AWS S3 storage or SSH remote server.
+  MySQL Backup is a Docker container image that can be used to backup and restore MySQL database.
+  It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.

 baseurl: "" # the subpath of your site, e.g. /blog
 url: "jkaninda.github.io/mysql-bkup/" # the base hostname & protocol for your site, e.g. http://example.com
docs/docker-compose.yml (deleted, -13)
@@ -1,13 +0,0 @@
services:
  jekyll:
    build:
      context: ./
    ports:
      - 4000:4000
    environment:
      - JEKYLL_ENV=development
    volumes:
      - .:/usr/src/app
    stdin_open: true
    tty: true
    command: bundle exec jekyll serve -H 0.0.0.0 -t
@@ -22,10 +22,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage s3 -d database --path /my-custom-path
+    command: backup --storage s3 -d database --path /my-custom-path
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql
@@ -62,10 +59,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
+    command: backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql
@@ -110,7 +104,7 @@ spec:
           command:
             - /bin/sh
             - -c
-            - mysql-bkup backup -s s3 --path /custom_path
+            - backup -s s3 --path /custom_path
           env:
             - name: DB_PORT
               value: "3306"
@@ -7,7 +7,7 @@ nav_order: 3
 # Backup to SSH remote server

-As described for s3 backup section, to change the storage of you backup and use S3 as storage. You need to add `--storage ssh` or `--storage remote`.
+As described for s3 backup section, to change the storage of your backup and use SSH Remote server as storage. You need to add `--storage ssh` or `--storage remote`.
 You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `SSH_REMOTE_PATH` environment variable.
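For the Docker CLI rather than compose, the same SSH settings can be passed as environment variables (a sketch assembled from the variables documented on this page; host values are placeholders):

```shell
docker run --rm --network your_network_name \
  -v $PWD/id_ed25519:/tmp/id_ed25519 \
  -e "DB_HOST=dbhost" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" \
  -e "SSH_HOST_NAME=remote-host" -e "SSH_PORT=22" -e "SSH_USER=jkaninda" \
  -e "SSH_IDENTIFY_FILE=/tmp/id_ed25519" \
  jkaninda/mysql-bkup backup -d database --storage ssh --path /home/jkaninda/backups
```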
 {: .note }
@@ -23,16 +23,13 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage remote -d database
+    command: backup --storage remote -d database
     volumes:
       - ./id_ed25519:/tmp/id_ed25519"
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql
-      - DB_NAME=database
+      #- DB_NAME=database
       - DB_USERNAME=username
       - DB_PASSWORD=password
       ## SSH config
@@ -66,10 +63,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup -d database --storage s3 --mode scheduled --period "0 1 * * *"
+    command: backup -d database --storage ssh --mode scheduled --period "0 1 * * *"
     volumes:
       - ./id_ed25519:/tmp/id_ed25519"
     environment:
@@ -117,7 +111,7 @@ spec:
           command:
             - /bin/sh
             - -c
-            - mysql-bkup backup -s s3 --path /custom_path
+            - backup -s ssh
           env:
             - name: DB_PORT
               value: "3306"
@@ -141,6 +135,6 @@ spec:
             - name: AWS_ACCESS_KEY
               value: "xxxx"
             - name: SSH_IDENTIFY_FILE
-              value: "/home/jkaninda/backups"
-      restartPolicy: OnFailure
+              value: "/tmp/id_ed25519"
+      restartPolicy: Never
 ```
@@ -7,7 +7,7 @@ nav_order: 1

 # Backup database

-To backup the database, you need to add `backup` subcommand to `mysql-bkup` or `bkup`.
+To backup the database, you need to add `backup` command.

 {: .note }
 The default storage is local storage mounted to __/backup__. The backup is compressed by default using gzip. The flag __`disable-compression`__ can be used when you need to disable backup compression.
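For example, a one-off local backup with compression turned off combines the documented flag with the CLI invocation shown later on this page (a sketch; values are placeholders):

```shell
docker run --rm --network your_network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=dbhost" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup -d database_name --disable-compression
```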
@@ -27,10 +27,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup -d database
+    command: backup -d database
     volumes:
       - ./backup:/backup
     environment:
@@ -54,7 +51,7 @@ networks:
   -e "DB_HOST=dbhost" \
   -e "DB_USERNAME=username" \
   -e "DB_PASSWORD=password" \
-  jkaninda/mysql-bkup mysql-bkup backup -d database_name
+  jkaninda/mysql-bkup backup -d database_name
 ```

 In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
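The `--period` value is an ordinary five-field cron expression; annotated for reference (the annotation is editorial, not part of the docs):

```shell
# ┌───────── minute (0-59)
# │ ┌─────── hour (0-23)
# │ │ ┌───── day of month (1-31)
# │ │ │ ┌─── month (1-12)
# │ │ │ │ ┌─ day of week (0-6)
# "0 1 * * *"  means: every day at 01:00
```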
@@ -68,10 +65,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup -d database --mode scheduled --period "0 1 * * *"
+    command: backup -d database --mode scheduled --period "0 1 * * *"
     volumes:
       - ./backup:/backup
     environment:
docs/how-tos/deploy-on-kubernetes.md (new file, +303)
@@ -0,0 +1,303 @@
---
title: Deploy on Kubernetes
layout: default
parent: How Tos
nav_order: 8
---

## Deploy on Kubernetes

To deploy MySQL Backup on Kubernetes, you can use Job to backup or Restore your database.
For recurring backup you can use CronJob, you don't need to run it in scheduled mode. as described bellow.

## Backup to S3 storage

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: backup
spec:
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, it is advised to lock your image tag to a proper
          # release version instead of using `latest`.
          # Check https://github.com/jkaninda/mysql-bkup/releases
          # for a list of available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - backup --storage s3
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: "dbname"
            - name: DB_USERNAME
              value: "username"
            # Please use secret!
            - name: DB_PASSWORD
              value: ""
            - name: AWS_S3_ENDPOINT
              value: "https://s3.amazonaws.com"
            - name: AWS_S3_BUCKET_NAME
              value: "xxx"
            - name: AWS_REGION
              value: "us-west-2"
            - name: AWS_ACCESS_KEY
              value: "xxxx"
            - name: AWS_SECRET_KEY
              value: "xxxx"
            - name: AWS_DISABLE_SSL
              value: "false"
      restartPolicy: Never
```

## Backup Job to SSH remote server

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: backup
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, it is advised to lock your image tag to a proper
          # release version instead of using `latest`.
          # Check https://github.com/jkaninda/mysql-bkup/releases
          # for a list of available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - bkup
            - backup
            - --storage
            - ssh
            - --disable-compression
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: "dbname"
            - name: DB_USERNAME
              value: "username"
            # Please use secret!
            - name: DB_PASSWORD
              value: ""
            - name: SSH_HOST_NAME
              value: "xxx"
            - name: SSH_PORT
              value: "22"
            - name: SSH_USER
              value: "xxx"
            - name: SSH_PASSWORD
              value: "xxxx"
            - name: SSH_REMOTE_PATH
              value: "/home/toto/backup"
            # Optional, required if you want to encrypt your backup
            - name: GPG_PASSPHRASE
              value: "xxxx"
      restartPolicy: Never
```

## Restore Job

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: restore-job
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, it is advised to lock your image tag to a proper
          # release version instead of using `latest`.
          # Check https://github.com/jkaninda/mysql-bkup/releases
          # for a list of available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - bkup
            - restore
            - --storage
            - ssh
            - --file store_20231219_022941.sql.gz
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: "dbname"
            - name: DB_USERNAME
              value: "username"
            # Please use secret!
            - name: DB_PASSWORD
              value: ""
            - name: SSH_HOST_NAME
              value: "xxx"
            - name: SSH_PORT
              value: "22"
            - name: SSH_USER
              value: "xxx"
            - name: SSH_PASSWORD
              value: "xxxx"
            - name: SSH_REMOTE_PATH
              value: "/home/xxxx/backup"
            # Optional, required if your backup was encrypted
            #- name: GPG_PASSPHRASE
            #  value: "xxxx"
      restartPolicy: Never
```

## Recurring backup

```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: backup-job
spec:
  schedule: "* * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: mysql-bkup
              image: jkaninda/mysql-bkup
              command:
                - /bin/sh
                - -c
                - bkup
                - backup
                - --storage
                - ssh
                - --disable-compression
              resources:
                limits:
                  memory: "128Mi"
                  cpu: "500m"
              env:
                - name: DB_PORT
                  value: "3306"
                - name: DB_HOST
                  value: ""
                - name: DB_NAME
                  value: "username"
                - name: DB_USERNAME
                  value: "username"
                # Please use secret!
                - name: DB_PASSWORD
                  value: ""
                - name: SSH_HOST_NAME
                  value: "xxx"
                - name: SSH_PORT
                  value: "xxx"
                - name: SSH_USER
                  value: "jkaninda"
                - name: SSH_REMOTE_PATH
                  value: "/home/jkaninda/backup"
                - name: SSH_PASSWORD
                  value: "password"
                # Optional, required if you want to encrypt your backup
                #- name: GPG_PASSPHRASE
                #  value: "xxx"
          restartPolicy: Never
```

## Kubernetes Rootless

This image also supports Kubernetes security context, you can run it in Rootless environment.
It has been tested on Openshift, it works well.
Deployment on OpenShift is supported, you need to remove `securityContext` section on your yaml file.

```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: backup-job
spec:
  schedule: "* * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          securityContext:
            runAsUser: 1000
            runAsGroup: 3000
            fsGroup: 2000
          containers:
            # In production, it is advised to lock your image tag to a proper
            # release version instead of using `latest`.
            # Check https://github.com/jkaninda/mysql-bkup/releases
            # for a list of available releases.
            - name: mysql-bkup
              image: jkaninda/mysql-bkup
              command:
                - /bin/sh
                - -c
                - bkup
                - backup
                - --storage
                - ssh
                - --disable-compression
              resources:
                limits:
                  memory: "128Mi"
                  cpu: "500m"
              env:
                - name: DB_PORT
                  value: "3306"
                - name: DB_HOST
                  value: ""
                - name: DB_NAME
                  value: "xxx"
                - name: DB_USERNAME
                  value: "xxx"
                # Please use secret!
                - name: DB_PASSWORD
                  value: ""
                - name: SSH_HOST_NAME
                  value: "xxx"
                - name: SSH_PORT
                  value: "22"
                - name: SSH_USER
                  value: "jkaninda"
                - name: SSH_REMOTE_PATH
                  value: "/home/jkaninda/backup"
                - name: SSH_PASSWORD
                  value: "password"
                # Optional, required if you want to encrypt your backup
                #- name: GPG_PASSPHRASE
                #  value: "xxx"
          restartPolicy: OnFailure
```
@@ -11,7 +11,7 @@ The image supports encrypting backups using GPG out of the box. In case a `GPG_P
 {: .warning }
 To restore an encrypted backup, you need to provide the same GPG passphrase used during backup process.

-To decrypt manually, you need to install gnupg
+To decrypt manually, you need to install `gnupg`

 ### Decrypt backup
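The decrypt command itself falls outside this hunk; assuming the backup was encrypted symmetrically with the passphrase, a typical gnupg invocation would be (an editorial sketch, not the project's documented command; the `.gpg` suffix is assumed):

```shell
gpg --batch --passphrase "your-passphrase" \
    --output store_20231219_022941.sql.gz \
    --decrypt store_20231219_022941.sql.gz.gpg
```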
@@ -32,10 +32,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup -d database
+    command: backup -d database
     volumes:
       - ./backup:/backup
     environment:
docs/how-tos/migrate.md (new file, +131)
@@ -0,0 +1,131 @@
---
title: Migrate database
layout: default
parent: How Tos
nav_order: 9
---

# Migrate database

To migrate the database, you need to add `migrate` command.

{: .note }
The Mysql backup has another great feature: migrating your database from a source database to a target.

As you know, to restore a database from a source to a target database, you need 2 operations: which is to start by backing up the source database and then restoring the source backed database to the target database.
Instead of proceeding like that, you can use the integrated feature `(migrate)`, which will help you migrate your database by doing only one operation.

{: .warning }
The `migrate` operation is irreversible, please backup your target database before this action.
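In effect, `migrate` collapses the dump and the restore into a single step; conceptually it behaves like the pipeline below (an editorial sketch of the idea using standard MySQL client tools, not the image's actual implementation):

```shell
# One-step migration, conceptually: dump the source, stream it into the target.
MYSQL_PWD="$DB_PASSWORD" mysqldump -h "$DB_HOST" -P "$DB_PORT" -u "$DB_USERNAME" "$DB_NAME" \
  | MYSQL_PWD="$TARGET_DB_PASSWORD" mysql -h "$TARGET_DB_HOST" -P "$TARGET_DB_PORT" -u "$TARGET_DB_USERNAME" "$TARGET_DB_NAME"
```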
### Docker compose
```yml
services:
  mysql-bkup:
    # In production, it is advised to lock your image tag to a proper
    # release version instead of using `latest`.
    # Check https://github.com/jkaninda/mysql-bkup/releases
    # for a list of available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: migrate
    volumes:
      - ./backup:/backup
    environment:
      ## Source database
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## Target database
      - TARGET_DB_HOST=target-mysql
      - TARGET_DB_PORT=3306
      - TARGET_DB_NAME=dbname
      - TARGET_DB_USERNAME=username
      - TARGET_DB_PASSWORD=password
    # mysql-bkup container must be connected to the same network with your database
    networks:
      - web
networks:
  web:
```

### Migrate database using Docker CLI

```
## Source database
DB_HOST=mysql
DB_PORT=3306
DB_NAME=dbname
DB_USERNAME=username
DB_PASSWORD=password

## Taget database
TARGET_DB_HOST=target-mysql
TARGET_DB_PORT=3306
TARGET_DB_NAME=dbname
TARGET_DB_USERNAME=username
TARGET_DB_PASSWORD=password
```

```shell
docker run --rm --network your_network_name \
  --env-file your-env
  -v $PWD/backup:/backup/ \
  jkaninda/mysql-bkup migrate
```

## Kubernetes

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: migrate-db
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, it is advised to lock your image tag to a proper
          # release version instead of using `latest`.
          # Check https://github.com/jkaninda/mysql-bkup/releases
          # for a list of available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - migrate
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            ## Source Database
            - name: DB_HOST
              value: "mysql"
            - name: DB_PORT
              value: "3306"
            - name: DB_NAME
              value: "dbname"
            - name: DB_USERNAME
              value: "username"
            - name: DB_PASSWORD
              value: "password"
            ## Target Database
            - name: TARGET_DB_HOST
              value: "target-mysql"
            - name: TARGET_DB_PORT
              value: "3306"
            - name: TARGET_DB_NAME
              value: "dbname"
            - name: TARGET_DB_USERNAME
              value: "username"
            - name: TARGET_DB_PASSWORD
              value: "password"
      restartPolicy: Never
```
@@ -7,7 +7,7 @@ nav_order: 5

 # Restore database from S3 storage

-To restore the database, you need to add `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
+To restore the database, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.

 {: .note }
 It supports __.sql__ and __.sql.gz__ compressed file.
@@ -23,10 +23,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
+    command: restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
     volumes:
       - ./backup:/backup
     environment:
@@ -48,4 +45,51 @@ services:
       - web
 networks:
   web:
 ```

+## Restore on Kubernetes
+
+Simple Kubernetes restore Job:
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: restore-db
+spec:
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - restore -s s3 --path /custom_path -f store_20231219_022941.sql.gz
+          env:
+            - name: DB_PORT
+              value: "3306"
+            - name: DB_HOST
+              value: ""
+            - name: DB_NAME
+              value: ""
+            - name: DB_USERNAME
+              value: ""
+            # Please use secret!
+            - name: DB_PASSWORD
+              value: ""
+            - name: AWS_S3_ENDPOINT
+              value: "https://s3.amazonaws.com"
+            - name: AWS_S3_BUCKET_NAME
+              value: "xxx"
+            - name: AWS_REGION
+              value: "us-west-2"
+            - name: AWS_ACCESS_KEY
+              value: "xxxx"
+            - name: AWS_SECRET_KEY
+              value: "xxxx"
+            - name: AWS_DISABLE_SSL
+              value: "false"
+      restartPolicy: Never
+  backoffLimit: 4
+```
@@ -6,7 +6,7 @@
 ---
 # Restore database from SSH remote server

-To restore the database from your remote server, you need to add `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
+To restore the database from your remote server, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.

 {: .note }
 It supports __.sql__ and __.sql.gz__ compressed file.
@@ -22,10 +22,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
+    command: restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
     volumes:
       - ./backup:/backup
     environment:
@@ -47,4 +44,50 @@ services:
       - web
 networks:
   web:
 ```
+## Restore on Kubernetes
+
+Simple Kubernetes restore Job:
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: restore-db
+spec:
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - restore -s ssh -f store_20231219_022941.sql.gz
+          env:
+            - name: DB_PORT
+              value: "3306"
+            - name: DB_HOST
+              value: ""
+            - name: DB_NAME
+              value: ""
+            - name: DB_USERNAME
+              value: ""
+            # Please use secret!
+            - name: DB_PASSWORD
+              value: ""
+            - name: SSH_HOST_NAME
+              value: ""
+            - name: SSH_PORT
+              value: "22"
+            - name: SSH_USER
+              value: "xxx"
+            - name: SSH_REMOTE_PATH
+              value: "/home/jkaninda/backups"
+            - name: AWS_ACCESS_KEY
+              value: "xxxx"
+            - name: SSH_IDENTIFY_FILE
+              value: "/tmp/id_ed25519"
+      restartPolicy: Never
+  backoffLimit: 4
+```
@@ -7,7 +7,7 @@ nav_order: 4

 # Restore database

-To restore the database, you need to add `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
+To restore the database, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.

 {: .note }
 It supports __.sql__ and __.sql.gz__ compressed file.
@@ -23,10 +23,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup restore -d database -f store_20231219_022941.sql.gz
+    command: restore -d database -f store_20231219_022941.sql.gz
     volumes:
       - ./backup:/backup
     environment:
@@ -6,7 +6,7 @@ nav_order: 1

 # About mysql-bkup
 {:.no_toc}
-mysql-bkup is a Docker container image that can be used to backup and restore MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
+MySQL Backup is a Docker container image that can be used to backup, restore and migrate MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH remote storage.
 It also supports __encrypting__ your backups using GPG.

 We are open to receiving stars, PRs, and issues!
@@ -19,7 +19,8 @@ We are open to receiving stars, PRs, and issues!
 The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
 It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.

-It also supports __encrypting__ your backups using GPG.
+It also supports database __encryption__ using GPG.

 {: .note }
 Code and documentation for `v1` version on [this branch][v1-branch].
@@ -32,7 +33,7 @@ Code and documentation for `v1` version on [this branch][v1-branch].

 ### Simple backup using Docker CLI

-To run a one time backup, bind your local volume to `/backup` in the container and run the `mysql-bkup backup` command:
+To run a one time backup, bind your local volume to `/backup` in the container and run the `backup` command:

 ```shell
 docker run --rm --network your_network_name \
@@ -40,11 +41,18 @@ To run a one time backup, bind your local volume to `/backup` in the container a
   -e "DB_HOST=dbhost" \
   -e "DB_USERNAME=username" \
   -e "DB_PASSWORD=password" \
-  jkaninda/mysql-bkup mysql-bkup backup -d database_name
+  jkaninda/mysql-bkup backup -d database_name
 ```

+Alternatively, pass a `--env-file` in order to use a full config as described below.
+
+```yaml
+docker run --rm --network your_network_name \
+  --env-file your-env-file \
+  -v $PWD/backup:/backup/ \
+  jkaninda/mysql-bkup backup -d database_name
+```

 ### Simple backup in docker compose file

 ```yaml
@@ -56,15 +64,12 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup
+    command: backup
     volumes:
       - ./backup:/backup
     environment:
       - DB_PORT=3306
-      - DB_HOST=postgres
+      - DB_HOST=mysql
       - DB_NAME=foo
       - DB_USERNAME=bar
       - DB_PASSWORD=password
@@ -74,6 +79,49 @@ services:
 networks:
   web:
 ```
+## Kubernetes
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: backup-job
+spec:
+  ttlSecondsAfterFinished: 100
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          # In production, it is advised to lock your image tag to a proper
+          # release version instead of using `latest`.
+          # Check https://github.com/jkaninda/mysql-bkup/releases
+          # for a list of available releases.
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - backup -d dbname
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+          env:
+            - name: DB_HOST
+              value: "mysql"
+            - name: DB_USERNAME
+              value: "user"
+            - name: DB_PASSWORD
+              value: "password"
+          volumeMounts:
+            - mountPath: /backup
+              name: backup
+      volumes:
+        - name: backup
+          hostPath:
+            path: /home/toto/backup # directory location on host
+            type: Directory # this field is optional
+      restartPolicy: Never
+```

 ## Available image registries

@@ -81,8 +129,8 @@ This Docker image is published to both Docker Hub and the GitHub container regis
 Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`:

 ```
-docker pull jkaninda/mysql-bkup:v1.0
-docker pull ghcr.io/jkaninda/mysql-bkup:v1.0
+docker pull jkaninda/mysql-bkup
+docker pull ghcr.io/jkaninda/mysql-bkup
 ```

 Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
@@ -21,7 +21,7 @@ In the old version, S3 storage was mounted using s3fs, so we decided to migrate

 | Options    | Shorts | Usage                      |
 |------------|--------|----------------------------|
-| mysql-bkup | bkup   | CLI utility                |
+| mysql-bkup | bkup   | CLI utility                |
 | backup     |        | Backup database operation  |
 | restore    |        | Restore database operation |
 | history    |        | Show the history of backup |

@@ -6,7 +6,7 @@ nav_order: 2

 # Configuration reference

-Backup and restore targets, schedule and retention are configured using environment variables or flags.
+Backup, restore and migrate targets, schedule and retention are configured using environment variables or flags.

@@ -19,6 +19,7 @@ Backup and restore targets, schedule and retention are configured using environm
 | mysql-bkup | bkup | CLI utility |
 | backup     |      | Backup database operation |
 | restore    |      | Restore database operation |
+| migrate    |      | Migrate database from one instance to another one |
 | --storage  | -s   | Storage. local or s3 (default: local) |
 | --file     | -f   | File name for restoration |
 | --path     |      | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` |
@@ -34,28 +35,35 @@ Backup and restore targets, schedule and retention are configured using environm

 ## Environment variables

-| Name | Requirement | Description |
-|-------------------|--------------------------------------------------|------------------------------------------------------|
-| DB_PORT | Optional, default 3306 | Database port number |
-| DB_HOST | Required | Database host |
-| DB_NAME | Optional if it was provided from the -d flag | Database name |
-| DB_USERNAME | Required | Database user name |
-| DB_PASSWORD | Required | Database password |
-| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
-| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
-| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
-| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
-| AWS_REGION | Optional, required for S3 storage | AWS Region |
-| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
-| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
-| Gmysql_PASSPHRASE | Optional, required to encrypt and restore backup | Gmysql passphrase |
-| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
-| SSH_USER | Optional, required for SSH storage | ssh remote user |
-| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
-| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
-| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
-| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
+| Name | Requirement | Description |
+|------------------------|----------------------------------------------------|------------------------------------------------------|
+| DB_PORT | Optional, default 3306 | Database port number |
+| DB_HOST | Required | Database host |
+| DB_NAME | Optional if it was provided from the -d flag | Database name |
+| DB_USERNAME | Required | Database user name |
+| DB_PASSWORD | Required | Database password |
+| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
+| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
+| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
+| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
+| AWS_REGION | Optional, required for S3 storage | AWS Region |
+| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
+| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
+| BACKUP_CRON_EXPRESSION | Optional if it was provided from the --period flag | Backup cron expression for docker in scheduled mode |
+| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
+| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
+| SSH_USER | Optional, required for SSH storage | ssh remote user |
+| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
+| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
+| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
+| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
+| TARGET_DB_HOST | Optional, required for database migration | Target database host |
+| TARGET_DB_PORT | Optional, required for database migration | Target database port |
+| TARGET_DB_NAME | Optional, required for database migration | Target database name |
+| TARGET_DB_USERNAME | Optional, required for database migration | Target database username |
+| TARGET_DB_PASSWORD | Optional, required for database migration | Target database password |
+| TG_TOKEN | Optional, required for Telegram notification | Telegram token |
+| TG_CHAT_ID | Optional, required for Telegram notification | Telegram Chat ID |
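The new `BACKUP_CRON_EXPRESSION` variable is the environment-variable counterpart of the `--period` flag; in scheduled mode it could be supplied like this (a sketch; values are placeholders):

```shell
docker run -d --network your_network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=dbhost" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" \
  -e "BACKUP_CRON_EXPRESSION=0 1 * * *" \
  jkaninda/mysql-bkup backup -d database_name --mode scheduled
```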
 ---
 ## Run in Scheduled mode

@@ -6,10 +6,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage s3 -d my-database"
+    command: backup --storage s3 -d my-database"
     environment:
       - DB_PORT=3306
      - DB_HOST=mysql
@@ -1,12 +1,11 @@
 version: "3"
 services:
   mysql-bkup:
     # In production, it is advised to lock your image tag to a proper
     # release version instead of using `latest`.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --dbname database_name --mode scheduled --period "0 1 * * *"
+    command: backup --dbname database_name --mode scheduled --period "0 1 * * *"
     volumes:
       - ./backup:/backup
     environment:
@@ -6,10 +6,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
+    command: backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql
@@ -3,10 +3,7 @@ services:
   mysql-bkup:
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --dbname database_name
+    command: backup --dbname database_name
     volumes:
       - ./backup:/backup
     environment:
@@ -1,44 +1,47 @@
-piVersion: batch/v1
-kind: CronJob
+apiVersion: batch/v1
+kind: Job
 metadata:
-  name: bkup-job
+  name: backup
 spec:
-  schedule: "0 1 * * *"
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-          - name: mysql-bkup
-            image: jkaninda/mysql-bkup
-            command:
-            - /bin/sh
-            - -c
-            - mysql-bkup backup -s s3 --path /custom_path
-            env:
-              - name: DB_PORT
-                value: "3306"
-              - name: DB_HOST
-                value: ""
-              - name: DB_NAME
-                value: ""
-              - name: DB_USERNAME
-                value: ""
-              # Please use secret!
-              - name: DB_PASSWORD
-                value: ""
-              - name: ACCESS_KEY
-                value: ""
-              - name: AWS_S3_ENDPOINT
-                value: "https://s3.amazonaws.com"
-              - name: AWS_S3_BUCKET_NAME
-                value: "xxx"
-              - name: AWS_REGION
-                value: "us-west-2"
-              - name: AWS_ACCESS_KEY
-                value: "xxxx"
-              - name: AWS_SECRET_KEY
-                value: "xxxx"
-              - name: AWS_DISABLE_SSL
-                value: "false"
-          restartPolicy: OnFailure
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          # In production, it is advised to lock your image tag to a proper
+          # release version instead of using `latest`.
+          # Check https://github.com/jkaninda/mysql-bkup/releases
+          # for a list of available releases.
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - backup --storage s3
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+          env:
+            - name: DB_PORT
+              value: "3306"
+            - name: DB_HOST
+              value: ""
+            - name: DB_NAME
+              value: "dbname"
+            - name: DB_USERNAME
+              value: "username"
+            # Please use secret!
+            - name: DB_PASSWORD
+              value: ""
+            - name: AWS_S3_ENDPOINT
+              value: "https://s3.amazonaws.com"
+            - name: AWS_S3_BUCKET_NAME
+              value: "xxx"
+            - name: AWS_REGION
+              value: "us-west-2"
+            - name: AWS_ACCESS_KEY
+              value: "xxxx"
+            - name: AWS_SECRET_KEY
+              value: "xxxx"
+            - name: AWS_DISABLE_SSL
+              value: "false"
+      restartPolicy: Never
13 main.go
@@ -1,12 +1,11 @@
-//main
-/*****
-* MySQL Backup & Restore
-* @author Jonas Kaninda
-* @license MIT License <https://opensource.org/licenses/MIT>
-* @link https://github.com/jkaninda/mysql-bkup
-**/
+// Package main /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package main

 import "github.com/jkaninda/mysql-bkup/cmd"

 func main() {
107 pkg/backup.go
@@ -1,7 +1,9 @@
-// Package pkg /*
-/*
-Copyright © 2024 Jonas Kaninda
-*/
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg

 import (
@@ -17,15 +19,12 @@ import (
 )

 func StartBackup(cmd *cobra.Command) {
-	_, _ = cmd.Flags().GetString("operation")
+	intro()
 	//Set env
 	utils.SetEnv("STORAGE_PATH", storagePath)
-	utils.GetEnv(cmd, "dbname", "DB_NAME")
-	utils.GetEnv(cmd, "port", "DB_PORT")
-	utils.GetEnv(cmd, "period", "SCHEDULE_PERIOD")
+	utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")

 	//Get flag value and set env
 	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
 	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
 	storage = utils.GetEnv(cmd, "storage", "STORAGE")
 	file = utils.GetEnv(cmd, "file", "FILE_NAME")
@@ -33,35 +32,38 @@ func StartBackup(cmd *cobra.Command) {
 	prune, _ := cmd.Flags().GetBool("prune")
 	disableCompression, _ = cmd.Flags().GetBool("disable-compression")
 	executionMode, _ = cmd.Flags().GetString("mode")
-	dbName = os.Getenv("DB_NAME")
 	gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
+	_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
+
+	dbConf = getDbConfig(cmd)

 	//
 	if gpqPassphrase != "" {
 		encryption = true
 	}

 	//Generate file name
-	backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
+	backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbConf.dbName, time.Now().Format("20060102_150405"))
 	if disableCompression {
-		backupFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
+		backupFileName = fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
 	}

 	if executionMode == "default" {
 		switch storage {
 		case "s3":
-			s3Backup(backupFileName, s3Path, disableCompression, prune, backupRetention, encryption)
+			s3Backup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
 		case "local":
-			localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
+			localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
 		case "ssh", "remote":
-			sshBackup(backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
+			sshBackup(dbConf, backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
 		case "ftp":
 			utils.Fatal("Not supported storage type: %s", storage)
 		default:
-			localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
+			localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
 		}

 	} else if executionMode == "scheduled" {
-		scheduledMode()
+		scheduledMode(dbConf, storage)
 	} else {
 		utils.Fatal("Error, unknown execution mode!")
 	}
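A side note on the storage dispatch above: at this size a `switch` is idiomatic Go, but the same routing can be written as a lookup table, which keeps the default case explicit as the list of backends grows. A self-contained sketch, with handler bodies standing in for `s3Backup`/`localBackup`/`sshBackup`:

```go
package main

import "fmt"

func main() {
	storage := "ssh"
	// Handler bodies are stand-ins for s3Backup / localBackup / sshBackup.
	handlers := map[string]func(){
		"s3":     func() { fmt.Println("backup to S3") },
		"local":  func() { fmt.Println("backup to local storage") },
		"ssh":    func() { fmt.Println("backup to remote server") },
		"remote": func() { fmt.Println("backup to remote server") },
	}
	h, ok := handlers[storage]
	if !ok {
		h = handlers["local"] // mirrors the switch's default case
	}
	h()
}
```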
@@ -69,17 +71,18 @@ func StartBackup(cmd *cobra.Command) {
 }

 // Run in scheduled mode
-func scheduledMode() {
+func scheduledMode(db *dbConfig, storage string) {

 	fmt.Println()
 	fmt.Println("**********************************")
 	fmt.Println(" Starting MySQL Bkup... ")
 	fmt.Println("***********************************")
 	utils.Info("Running in Scheduled mode")
-	utils.Info("Execution period %s", os.Getenv("SCHEDULE_PERIOD"))
+	utils.Info("Execution period %s", os.Getenv("BACKUP_CRON_EXPRESSION"))
+	utils.Info("Storage type %s ", storage)

 	//Test database connexion
-	utils.TestDatabaseConnection()
+	testDatabaseConnection(db)

 	utils.Info("Creating backup job...")
 	CreateCrontabScript(disableCompression, storage)
@@ -113,14 +116,13 @@ func scheduledMode() {
 		fmt.Println(line.Text)
 	}
 }
+func intro() {
+	utils.Info("Starting MySQL Backup...")
+	utils.Info("Copyright © 2024 Jonas Kaninda ")
+}

 // BackupDatabase backup database
-func BackupDatabase(backupFileName string, disableCompression bool) {
-	dbHost = os.Getenv("DB_HOST")
-	dbPassword = os.Getenv("DB_PASSWORD")
-	dbUserName = os.Getenv("DB_USERNAME")
-	dbName = os.Getenv("DB_NAME")
-	dbPort = os.Getenv("DB_PORT")
+func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
 	storagePath = os.Getenv("STORAGE_PATH")

 	err := utils.CheckEnvVars(dbHVars)
@@ -130,7 +132,11 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
 	}

 	utils.Info("Starting database backup...")
-	utils.TestDatabaseConnection()
+	err = os.Setenv("MYSQL_PWD", db.dbPassword)
+	if err != nil {
+		return
+	}
+	testDatabaseConnection(db)

 	// Backup Database database
 	utils.Info("Backing up database...")
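The change from `--password=...` to setting `MYSQL_PWD` is worth calling out: the MySQL client tools read the password from that environment variable, so it no longer shows up in the host's process list. A standalone sketch (host, port, user, and database values are placeholders):

```go
package main

import (
	"os"
	"os/exec"
)

func main() {
	// mysqldump reads the password from MYSQL_PWD, keeping it out of `ps`
	// output; the remaining connection settings stay on the command line.
	cmd := exec.Command("mysqldump", "-h", "db-host", "-P", "3306", "-u", "user", "mydb")
	cmd.Env = append(os.Environ(), "MYSQL_PWD=secret")
	out, err := cmd.Output()
	if err != nil {
		panic(err)
	}
	if err := os.WriteFile("mydb.sql", out, 0600); err != nil {
		panic(err)
	}
}
```

The hunks below apply the same `MYSQL_PWD` approach to the connectivity test and to the restore pipeline.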
@@ -138,11 +144,10 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
 	if disableCompression {
 		// Execute mysqldump
 		cmd := exec.Command("mysqldump",
-			"-h", dbHost,
-			"-P", dbPort,
-			"-u", dbUserName,
-			"--password="+dbPassword,
-			dbName,
+			"-h", db.dbHost,
+			"-P", db.dbPort,
+			"-u", db.dbUserName,
+			db.dbName,
 		)
 		output, err := cmd.Output()
 		if err != nil {
@@ -164,7 +169,7 @@ func BackupDatabase(backupFileName string, disableCompression bool) {

 	} else {
 		// Execute mysqldump
-		cmd := exec.Command("mysqldump", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName)
+		cmd := exec.Command("mysqldump", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName)
 		stdout, err := cmd.StdoutPipe()
 		if err != nil {
 			log.Fatal(err)
@@ -187,9 +192,9 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
 	}

 }
-func localBackup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func localBackup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
 	utils.Info("Backup database to local storage")
-	BackupDatabase(backupFileName, disableCompression)
+	BackupDatabase(db, backupFileName, disableCompression)
 	finalFileName := backupFileName
 	if encrypt {
 		encryptBackup(backupFileName)
@@ -197,23 +202,28 @@ func localBackup(backupFileName string, disableCompression bool, prune bool, bac
 	}
 	utils.Info("Backup name is %s", finalFileName)
 	moveToBackup(finalFileName, storagePath)
+	//Send notification
+	utils.NotifySuccess(finalFileName)
 	//Delete old backup
 	if prune {
 		deleteOldBackup(backupRetention)
 	}
+	//Delete temp
+	deleteTemp()
 }

-func s3Backup(backupFileName string, s3Path string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func s3Backup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
 	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
+	s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
 	utils.Info("Backup database to s3 storage")
 	//Backup database
-	BackupDatabase(backupFileName, disableCompression)
+	BackupDatabase(db, backupFileName, disableCompression)
 	finalFileName := backupFileName
 	if encrypt {
 		encryptBackup(backupFileName)
 		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
 	}
-	utils.Info("Uploading backup file to S3 storage...")
+	utils.Info("Uploading backup archive to remote storage S3 ... ")
 	utils.Info("Backup name is %s", finalFileName)
 	err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
 	if err != nil {
@@ -234,18 +244,24 @@ func s3Backup(backupFileName string, s3Path string, disableCompression bool, pru
 			utils.Fatal("Error deleting old backup from S3: %s ", err)
 		}
 	}
-	utils.Done("Database has been backed up and uploaded to s3 ")
+	utils.Done("Uploading backup archive to remote storage S3 ... done ")
+	//Send notification
+	utils.NotifySuccess(finalFileName)
+	//Delete temp
+	deleteTemp()
 }
-func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+
+// sshBackup backup database to SSH remote server
+func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
 	utils.Info("Backup database to Remote server")
 	//Backup database
-	BackupDatabase(backupFileName, disableCompression)
+	BackupDatabase(db, backupFileName, disableCompression)
 	finalFileName := backupFileName
 	if encrypt {
 		encryptBackup(backupFileName)
 		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
 	}
-	utils.Info("Uploading backup file to remote server...")
+	utils.Info("Uploading backup archive to remote storage ... ")
 	utils.Info("Backup name is %s", finalFileName)
 	err := CopyToRemote(finalFileName, remotePath)
 	if err != nil {
@@ -265,9 +281,14 @@ func sshBackup(backupFileName, remotePath string, disableCompression bool, prune

 	}

-	utils.Done("Database has been backed up and uploaded to remote server ")
+	utils.Done("Uploading backup archive to remote storage ... done ")
+	//Send notification
+	utils.NotifySuccess(finalFileName)
+	//Delete temp
+	deleteTemp()
 }

+// encryptBackup encrypt backup
 func encryptBackup(backupFileName string) {
 	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
 	err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
@@ -1,4 +1,64 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
+package pkg
+
+import (
+	"github.com/jkaninda/mysql-bkup/utils"
+	"github.com/spf13/cobra"
+	"os"
+)
+
+type Config struct {
+}
+
+type dbConfig struct {
+	dbHost     string
+	dbPort     string
+	dbName     string
+	dbUserName string
+	dbPassword string
+}
+type targetDbConfig struct {
+	targetDbHost     string
+	targetDbPort     string
+	targetDbUserName string
+	targetDbPassword string
+	targetDbName     string
+}
+
+func getDbConfig(cmd *cobra.Command) *dbConfig {
+	//Set env
+	utils.GetEnv(cmd, "dbname", "DB_NAME")
+	dConf := dbConfig{}
+	dConf.dbHost = os.Getenv("DB_HOST")
+	dConf.dbPort = os.Getenv("DB_PORT")
+	dConf.dbName = os.Getenv("DB_NAME")
+	dConf.dbUserName = os.Getenv("DB_USERNAME")
+	dConf.dbPassword = os.Getenv("DB_PASSWORD")
+
+	err := utils.CheckEnvVars(dbHVars)
+	if err != nil {
+		utils.Error("Please make sure all required environment variables for database are set")
+		utils.Fatal("Error checking environment variables: %s", err)
+	}
+	return &dConf
+}
+func getTargetDbConfig() *targetDbConfig {
+	tdbConfig := targetDbConfig{}
+	tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
+	tdbConfig.targetDbPort = os.Getenv("TARGET_DB_PORT")
+	tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME")
+	tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME")
+	tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD")
+
+	err := utils.CheckEnvVars(tdbRVars)
+	if err != nil {
+		utils.Error("Please make sure all required environment variables for the target database are set")
+		utils.Fatal("Error checking target database environment variables: %s", err)
+	}
+	return &tdbConfig
+}
@@ -1,3 +1,9 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg

 import (
@@ -9,11 +15,17 @@ import (

 func Decrypt(inputFile string, passphrase string) error {
 	utils.Info("Decrypting backup file: " + inputFile + " ...")
+	//Create gpg home dir
+	err := utils.MakeDir(gpgHome)
+	if err != nil {
+		return err
+	}
+	utils.SetEnv("GNUPGHOME", gpgHome)
 	cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr

-	err := cmd.Run()
+	err = cmd.Run()
 	if err != nil {
 		return err
 	}
@@ -24,11 +36,17 @@ func Decrypt(inputFile string, passphrase string) error {

 func Encrypt(inputFile string, passphrase string) error {
 	utils.Info("Encrypting backup...")
+	//Create gpg home dir
+	err := utils.MakeDir(gpgHome)
+	if err != nil {
+		return err
+	}
+	utils.SetEnv("GNUPGHOME", gpgHome)
 	cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile)
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr

-	err := cmd.Run()
+	err = cmd.Run()
 	if err != nil {
 		return err
 	}
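Both functions drive `gpg` in `--batch` mode so nothing ever prompts inside an unattended container job, and they point `GNUPGHOME` at a freshly created directory because the image may not ship a usable GnuPG home. A round-trip sketch of the same two invocations (file names and the passphrase are placeholders):

```go
package main

import (
	"os"
	"os/exec"
)

func main() {
	// A writable GNUPGHOME is created first, as Encrypt/Decrypt do above.
	if err := os.MkdirAll("gnupg", 0700); err != nil {
		panic(err)
	}
	os.Setenv("GNUPGHOME", "gnupg")

	// Symmetric encryption: writes backup.sql.gz.gpg next to the input.
	enc := exec.Command("gpg", "--batch", "--passphrase", "secret",
		"--symmetric", "--cipher-algo", "aes256", "backup.sql.gz")
	enc.Stdout, enc.Stderr = os.Stdout, os.Stderr
	if err := enc.Run(); err != nil {
		panic(err)
	}

	// Decryption back to a plain .sql.gz file.
	dec := exec.Command("gpg", "--batch", "--passphrase", "secret",
		"--output", "restored.sql.gz", "--decrypt", "backup.sql.gz.gpg")
	dec.Stdout, dec.Stderr = os.Stdout, os.Stderr
	if err := dec.Run(); err != nil {
		panic(err)
	}
}
```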
@@ -1,9 +1,17 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg

 import (
+	"bytes"
 	"fmt"
 	"github.com/jkaninda/mysql-bkup/utils"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"time"
 )
@@ -71,4 +79,49 @@ func deleteOldBackup(retentionDays int) {
 		utils.Fatal(fmt.Sprintf("Error: %s", err))
 		return
 	}
 	utils.Done("Deleting old backups...done")

 }
+func deleteTemp() {
+	utils.Info("Deleting %s ...", tmpPath)
+	err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		// Check if the current item is a file
+		if !info.IsDir() {
+			// Delete the file
+			err = os.Remove(path)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		utils.Error("Error deleting files: %v", err)
+	} else {
+		utils.Info("Deleting %s ... done", tmpPath)
+	}
+}
+
+// TestDatabaseConnection tests the database connection
+func testDatabaseConnection(db *dbConfig) {
+	err := os.Setenv("MYSQL_PWD", db.dbPassword)
+	if err != nil {
+		return
+	}
+	utils.Info("Connecting to %s database ...", db.dbName)
+	cmd := exec.Command("mysql", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName, "-e", "quit")
+	// Capture the output
+	var out bytes.Buffer
+	cmd.Stdout = &out
+	cmd.Stderr = &out
+	err = cmd.Run()
+	if err != nil {
+		utils.Fatal("Error testing database connection: %v\nOutput: %s", err, out.String())
+	}
+	utils.Info("Successfully connected to %s database", db.dbName)
+}
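`deleteTemp` walks the tree and removes regular files one by one, which leaves any directory skeleton under `/tmp/backup` intact. When the skeleton does not need to survive, an equivalent and shorter variant (a sketch of an alternative, not what the diff does) is to drop and recreate the directory:

```go
package main

import "os"

const tmpPath = "/tmp/backup"

func main() {
	// Remove the whole temp tree and recreate it empty, instead of
	// walking it and deleting file entries individually.
	if err := os.RemoveAll(tmpPath); err != nil {
		panic(err)
	}
	if err := os.MkdirAll(tmpPath, 0700); err != nil {
		panic(err)
	}
}
```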
40 pkg/migrate.go Normal file
@@ -0,0 +1,40 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
+package pkg
+
+import (
+	"fmt"
+	"github.com/jkaninda/mysql-bkup/utils"
+	"github.com/spf13/cobra"
+	"time"
+)
+
+func StartMigration(cmd *cobra.Command) {
+	intro()
+	utils.Info("Starting database migration...")
+	//Get DB config
+	dbConf = getDbConfig(cmd)
+	targetDbConf = getTargetDbConfig()
+
+	//Defining the target database variables
+	newDbConfig := dbConfig{}
+	newDbConfig.dbHost = targetDbConf.targetDbHost
+	newDbConfig.dbPort = targetDbConf.targetDbPort
+	newDbConfig.dbName = targetDbConf.targetDbName
+	newDbConfig.dbUserName = targetDbConf.targetDbUserName
+	newDbConfig.dbPassword = targetDbConf.targetDbPassword
+
+	//Generate file name
+	backupFileName := fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
+	//Backup source Database
+	BackupDatabase(dbConf, backupFileName, true)
+	//Restore source database into target database
+	utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
+	RestoreDatabase(&newDbConfig, backupFileName)
+	utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
+	utils.Info("Database migration completed.")
+}
@@ -1,3 +1,9 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg

 import (
@@ -10,11 +16,9 @@ import (
 )

 func StartRestore(cmd *cobra.Command) {
-
+	intro()
 	//Set env
 	utils.SetEnv("STORAGE_PATH", storagePath)
-	utils.GetEnv(cmd, "dbname", "DB_NAME")
-	utils.GetEnv(cmd, "port", "DB_PORT")

 	//Get flag value and set env
 	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
@@ -23,47 +27,45 @@ func StartRestore(cmd *cobra.Command) {
 	file = utils.GetEnv(cmd, "file", "FILE_NAME")
 	executionMode, _ = cmd.Flags().GetString("mode")
 	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
+	dbConf = getDbConfig(cmd)

 	switch storage {
 	case "s3":
-		restoreFromS3(file, bucket, s3Path)
+		restoreFromS3(dbConf, file, bucket, s3Path)
 	case "local":
 		utils.Info("Restore database from local")
 		copyToTmp(storagePath, file)
-		RestoreDatabase(file)
+		RestoreDatabase(dbConf, file)
 	case "ssh":
-		restoreFromRemote(file, remotePath)
+		restoreFromRemote(dbConf, file, remotePath)
 	case "ftp":
 		utils.Fatal("Restore from FTP is not yet supported")
 	default:
 		utils.Info("Restore database from local")
-		RestoreDatabase(file)
+		copyToTmp(storagePath, file)
+		RestoreDatabase(dbConf, file)
 	}
 }

-func restoreFromS3(file, bucket, s3Path string) {
+func restoreFromS3(db *dbConfig, file, bucket, s3Path string) {
 	utils.Info("Restore database from s3")
 	err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
 	if err != nil {
-		utils.Fatal(fmt.Sprintf("Error download file from s3 %s %s", file, err))
+		utils.Fatal("Error download file from s3 %s %v", file, err)
 	}
-	RestoreDatabase(file)
+	RestoreDatabase(db, file)
 }
-func restoreFromRemote(file, remotePath string) {
+func restoreFromRemote(db *dbConfig, file, remotePath string) {
 	utils.Info("Restore database from remote server")
 	err := CopyFromRemote(file, remotePath)
 	if err != nil {
-		utils.Fatal(fmt.Sprintf("Error download file from remote server: ", filepath.Join(remotePath, file), err))
+		utils.Fatal("Error download file from remote server: %s %v ", filepath.Join(remotePath, file), err)
 	}
-	RestoreDatabase(file)
+	RestoreDatabase(db, file)
 }

 // RestoreDatabase restore database
-func RestoreDatabase(file string) {
-	dbHost = os.Getenv("DB_HOST")
-	dbPassword = os.Getenv("DB_PASSWORD")
-	dbUserName = os.Getenv("DB_USERNAME")
-	dbName = os.Getenv("DB_NAME")
-	dbPort = os.Getenv("DB_PORT")
+func RestoreDatabase(db *dbConfig, file string) {
 	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
 	if file == "" {
 		utils.Fatal("Error, file required")
@@ -93,31 +95,42 @@ func RestoreDatabase(file string) {
 	}

 	if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
-		utils.TestDatabaseConnection()
+		err = os.Setenv("MYSQL_PWD", db.dbPassword)
+		if err != nil {
+			return
+		}
+		testDatabaseConnection(db)
 		utils.Info("Restoring database...")

 		extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
 		// Restore from compressed file / .sql.gz
 		if extension == ".gz" {
-			str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
+			str := "zcat " + filepath.Join(tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
 			_, err := exec.Command("bash", "-c", str).Output()
 			if err != nil {
 				utils.Fatal("Error, in restoring the database %v", err)
 			}
 			utils.Info("Restoring database... done")
 			utils.Done("Database has been restored")
+			//Delete temp
+			deleteTemp()

 		} else if extension == ".sql" {
 			//Restore from sql file
-			str := "cat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
+			str := "cat " + filepath.Join(tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
 			_, err := exec.Command("bash", "-c", str).Output()
 			if err != nil {
-				utils.Fatal(fmt.Sprintf("Error in restoring the database %s", err))
+				utils.Fatal("Error in restoring the database %v", err)
 			}
 			utils.Info("Restoring database... done")
 			utils.Done("Database has been restored")
+			//Delete temp
+			deleteTemp()
 		} else {
-			utils.Fatal(fmt.Sprintf("Unknown file extension %s", extension))
+			utils.Fatal("Unknown file extension %s", extension)
 		}

 	} else {
-		utils.Fatal(fmt.Sprintf("File not found in %s", fmt.Sprintf("%s/%s", tmpPath, file)))
+		utils.Fatal("File not found in %s", filepath.Join(tmpPath, file))
 	}
 }
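`RestoreDatabase` shells out to `bash -c "zcat ... | mysql ..."`. An alternative sketch that keeps the same pipeline inside Go (not what the diff does) streams the gzip reader straight into `mysql`'s stdin, avoiding the extra shell and its quoting rules; the dump path and connection values are placeholders:

```go
package main

import (
	"compress/gzip"
	"os"
	"os/exec"
)

func main() {
	f, err := os.Open("/tmp/backup/mydb_20240101_010101.sql.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	gz, err := gzip.NewReader(f)
	if err != nil {
		panic(err)
	}
	defer gz.Close()

	// mysql reads the decompressed dump from stdin; MYSQL_PWD carries the password.
	cmd := exec.Command("mysql", "-h", "db-host", "-P", "3306", "-u", "user", "mydb")
	cmd.Env = append(os.Environ(), "MYSQL_PWD=secret")
	cmd.Stdin = gz
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}
```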
@@ -1,3 +1,9 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg

 import (
@@ -1,9 +1,11 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg

-// Package pkg /*
-/*
-Copyright © 2024 Jonas Kaninda
-*/
 import (
 	"fmt"
 	"github.com/jkaninda/mysql-bkup/utils"
@@ -22,19 +24,10 @@ func CreateCrontabScript(disableCompression bool, storage string) {
 		disableC = "--disable-compression"
 	}

-	var scriptContent string
-
-	if storage == "s3" {
-		scriptContent = fmt.Sprintf(`#!/usr/bin/env bash
-set -e
-bkup backup --dbname %s --port %s --storage s3 --path %s %v
-`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), os.Getenv("S3_PATH"), disableC)
-	} else {
-		scriptContent = fmt.Sprintf(`#!/usr/bin/env bash
-set -e
-bkup backup --dbname %s --port %s %v
-`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), disableC)
-	}
+	scriptContent := fmt.Sprintf(`#!/usr/bin/env bash
+set -e
+/usr/local/bin/mysql-bkup backup --dbname %s --storage %s %v
+`, os.Getenv("DB_NAME"), storage, disableC)

 	if err := utils.WriteToFile(backupCronFile, scriptContent); err != nil {
 		utils.Fatal("Error writing to %s: %v\n", backupCronFile, err)
@@ -63,7 +56,7 @@ bkup backup --dbname %s --port %s %v
 	}

 	cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
-`, os.Getenv("SCHEDULE_PERIOD"), cronLogFile)
+`, os.Getenv("BACKUP_CRON_EXPRESSION"), cronLogFile)

 	if err := utils.WriteToFile(cronJob, cronContent); err != nil {
 		utils.Fatal("Error writing to %s: %v\n", cronJob, err)
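With `BACKUP_CRON_EXPRESSION="0 1 * * *"` (minute, hour, day of month, month, day of week, i.e. daily at 01:00), the two generated artifacts come out roughly as follows; a sketch that renders them the same way `CreateCrontabScript` does, with a sample database name and storage filled in:

```go
package main

import "fmt"

func main() {
	expr := "0 1 * * *"                      // daily at 01:00
	cronLogFile := "/var/log/mysql-bkup.log" // matches the constant in pkg/var.go

	// Script written to /usr/local/bin/backup_cron.sh
	script := fmt.Sprintf("#!/usr/bin/env bash\nset -e\n/usr/local/bin/mysql-bkup backup --dbname %s --storage %s %s\n",
		"mydb", "s3", "--disable-compression")
	// Crontab entry pointing at that script
	entry := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"`,
		expr, cronLogFile)

	fmt.Println(script)
	fmt.Println(entry)
}
```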
22 pkg/var.go
@@ -1,19 +1,21 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg

 const cronLogFile = "/var/log/mysql-bkup.log"
 const tmpPath = "/tmp/backup"
 const backupCronFile = "/usr/local/bin/backup_cron.sh"
 const algorithm = "aes256"
 const gpgHome = "gnupg"
 const gpgExtension = "gpg"

 var (
 	storage = "local"
 	file    = ""
-	dbPassword = ""
-	dbUserName = ""
-	dbName     = ""
-	dbHost     = ""
-	dbPort     = "3306"
 	executionMode = "default"
 	storagePath   = "/backup"
 	disableCompression = false
@@ -27,6 +29,16 @@ var dbHVars = []string{
 	"DB_USERNAME",
 	"DB_NAME",
 }
+var tdbRVars = []string{
+	"TARGET_DB_HOST",
+	"TARGET_DB_PORT",
+	"TARGET_DB_NAME",
+	"TARGET_DB_USERNAME",
+	"TARGET_DB_PASSWORD",
+}
+
+var dbConf *dbConfig
+var targetDbConf *targetDbConfig

 // sshHVars Required environment variables for SSH remote server storage
 var sshHVars = []string{
@@ -1,8 +0,0 @@
-#!/bin/sh
-DB_USERNAME='db_username'
-DB_PASSWORD='password'
-DB_HOST='db_hostname'
-DB_NAME='db_name'
-BACKUP_DIR="$PWD/backup"
-
-docker run --rm --name mysql-bkup -v $BACKUP_DIR:/backup/ -e "DB_HOST=$DB_HOST" -e "DB_USERNAME=$DB_USERNAME" -e "DB_PASSWORD=$DB_PASSWORD" jkaninda/mysql-bkup:latest backup -d $DB_NAME
@@ -1,3 +1,9 @@
+// Package utils /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package utils

 const RestoreExample = "mysql-bkup restore --dbname database --file db_20231219_022941.sql.gz\n" +
@@ -1,3 +1,9 @@
+// Package utils /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package utils

 import (
@@ -43,13 +49,19 @@ func Done(msg string, args ...any) {
 	}
 }

-// Fatal logs an error message and exits the program
 func Fatal(msg string, args ...any) {
+	// Fatal logs an error message and exits the program.
 	formattedMessage := fmt.Sprintf(msg, args...)
 	if len(args) == 0 {
 		fmt.Printf("%s ERROR: %s\n", currentTime, msg)
+		NotifyError(msg)
 	} else {
 		fmt.Printf("%s ERROR: %s\n", currentTime, formattedMessage)
+		NotifyError(formattedMessage)
 	}

-	os.Exit(1)
+	os.Kill.Signal()
 }
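One behavioral detail in the hunk above: `os.Kill.Signal()` only calls the empty method that lets `syscall.Signal` satisfy the `os.Signal` interface, so unlike the `os.Exit(1)` it replaces, it does not terminate the process, and `Fatal` now returns to its caller. A minimal demonstration:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	os.Kill.Signal()              // no-op: the method exists only to satisfy os.Signal
	fmt.Println("still running")  // this line is reached
	os.Exit(1)                    // this is what actually ends the process
}
```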
11 utils/s3.go
@@ -1,3 +1,9 @@
+// Package utils /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package utils

 import (
@@ -42,8 +48,7 @@ func CreateSession() (*session.Session, error) {

 	err = CheckEnvVars(awsVars)
 	if err != nil {
-		Error(fmt.Sprintf("Error checking environment variables\n: %s", err))
-		os.Exit(1)
+		Fatal("Error checking environment variables\n: %s", err)
 	}
 	// S3 Config
 	s3Config := &aws.Config{
@@ -123,7 +128,7 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
 		fmt.Println("Failed to download file", err)
 		return err
 	}
-	Info(fmt.Sprintf("Backup downloaded: ", file.Name(), " bytes size ", numBytes))
+	Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)

 	return nil
 }
138 utils/utils.go
@@ -1,19 +1,22 @@
+// Package utils /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package utils

-/*****
-* MySQL Backup & Restore
-* @author Jonas Kaninda
-* @license MIT License <https://opensource.org/licenses/MIT>
-* @link https://github.com/jkaninda/mysql-bkup
-**/
 import (
 	"bytes"
+	"encoding/json"
 	"fmt"
 	"github.com/spf13/cobra"
 	"io"
 	"io/fs"
+	"io/ioutil"
+	"net/http"
 	"os"
 	"os/exec"
 	"strconv"
 )
@@ -90,34 +93,6 @@ func IsDirEmpty(name string) (bool, error) {
 	return true, nil
 }

-// TestDatabaseConnection tests the database connection
-func TestDatabaseConnection() {
-	dbHost := os.Getenv("DB_HOST")
-	dbPassword := os.Getenv("DB_PASSWORD")
-	dbUserName := os.Getenv("DB_USERNAME")
-	dbName := os.Getenv("DB_NAME")
-	dbPort := os.Getenv("DB_PORT")
-
-	if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
-		Fatal("Please make sure all required database environment variables are set")
-	} else {
-		Info("Connecting to database ...")
-
-		cmd := exec.Command("mysql", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName, "-e", "quit")
-
-		// Capture the output
-		var out bytes.Buffer
-		cmd.Stdout = &out
-		cmd.Stderr = &out
-		err := cmd.Run()
-		if err != nil {
-			Error("Error testing database connection: %v\nOutput: %s", err, out.String())
-			os.Exit(1)
-
-		}
-		Info("Successfully connected to database")
-	}
-}
 func GetEnv(cmd *cobra.Command, flagName, envName string) string {
 	value, _ := cmd.Flags().GetString(flagName)
 	if value != "" {
@@ -182,3 +157,96 @@ func CheckEnvVars(vars []string) error {

 	return nil
 }
+
+// MakeDir create directory
+func MakeDir(dirPath string) error {
+	err := os.Mkdir(dirPath, 0700)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// MakeDirAll create directory
+func MakeDirAll(dirPath string) error {
+	err := os.MkdirAll(dirPath, 0700)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+func GetIntEnv(envName string) int {
+	val := os.Getenv(envName)
+	if val == "" {
+		return 0
+	}
+	ret, err := strconv.Atoi(val)
+	if err != nil {
+		Error("Error: %v", err)
+	}
+	return ret
+}
+func sendMessage(msg string) {
+
+	Info("Sending notification... ")
+	chatId := os.Getenv("TG_CHAT_ID")
+	body, _ := json.Marshal(map[string]string{
+		"chat_id": chatId,
+		"text":    msg,
+	})
+	url := fmt.Sprintf("%s/sendMessage", getTgUrl())
+	// Create an HTTP post request
+	request, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
+	if err != nil {
+		panic(err)
+	}
+	request.Header.Add("Content-Type", "application/json")
+	client := &http.Client{}
+	response, err := client.Do(request)
+	if err != nil {
+		panic(err)
+	}
+	code := response.StatusCode
+	if code == 200 {
+		Info("Notification has been sent")
+	} else {
+		body, _ := ioutil.ReadAll(response.Body)
+		Error("Message not sent, error: %s", string(body))
+	}
+
+}
+func NotifySuccess(fileName string) {
+	var vars = []string{
+		"TG_TOKEN",
+		"TG_CHAT_ID",
+	}
+
+	//Telegram notification
+	err := CheckEnvVars(vars)
+	if err == nil {
+		message := "MySQL Backup \n" +
+			"Database has been backed up \n" +
+			"Backup name is " + fileName
+		sendMessage(message)
+	}
+}
+func NotifyError(error string) {
+	var vars = []string{
+		"TG_TOKEN",
+		"TG_CHAT_ID",
+	}
+
+	//Telegram notification
+	err := CheckEnvVars(vars)
+	if err == nil {
+		message := "MySQL Backup \n" +
+			"An error occurred during database backup \n" +
+			"Error: " + error
+		sendMessage(message)
+	}
+}
+
+func getTgUrl() string {
+	return fmt.Sprintf("https://api.telegram.org/bot%s", os.Getenv("TG_TOKEN"))
+
+}
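For reference, the request assembled in `sendMessage` targets the standard Telegram Bot API: with a token of the documented example form `123456:ABC-DEF...`, the final URL is `https://api.telegram.org/bot123456:ABC-DEF.../sendMessage`, and the JSON body carries the `chat_id` and `text` fields shown above.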