feat: add migrate database from a source to a target database

fix: gpg encrypt permission warning message, update Kubernetes deployment example
This commit is contained in:
Jonas Kaninda
2024-08-30 19:58:12 +02:00
parent 8fb008151c
commit 662b73579d
18 changed files with 497 additions and 157 deletions

50
Makefile Normal file
View File

@@ -0,0 +1,50 @@
# Makefile for mysql-bkup: local build, cross-compilation, and
# Docker-based backup/restore/migrate smoke-test targets.
BINARY_NAME=mysql-bkup
IMAGE_NAME=jkaninda/mysql-bkup
# Pull in local settings (DB_*, ACCESS_KEY/SECRET_KEY, SSH_*, GPG_PASSPHRASE, ...)
# and export them so the docker-* recipes below can interpolate them.
include .env
export
# Run a backup straight from source with `go run`.
run:
go run . backup
# Build the binary for the host platform into bin/.
build:
go build -o bin/${BINARY_NAME} .
# Cross-compile release binaries for darwin/linux on arm64/amd64.
compile:
GOOS=darwin GOARCH=arm64 go build -o bin/${BINARY_NAME}-darwin-arm64 .
GOOS=darwin GOARCH=amd64 go build -o bin/${BINARY_NAME}-darwin-amd64 .
GOOS=linux GOARCH=arm64 go build -o bin/${BINARY_NAME}-linux-arm64 .
GOOS=linux GOARCH=amd64 go build -o bin/${BINARY_NAME}-linux-amd64 .
# Build the Docker image from docker/Dockerfile.
docker-build:
docker build -f docker/Dockerfile -t jkaninda/mysql-bkup:latest .
# Local-storage backup with pruning (keeps the last 2 backups).
docker-run: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --prune --keep-last 2
# Restore ${FILE_NAME} from the local ./backup directory.
docker-restore: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} restore -f ${FILE_NAME}
# Run the migrate command; all DB_*/SOURCE_DB_* settings come from .env.
docker-run-migrate: docker-build
docker run --rm --network web --name mysql-bkup --env-file .env -v "./backup:/backup" ${IMAGE_NAME} migrate
# Scheduled (cron) backup every minute, local storage.
docker-run-scheduled: #docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --mode scheduled --period "* * * * *"
# Scheduled backup to S3-compatible storage under /custom-path.
docker-run-scheduled-s3: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
# One-shot backup to S3 storage.
docker-run-s3: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "AWS_S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage s3 --path /custom-path
# Restore ${FILE_NAME} from S3 storage.
docker-restore-s3: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} restore --storage s3 -f ${FILE_NAME} --path /custom-path
# One-shot backup to a remote server over SSH.
docker-run-ssh: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage ssh
# Restore ${FILE_NAME} from a remote server over SSH.
docker-restore-ssh: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" ${IMAGE_NAME} restore --storage ssh -f ${FILE_NAME}
# Serve the Jekyll documentation site locally.
run-docs:
cd docs && bundle exec jekyll serve -H 0.0.0.0 -t

View File

@@ -95,12 +95,13 @@ For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as
apiVersion: batch/v1 apiVersion: batch/v1
kind: Job kind: Job
metadata: metadata:
name: backup name: backup-job
spec: spec:
ttlSecondsAfterFinished: 100
template: template:
spec: spec:
containers: containers:
- name: mysql-bkup - name: mysql-bkup
# In production, it is advised to lock your image tag to a proper # In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`. # release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases # Check https://github.com/jkaninda/mysql-bkup/releases
@@ -109,38 +110,26 @@ spec:
command: command:
- /bin/sh - /bin/sh
- -c - -c
- bkup - backup -d dbname
- backup
- --storage
- s3
resources: resources:
limits: limits:
memory: "128Mi" memory: "128Mi"
cpu: "500m" cpu: "500m"
env: env:
- name: DB_PORT
value: "3306"
- name: DB_HOST - name: DB_HOST
value: "" value: "mysql"
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME - name: DB_USERNAME
value: "username" value: "user"
# Please use secret!
- name: DB_PASSWORD - name: DB_PASSWORD
value: "" value: "password"
- name: AWS_S3_ENDPOINT volumeMounts:
value: "https://s3.amazonaws.com" - mountPath: /backup
- name: AWS_S3_BUCKET_NAME name: backup
value: "xxx" volumes:
- name: AWS_REGION - name: backup
value: "us-west-2" hostPath:
- name: AWS_ACCESS_KEY path: /home/toto/backup # directory location on host
value: "xxxx" type: Directory # this field is optional
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: Never restartPolicy: Never
``` ```
## Available image registries ## Available image registries

View File

@@ -21,6 +21,8 @@ var BackupCmd = &cobra.Command{
func init() { func init() {
//Backup //Backup
BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled") BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled")
BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time") BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time")
BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled") BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")

21
cmd/migrate.go Normal file
View File

@@ -0,0 +1,21 @@
package cmd
import (
"github.com/jkaninda/mysql-bkup/pkg"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
)
// MigrateCmd migrates a database from a source instance into a target
// instance in a single operation (dump the source, restore into the
// target). It takes no positional arguments; connection settings come
// from DB_* and SOURCE_DB_* environment variables.
var MigrateCmd = &cobra.Command{
	Use:   "migrate",
	Short: "Migrate database from a source database to a target database",
	Run: func(cmd *cobra.Command, args []string) {
		// Reject any positional argument before starting the migration.
		if len(args) > 0 {
			utils.Fatal("Error, no argument required")
		} else {
			pkg.StartMigration(cmd)
		}
	},
}

View File

@@ -24,5 +24,7 @@ var RestoreCmd = &cobra.Command{
func init() { func init() {
//Restore //Restore
RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database") RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database")
RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
} }

View File

@@ -30,13 +30,12 @@ func Execute() {
} }
func init() { func init() {
rootCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
rootCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name") rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name")
rootCmd.PersistentFlags().IntP("port", "p", 3306, "Database port") rootCmd.PersistentFlags().IntP("port", "p", 3306, "Database port")
rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only") rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only")
rootCmd.AddCommand(VersionCmd) rootCmd.AddCommand(VersionCmd)
rootCmd.AddCommand(BackupCmd) rootCmd.AddCommand(BackupCmd)
rootCmd.AddCommand(RestoreCmd) rootCmd.AddCommand(RestoreCmd)
rootCmd.AddCommand(MigrateCmd)
} }

View File

@@ -14,7 +14,7 @@ ENV DB_HOST=""
ENV DB_NAME="" ENV DB_NAME=""
ENV DB_USERNAME="" ENV DB_USERNAME=""
ENV DB_PASSWORD="" ENV DB_PASSWORD=""
ENV DB_PORT="3306" ENV DB_PORT=3306
ENV STORAGE=local ENV STORAGE=local
ENV AWS_S3_ENDPOINT="" ENV AWS_S3_ENDPOINT=""
ENV AWS_S3_BUCKET_NAME="" ENV AWS_S3_BUCKET_NAME=""
@@ -30,11 +30,15 @@ ENV SSH_PASSWORD=""
ENV SSH_HOST_NAME="" ENV SSH_HOST_NAME=""
ENV SSH_IDENTIFY_FILE="" ENV SSH_IDENTIFY_FILE=""
ENV SSH_PORT="22" ENV SSH_PORT="22"
ENV SOURCE_DB_HOST=""
ENV SOURCE_DB_PORT=3306
ENV SOURCE_DB_NAME=""
ENV SOURCE_DB_USERNAME=""
ENV SOURCE_DB_PASSWORD=""
ARG DEBIAN_FRONTEND=noninteractive ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="v1.2.2" ENV VERSION="v1.2.3"
ENV BACKUP_CRON_EXPRESSION="" ENV BACKUP_CRON_EXPRESSION=""
ENV GNUPGHOME="/tmp/gnupg" ARG WORKDIR="/config"
ARG WORKDIR="/app"
ARG BACKUPDIR="/backup" ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup" ARG BACKUP_TMP_DIR="/tmp/backup"
ARG BACKUP_CRON="/etc/cron.d/backup_cron" ARG BACKUP_CRON="/etc/cron.d/backup_cron"
@@ -49,16 +53,14 @@ RUN apt-get clean && rm -rf /var/lib/apt/lists/*
RUN mkdir $WORKDIR RUN mkdir $WORKDIR
RUN mkdir $BACKUPDIR RUN mkdir $BACKUPDIR
RUN mkdir -p $BACKUP_TMP_DIR && \ RUN mkdir -p $BACKUP_TMP_DIR
mkdir -p $GNUPGHOME
RUN chmod 777 $WORKDIR RUN chmod 777 $WORKDIR
RUN chmod 777 $BACKUPDIR RUN chmod 777 $BACKUPDIR
RUN chmod 777 $BACKUP_TMP_DIR RUN chmod 777 $BACKUP_TMP_DIR
RUN touch $BACKUP_CRON && \ RUN touch $BACKUP_CRON && \
touch $BACKUP_CRON_SCRIPT && \ touch $BACKUP_CRON_SCRIPT && \
chmod 777 $BACKUP_CRON && \ chmod 777 $BACKUP_CRON && \
chmod 777 $BACKUP_CRON_SCRIPT && \ chmod 777 $BACKUP_CRON_SCRIPT
chmod 777 $GNUPGHOME
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
RUN chmod +x /usr/local/bin/mysql-bkup RUN chmod +x /usr/local/bin/mysql-bkup
@@ -67,19 +69,15 @@ RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
ADD docker/supervisord.conf /etc/supervisor/supervisord.conf ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
WORKDIR $WORKDIR # Create backup script and make it executable
# Create backup shell script RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
COPY <<EOF /usr/local/bin/backup chmod +x /usr/local/bin/backup
#!/bin/sh # Create restore script and make it executable
# shellcheck disable=SC2068 RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
/usr/local/bin/mysql-bkup backup $@
EOF
# Create restore shell script
COPY <<EOF /usr/local/bin/restore
#!/bin/sh
# shellcheck disable=SC2068
/usr/local/bin/mysql-bkup restore $@
EOF
RUN chmod +x /usr/local/bin/backup && \
chmod +x /usr/local/bin/restore chmod +x /usr/local/bin/restore
# Create migrate script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
chmod +x /usr/local/bin/migrate
WORKDIR $WORKDIR
ENTRYPOINT ["/usr/local/bin/mysql-bkup"] ENTRYPOINT ["/usr/local/bin/mysql-bkup"]

123
docs/how-tos/migrate.md Normal file
View File

@@ -0,0 +1,123 @@
---
title: Migrate database
layout: default
parent: How Tos
nav_order: 9
---
# Migrate database
To migrate a database, use the `migrate` command.
{: .note }
The Mysql backup has another great feature: migrating your database from a source database to another.
Normally, moving a database from a source to a target instance requires two operations: backing up the source database and then restoring that backup into the target database.
Instead of proceeding like that, you can use the integrated feature `(migrate)`, which will help you migrate your database by doing only one operation.
### Docker compose
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: migrate
volumes:
- ./backup:/backup
environment:
## Target database
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## Source database
- SOURCE_DB_HOST=mysql2
- SOURCE_DB_PORT=3306
- SOURCE_DB_NAME=sourcedb
- SOURCE_DB_USERNAME=jonas
- SOURCE_DB_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
### Migrate database using Docker CLI
```
## Target database
DB_PORT=3306
DB_HOST=mysql
DB_NAME=targetdb
DB_USERNAME=targetuser
DB_PASSWORD=password
## Source database
SOURCE_DB_HOST=mysql2
SOURCE_DB_PORT=3306
SOURCE_DB_NAME=sourcedb
SOURCE_DB_USERNAME=sourceuser
SOURCE_DB_PASSWORD=password
```
```shell
docker run --rm --network your_network_name \
--env-file your-env \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup migrate -d database_name
```
## Kubernetes
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: migrate-db
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- migrate -d targetdb
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
## Target DB
- name: DB_HOST
value: "mysql-target"
- name: DB_USERNAME
value: "mysql"
- name: DB_PASSWORD
value: "password"
## Source DB
- name: SOURCE_DB_HOST
value: "mysql-source"
- name: SOURCE_DB_NAME
value: "sourcedb"
- name: SOURCE_DB_USERNAME
value: "mysql"
# Please use secret!
- name: SOURCE_DB_PASSWORD
value: "password"
restartPolicy: Never
```

View File

@@ -78,6 +78,49 @@ services:
networks: networks:
web: web:
``` ```
## Kubernetes
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup-job
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup -d dbname
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_HOST
value: "mysql"
- name: DB_USERNAME
value: "user"
- name: DB_PASSWORD
value: "password"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
hostPath:
path: /home/toto/backup # directory location on host
type: Directory # this field is optional
restartPolicy: Never
```
## Available image registries ## Available image registries

View File

@@ -6,7 +6,7 @@ nav_order: 2
# Configuration reference # Configuration reference
Backup and restore targets, schedule and retention are configured using environment variables or flags. Backup, restore and migrate targets, schedule and retention are configured using environment variables or flags.
@@ -19,6 +19,7 @@ Backup and restore targets, schedule and retention are configured using environm
| mysql-bkup | bkup | CLI utility | | mysql-bkup | bkup | CLI utility |
| backup | | Backup database operation | | backup | | Backup database operation |
| restore | | Restore database operation | | restore | | Restore database operation |
| migrate | | Migrate database from one instance to another one |
| --storage | -s | Storage. local or s3 (default: local) | | --storage | -s | Storage. local or s3 (default: local) |
| --file | -f | File name for restoration | | --file | -f | File name for restoration |
| --path | | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` | | --path | | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` |
@@ -35,7 +36,7 @@ Backup and restore targets, schedule and retention are configured using environm
## Environment variables ## Environment variables
| Name | Requirement | Description | | Name | Requirement | Description |
|------------------------|-----------------------------------------------------|------------------------------------------------------| |------------------------|----------------------------------------------------|------------------------------------------------------|
| DB_PORT | Optional, default 3306 | Database port number | | DB_PORT | Optional, default 3306 | Database port number |
| DB_HOST | Required | Database host | | DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name | | DB_NAME | Optional if it was provided from the -d flag | Database name |
@@ -48,7 +49,7 @@ Backup and restore targets, schedule and retention are configured using environm
| AWS_REGION | Optional, required for S3 storage | AWS Region | | AWS_REGION | Optional, required for S3 storage | AWS Region |
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL | | AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) | | FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
| BACKUP_CRON_EXPRESSION | Optional if it was provided from the --period flag | Cron expression | | BACKUP_CRON_EXPRESSION | Optional if it was provided from the --period flag | Backup cron expression for docker in scheduled mode |
| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase | | GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip | | SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
| SSH_USER | Optional, required for SSH storage | ssh remote user | | SSH_USER | Optional, required for SSH storage | ssh remote user |
@@ -56,6 +57,11 @@ Backup and restore targets, schedule and retention are configured using environm
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key | | SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
| SSH_PORT | Optional, required for SSH storage | ssh remote server port | | SSH_PORT | Optional, required for SSH storage | ssh remote server port |
| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) | | SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
| SOURCE_DB_HOST | Optional, required for database migration | Source database host |
| SOURCE_DB_PORT | Optional, required for database migration | Source database port |
| SOURCE_DB_NAME | Optional, required for database migration | Source database name |
| SOURCE_DB_USERNAME | Optional, required for database migration | Source database username |
| SOURCE_DB_PASSWORD | Optional, required for database migration | Source database password |
--- ---
## Run in Scheduled mode ## Run in Scheduled mode

View File

@@ -32,37 +32,38 @@ func StartBackup(cmd *cobra.Command) {
prune, _ := cmd.Flags().GetBool("prune") prune, _ := cmd.Flags().GetBool("prune")
disableCompression, _ = cmd.Flags().GetBool("disable-compression") disableCompression, _ = cmd.Flags().GetBool("disable-compression")
executionMode, _ = cmd.Flags().GetString("mode") executionMode, _ = cmd.Flags().GetString("mode")
dbName = os.Getenv("DB_NAME")
gpqPassphrase := os.Getenv("GPG_PASSPHRASE") gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH") _ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
dbConf = getDbConfig(cmd)
// //
if gpqPassphrase != "" { if gpqPassphrase != "" {
encryption = true encryption = true
} }
//Generate file name //Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405")) backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbConf.dbName, time.Now().Format("20060102_150405"))
if disableCompression { if disableCompression {
backupFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405")) backupFileName = fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
} }
if executionMode == "default" { if executionMode == "default" {
switch storage { switch storage {
case "s3": case "s3":
s3Backup(backupFileName, disableCompression, prune, backupRetention, encryption) s3Backup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
case "local": case "local":
localBackup(backupFileName, disableCompression, prune, backupRetention, encryption) localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
case "ssh", "remote": case "ssh", "remote":
sshBackup(backupFileName, remotePath, disableCompression, prune, backupRetention, encryption) sshBackup(dbConf, backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
case "ftp": case "ftp":
utils.Fatal("Not supported storage type: %s", storage) utils.Fatal("Not supported storage type: %s", storage)
default: default:
localBackup(backupFileName, disableCompression, prune, backupRetention, encryption) localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
} }
} else if executionMode == "scheduled" { } else if executionMode == "scheduled" {
scheduledMode(storage) scheduledMode(dbConf, storage)
} else { } else {
utils.Fatal("Error, unknown execution mode!") utils.Fatal("Error, unknown execution mode!")
} }
@@ -70,7 +71,7 @@ func StartBackup(cmd *cobra.Command) {
} }
// Run in scheduled mode // Run in scheduled mode
func scheduledMode(storage string) { func scheduledMode(db *dbConfig, storage string) {
fmt.Println() fmt.Println()
fmt.Println("**********************************") fmt.Println("**********************************")
@@ -81,7 +82,7 @@ func scheduledMode(storage string) {
utils.Info("Storage type %s ", storage) utils.Info("Storage type %s ", storage)
//Test database connexion //Test database connexion
utils.TestDatabaseConnection() testDatabaseConnection(db)
utils.Info("Creating backup job...") utils.Info("Creating backup job...")
CreateCrontabScript(disableCompression, storage) CreateCrontabScript(disableCompression, storage)
@@ -117,12 +118,7 @@ func scheduledMode(storage string) {
} }
// BackupDatabase backup database // BackupDatabase backup database
func BackupDatabase(backupFileName string, disableCompression bool) { func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
dbHost = os.Getenv("DB_HOST")
dbPassword = os.Getenv("DB_PASSWORD")
dbUserName = os.Getenv("DB_USERNAME")
dbName = os.Getenv("DB_NAME")
dbPort = os.Getenv("DB_PORT")
storagePath = os.Getenv("STORAGE_PATH") storagePath = os.Getenv("STORAGE_PATH")
err := utils.CheckEnvVars(dbHVars) err := utils.CheckEnvVars(dbHVars)
@@ -132,7 +128,7 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
} }
utils.Info("Starting database backup...") utils.Info("Starting database backup...")
utils.TestDatabaseConnection() testDatabaseConnection(db)
// Backup Database database // Backup Database database
utils.Info("Backing up database...") utils.Info("Backing up database...")
@@ -140,11 +136,11 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
if disableCompression { if disableCompression {
// Execute mysqldump // Execute mysqldump
cmd := exec.Command("mysqldump", cmd := exec.Command("mysqldump",
"-h", dbHost, "-h", db.dbHost,
"-P", dbPort, "-P", db.dbPort,
"-u", dbUserName, "-u", db.dbUserName,
"--password="+dbPassword, "--password="+db.dbPassword,
dbName, db.dbName,
) )
output, err := cmd.Output() output, err := cmd.Output()
if err != nil { if err != nil {
@@ -166,7 +162,7 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
} else { } else {
// Execute mysqldump // Execute mysqldump
cmd := exec.Command("mysqldump", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName) cmd := exec.Command("mysqldump", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, "--password="+db.dbPassword, db.dbName)
stdout, err := cmd.StdoutPipe() stdout, err := cmd.StdoutPipe()
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
@@ -189,9 +185,9 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
} }
} }
func localBackup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) { func localBackup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
utils.Info("Backup database to local storage") utils.Info("Backup database to local storage")
BackupDatabase(backupFileName, disableCompression) BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName finalFileName := backupFileName
if encrypt { if encrypt {
encryptBackup(backupFileName) encryptBackup(backupFileName)
@@ -207,12 +203,12 @@ func localBackup(backupFileName string, disableCompression bool, prune bool, bac
deleteTemp() deleteTemp()
} }
func s3Backup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) { func s3Backup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME") bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH") s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
utils.Info("Backup database to s3 storage") utils.Info("Backup database to s3 storage")
//Backup database //Backup database
BackupDatabase(backupFileName, disableCompression) BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName finalFileName := backupFileName
if encrypt { if encrypt {
encryptBackup(backupFileName) encryptBackup(backupFileName)
@@ -243,10 +239,10 @@ func s3Backup(backupFileName string, disableCompression bool, prune bool, backup
//Delete temp //Delete temp
deleteTemp() deleteTemp()
} }
func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) { func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
utils.Info("Backup database to Remote server") utils.Info("Backup database to Remote server")
//Backup database //Backup database
BackupDatabase(backupFileName, disableCompression) BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName finalFileName := backupFileName
if encrypt { if encrypt {
encryptBackup(backupFileName) encryptBackup(backupFileName)

View File

@@ -1,4 +1,59 @@
package pkg package pkg
import (
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"os"
)
type Config struct { type Config struct {
} }
type dbConfig struct {
dbHost string
dbPort string
dbName string
dbUserName string
dbPassword string
}
type dbSourceConfig struct {
sourceDbHost string
sourceDbPort string
sourceDbUserName string
sourceDbPassword string
sourceDbName string
}
// getDbConfig assembles the target database configuration. Flag values
// (dbname, port) are first promoted into their environment variables,
// then every field is read from the environment. When a required
// variable is missing, the process terminates via utils.Fatal.
func getDbConfig(cmd *cobra.Command) *dbConfig {
	// Promote flag values to env vars so one lookup path serves both.
	utils.GetEnv(cmd, "dbname", "DB_NAME")
	utils.GetEnv(cmd, "port", "DB_PORT")
	conf := &dbConfig{
		dbHost:     os.Getenv("DB_HOST"),
		dbPort:     os.Getenv("DB_PORT"),
		dbName:     os.Getenv("DB_NAME"),
		dbUserName: os.Getenv("DB_USERNAME"),
		dbPassword: os.Getenv("DB_PASSWORD"),
	}
	if err := utils.CheckEnvVars(dbHVars); err != nil {
		utils.Error("Please make sure all required environment variables for database are set")
		utils.Fatal("Error checking environment variables: %s", err)
	}
	return conf
}
// getSourceDbConfig reads the source database settings (SOURCE_DB_*)
// used by the migrate command. When a required variable is missing, the
// process terminates via utils.Fatal.
func getSourceDbConfig() *dbSourceConfig {
	conf := &dbSourceConfig{
		sourceDbHost:     os.Getenv("SOURCE_DB_HOST"),
		sourceDbPort:     os.Getenv("SOURCE_DB_PORT"),
		sourceDbName:     os.Getenv("SOURCE_DB_NAME"),
		sourceDbUserName: os.Getenv("SOURCE_DB_USERNAME"),
		sourceDbPassword: os.Getenv("SOURCE_DB_PASSWORD"),
	}
	if err := utils.CheckEnvVars(sdbRVars); err != nil {
		utils.Error("Please make sure all required environment variables for source database are set")
		utils.Fatal("Error checking environment variables: %s", err)
	}
	return conf
}

View File

@@ -9,11 +9,17 @@ import (
func Decrypt(inputFile string, passphrase string) error { func Decrypt(inputFile string, passphrase string) error {
utils.Info("Decrypting backup file: " + inputFile + " ...") utils.Info("Decrypting backup file: " + inputFile + " ...")
//Create gpg home dir
err := utils.MakeDir(gpgHome)
if err != nil {
return err
}
utils.SetEnv("GNUPGHOME", gpgHome)
cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile) cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
cmd.Stdout = os.Stdout cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr cmd.Stderr = os.Stderr
err := cmd.Run() err = cmd.Run()
if err != nil { if err != nil {
return err return err
} }
@@ -24,11 +30,17 @@ func Decrypt(inputFile string, passphrase string) error {
func Encrypt(inputFile string, passphrase string) error { func Encrypt(inputFile string, passphrase string) error {
utils.Info("Encrypting backup...") utils.Info("Encrypting backup...")
//Create gpg home dir
err := utils.MakeDir(gpgHome)
if err != nil {
return err
}
utils.SetEnv("GNUPGHOME", gpgHome)
cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile) cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile)
cmd.Stdout = os.Stdout cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr cmd.Stderr = os.Stderr
err := cmd.Run() err = cmd.Run()
if err != nil { if err != nil {
return err return err
} }

View File

@@ -1,9 +1,11 @@
package pkg package pkg
import ( import (
"bytes"
"fmt" "fmt"
"github.com/jkaninda/mysql-bkup/utils" "github.com/jkaninda/mysql-bkup/utils"
"os" "os"
"os/exec"
"path/filepath" "path/filepath"
"time" "time"
) )
@@ -96,3 +98,24 @@ func deleteTemp() {
utils.Info("Deleting %s ... done", tmpPath) utils.Info("Deleting %s ... done", tmpPath)
} }
} }
// TestDatabaseConnection tests the database connection
// testDatabaseConnection verifies connectivity to the given database by
// opening a no-op mysql session ("-e quit"). On failure it logs the
// combined stdout/stderr of the client and terminates the process.
func testDatabaseConnection(db *dbConfig) {
	utils.Info("Connecting to %s database ...", db.dbName)
	probe := exec.Command(
		"mysql",
		"-h", db.dbHost,
		"-P", db.dbPort,
		"-u", db.dbUserName,
		"--password="+db.dbPassword,
		db.dbName,
		"-e", "quit",
	)
	// Collect stdout and stderr together so errors carry full context.
	var combined bytes.Buffer
	probe.Stdout = &combined
	probe.Stderr = &combined
	if err := probe.Run(); err != nil {
		utils.Error("Error testing database connection: %v\nOutput: %s", err, combined.String())
		os.Exit(1)
	}
	utils.Info("Successfully connected to %s database", db.dbName)
}

31
pkg/migrate.go Normal file
View File

@@ -0,0 +1,31 @@
package pkg
import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"time"
)
// StartMigration migrates data from the source database to the target
// database: it dumps the source database to a timestamped SQL file and
// restores that dump into the target database.
// Configuration is read from command flags / environment via getDbConfig
// (target) and getSourceDbConfig (source). The helpers it calls terminate
// the process on failure, so StartMigration does not return an error.
func StartMigration(cmd *cobra.Command) {
	utils.Info("Starting database migration...")
	// Resolve target and source database configurations.
	dbConf = getDbConfig(cmd)
	sDbConf = getSourceDbConfig()
	// Generate a timestamped dump file name for the source database.
	backupFileName := fmt.Sprintf("%s_%s.sql", sDbConf.sourceDbName, time.Now().Format("20060102_150405"))
	// Back up the source database into the dump file.
	sourceConf := dbConfig{
		dbHost:     sDbConf.sourceDbHost,
		dbPort:     sDbConf.sourceDbPort,
		dbName:     sDbConf.sourceDbName,
		dbUserName: sDbConf.sourceDbUserName,
		dbPassword: sDbConf.sourceDbPassword,
	}
	BackupDatabase(&sourceConf, backupFileName, true)
	// Restore the source dump into the target database.
	utils.Info("Restoring [%s] database into [%s] database...", sDbConf.sourceDbName, dbConf.dbName)
	RestoreDatabase(dbConf, backupFileName)
	utils.Info("[%s] database has been restored into [%s] database", sDbConf.sourceDbName, dbConf.dbName)
	utils.Info("Database migration completed!")
}

View File

@@ -13,8 +13,6 @@ func StartRestore(cmd *cobra.Command) {
//Set env //Set env
utils.SetEnv("STORAGE_PATH", storagePath) utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "dbname", "DB_NAME")
utils.GetEnv(cmd, "port", "DB_PORT")
//Get flag value and set env //Get flag value and set env
s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH") s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
@@ -23,47 +21,45 @@ func StartRestore(cmd *cobra.Command) {
file = utils.GetEnv(cmd, "file", "FILE_NAME") file = utils.GetEnv(cmd, "file", "FILE_NAME")
executionMode, _ = cmd.Flags().GetString("mode") executionMode, _ = cmd.Flags().GetString("mode")
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME") bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
dbConf = getDbConfig(cmd)
switch storage { switch storage {
case "s3": case "s3":
restoreFromS3(file, bucket, s3Path) restoreFromS3(dbConf, file, bucket, s3Path)
case "local": case "local":
utils.Info("Restore database from local") utils.Info("Restore database from local")
copyToTmp(storagePath, file) copyToTmp(storagePath, file)
RestoreDatabase(file) RestoreDatabase(dbConf, file)
case "ssh": case "ssh":
restoreFromRemote(file, remotePath) restoreFromRemote(dbConf, file, remotePath)
case "ftp": case "ftp":
utils.Fatal("Restore from FTP is not yet supported") utils.Fatal("Restore from FTP is not yet supported")
default: default:
utils.Info("Restore database from local") utils.Info("Restore database from local")
RestoreDatabase(file) copyToTmp(storagePath, file)
RestoreDatabase(dbConf, file)
} }
} }
func restoreFromS3(file, bucket, s3Path string) { func restoreFromS3(db *dbConfig, file, bucket, s3Path string) {
utils.Info("Restore database from s3") utils.Info("Restore database from s3")
err := utils.DownloadFile(tmpPath, file, bucket, s3Path) err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
if err != nil { if err != nil {
utils.Fatal("Error download file from s3 %s %v", file, err) utils.Fatal("Error download file from s3 %s %v", file, err)
} }
RestoreDatabase(file) RestoreDatabase(db, file)
} }
func restoreFromRemote(file, remotePath string) { func restoreFromRemote(db *dbConfig, file, remotePath string) {
utils.Info("Restore database from remote server") utils.Info("Restore database from remote server")
err := CopyFromRemote(file, remotePath) err := CopyFromRemote(file, remotePath)
if err != nil { if err != nil {
utils.Fatal("Error download file from remote server: %s %v ", filepath.Join(remotePath, file), err) utils.Fatal("Error download file from remote server: %s %v ", filepath.Join(remotePath, file), err)
} }
RestoreDatabase(file) RestoreDatabase(db, file)
} }
// RestoreDatabase restore database // RestoreDatabase restore database
func RestoreDatabase(file string) { func RestoreDatabase(db *dbConfig, file string) {
dbHost = os.Getenv("DB_HOST")
dbPassword = os.Getenv("DB_PASSWORD")
dbUserName = os.Getenv("DB_USERNAME")
dbName = os.Getenv("DB_NAME")
dbPort = os.Getenv("DB_PORT")
gpgPassphrase := os.Getenv("GPG_PASSPHRASE") gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
if file == "" { if file == "" {
utils.Fatal("Error, file required") utils.Fatal("Error, file required")
@@ -93,7 +89,7 @@ func RestoreDatabase(file string) {
} }
if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) { if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
utils.TestDatabaseConnection() testDatabaseConnection(db)
utils.Info("Restoring database...") utils.Info("Restoring database...")
extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file)) extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))

View File

@@ -4,16 +4,12 @@ const cronLogFile = "/var/log/mysql-bkup.log"
const tmpPath = "/tmp/backup" const tmpPath = "/tmp/backup"
const backupCronFile = "/usr/local/bin/backup_cron.sh" const backupCronFile = "/usr/local/bin/backup_cron.sh"
const algorithm = "aes256" const algorithm = "aes256"
const gpgHome = "gnupg"
const gpgExtension = "gpg" const gpgExtension = "gpg"
var ( var (
storage = "local" storage = "local"
file = "" file = ""
dbPassword = ""
dbUserName = ""
dbName = ""
dbHost = ""
dbPort = "3306"
executionMode = "default" executionMode = "default"
storagePath = "/backup" storagePath = "/backup"
disableCompression = false disableCompression = false
@@ -27,6 +23,16 @@ var dbHVars = []string{
"DB_USERNAME", "DB_USERNAME",
"DB_NAME", "DB_NAME",
} }
// sdbRVars lists the environment variables required to describe the
// source database for the migrate command.
var sdbRVars = []string{
	"SOURCE_DB_HOST",
	"SOURCE_DB_PORT",
	"SOURCE_DB_NAME",
	"SOURCE_DB_USERNAME",
	"SOURCE_DB_PASSWORD",
}

// dbConf holds the target database configuration shared across commands.
var dbConf *dbConfig

// sDbConf holds the source database configuration used by migration.
var sDbConf *dbSourceConfig
// sshHVars Required environment variables for SSH remote server storage // sshHVars Required environment variables for SSH remote server storage
var sshHVars = []string{ var sshHVars = []string{

View File

@@ -7,13 +7,11 @@ package utils
* @link https://github.com/jkaninda/mysql-bkup * @link https://github.com/jkaninda/mysql-bkup
**/ **/
import ( import (
"bytes"
"fmt" "fmt"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"io" "io"
"io/fs" "io/fs"
"os" "os"
"os/exec"
) )
func FileExists(filename string) bool { func FileExists(filename string) bool {
@@ -90,34 +88,6 @@ func IsDirEmpty(name string) (bool, error) {
return true, nil return true, nil
} }
// TestDatabaseConnection tests the database connection
func TestDatabaseConnection() {
dbHost := os.Getenv("DB_HOST")
dbPassword := os.Getenv("DB_PASSWORD")
dbUserName := os.Getenv("DB_USERNAME")
dbName := os.Getenv("DB_NAME")
dbPort := os.Getenv("DB_PORT")
if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
Fatal("Please make sure all required database environment variables are set")
} else {
Info("Connecting to database ...")
cmd := exec.Command("mysql", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName, "-e", "quit")
// Capture the output
var out bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &out
err := cmd.Run()
if err != nil {
Error("Error testing database connection: %v\nOutput: %s", err, out.String())
os.Exit(1)
}
Info("Successfully connected to database")
}
}
func GetEnv(cmd *cobra.Command, flagName, envName string) string { func GetEnv(cmd *cobra.Command, flagName, envName string) string {
value, _ := cmd.Flags().GetString(flagName) value, _ := cmd.Flags().GetString(flagName)
if value != "" { if value != "" {
@@ -182,3 +152,21 @@ func CheckEnvVars(vars []string) error {
return nil return nil
} }
// MakeDir create directory
func MakeDir(dirPath string) error {
err := os.Mkdir(dirPath, 0700)
if err != nil {
return err
}
return nil
}
// MakeDirAll create directory
func MakeDirAll(dirPath string) error {
err := os.MkdirAll(dirPath, 0700)
if err != nil {
return err
}
return nil
}