Compare commits

...

26 Commits

| Author | SHA1 | Message | Date |
|---|---|---|---|
| Jonas Kaninda | 4808f093e5 | Merge pull request #79 from jkaninda/develop (Update version) | 2024-09-10 23:11:28 +02:00 |
| Jonas Kaninda | c7a03861fe | Update version | 2024-09-10 23:10:24 +02:00 |
| Jonas Kaninda | 36ec63d522 | Merge pull request #78 from jkaninda/develop (feat: Add Telegram notification) | 2024-09-10 23:04:12 +02:00 |
| Jonas Kaninda | 0f07de1d83 | feat: Add Telegram notification | 2024-09-10 23:01:26 +02:00 |
| Jonas Kaninda | ae55839996 | Merge pull request #77 from jkaninda/docs (docs: update Kubernetes deployment) | 2024-09-09 07:17:51 +02:00 |
| Jonas Kaninda | a7f7e57a0d | docs: update Kubernetes deployment | 2024-09-09 07:17:15 +02:00 |
| Jonas Kaninda | b2ddaec93b | Merge pull request #76 from jkaninda/docs (docs: add buy me a coffee link) | 2024-09-05 22:43:09 +02:00 |
| Jonas Kaninda | b3570d774c | docs: add buy me a coffee link | 2024-09-05 22:42:37 +02:00 |
| Jonas Kaninda | 38f7e91c03 | Merge pull request #75 from jkaninda/develop (chore: rename environment variable for database migration operation) | 2024-09-03 07:06:24 +02:00 |
| Jonas Kaninda | 07c2935925 | chore: rename environment variable for database migration operation | 2024-09-03 06:49:26 +02:00 |
| Jonas Kaninda | f3c5585051 | Merge pull request #74 from jkaninda/docs (Docs) | 2024-08-30 21:24:50 +02:00 |
| Jonas Kaninda | 7163d030a5 | chore: remove dbport from command flag | 2024-08-30 21:22:18 +02:00 |
| Jonas Kaninda | a2cec86e73 | chore: remove dbport from command flag | 2024-08-30 21:21:21 +02:00 |
| Jonas Kaninda | 662b73579d | feat: add migrate database from a source to a target database; fix: gpg encrypt permission warning message, update Kubernetes deployment example | 2024-08-30 19:58:12 +02:00 |
| | c9f8a32de1 | Merge pull request #73 from jkaninda/docs (docs: update Kubernetes deployment) | 2024-08-28 20:35:31 +02:00 |
| | 8fb008151c | docs: update Kubernetes deployment | 2024-08-28 20:35:01 +02:00 |
| | 113c84c885 | Merge pull request #72 from jkaninda/docs (docs: update readme) | 2024-08-21 03:53:15 +02:00 |
| | 58deb92953 | docs: update readme | 2024-08-21 03:52:49 +02:00 |
| | c41afb8b57 | Merge pull request #71 from jkaninda/docs (docs: update readme) | 2024-08-21 03:51:25 +02:00 |
| | 02e51a3933 | docs: update readme | 2024-08-21 03:50:59 +02:00 |
| | db4061b64b | Merge pull request #70 from jkaninda/docs (docs: update readme) | 2024-08-21 03:49:58 +02:00 |
| | 9467b157aa | docs: update readme | 2024-08-21 03:49:15 +02:00 |
| | c229ebdc9d | Merge pull request #69 from jkaninda/docs (docs: fix grammar) | 2024-08-20 19:21:24 +02:00 |
| | 7b701d1740 | docs: fix grammar | 2024-08-20 19:20:54 +02:00 |
| | ad6f190bad | Merge pull request #68 from jkaninda/docs (docs: update readme) | 2024-08-15 06:06:26 +02:00 |
| | de4dcaaeca | docs: update readme | 2024-08-15 06:05:39 +02:00 |

35 changed files with 757 additions and 290 deletions

View File

@@ -1,4 +1,4 @@
name: Release
name: CI
on:
push:
tags:

1 .gitignore vendored
View File

@@ -9,3 +9,4 @@ mysql-bkup
/.DS_Store
/.idea
bin
Makefile

View File

@@ -1,48 +0,0 @@
BINARY_NAME=mysql-bkup
IMAGE_NAME=jkaninda/mysql-bkup
include .env
export
run:
go run . backup
build:
go build -o bin/${BINARY_NAME} .
compile:
GOOS=darwin GOARCH=arm64 go build -o bin/${BINARY_NAME}-darwin-arm64 .
GOOS=darwin GOARCH=amd64 go build -o bin/${BINARY_NAME}-darwin-amd64 .
GOOS=linux GOARCH=arm64 go build -o bin/${BINARY_NAME}-linux-arm64 .
GOOS=linux GOARCH=amd64 go build -o bin/${BINARY_NAME}-linux-amd64 .
docker-build:
docker build -f docker/Dockerfile -t jkaninda/mysql-bkup:latest .
docker-run: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --prune --keep-last 2
docker-restore: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} restore -f ${FILE_NAME}
docker-run-scheduled: #docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --mode scheduled --period "* * * * *"
docker-run-scheduled-s3: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
docker-run-s3: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "AWS_S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage s3 --path /custom-path
docker-restore-s3: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} restore --storage s3 -f ${FILE_NAME} --path /custom-path
docker-run-ssh: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage ssh
docker-restore-ssh: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" ${IMAGE_NAME} restore --storage ssh -f ${FILE_NAME}
run-docs:
cd docs && bundle exec jekyll serve -H 0.0.0.0 -t

View File

@@ -5,16 +5,19 @@ It also supports __encrypting__ your backups using GPG.
The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
It handles __recurring__ backups of MySQL databases on Docker and can be deployed as a __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
It also supports __encrypting__ your backups using GPG.
It also supports database __encryption__ using GPG.
[![Build](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml/badge.svg)](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
[![Go Report](https://goreportcard.com/badge/github.com/jkaninda/mysql-bkup)](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
![Docker Image Size (latest by date)](https://img.shields.io/docker/image-size/jkaninda/mysql-bkup?style=flat-square)
![Docker Pulls](https://img.shields.io/docker/pulls/jkaninda/mysql-bkup?style=flat-square)
<a href="https://ko-fi.com/jkaninda"><img src="https://uploads-ssl.webflow.com/5c14e387dab576fe667689cf/5cbed8a4ae2b88347c06c923_BuyMeACoffee_blue.png" height="20" alt="buy me a coffee"></a>
Successfully tested on:
- Docker
- Docker Swarm
- Docker in Swarm mode
- Kubernetes
- OpenShift
## Documentation is found at <https://jkaninda.github.io/mysql-bkup>
@@ -31,7 +34,7 @@ It also supports __encrypting__ your backups using GPG.
## Storage:
- Local
- AWS S3 or any S3 Alternatives for Object Storage
- SSH
- SSH remote server
## Quickstart
@@ -72,8 +75,8 @@ services:
volumes:
- ./backup:/backup
environment:
- DB_PORT=5432
- DB_HOST=postgres
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=foo
- DB_USERNAME=bar
- DB_PASSWORD=password
@@ -93,36 +96,41 @@ For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as
apiVersion: batch/v1
kind: Job
metadata:
name: backup
name: backup-job
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
- name: pg-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- bkup
- backup
- /bin/sh
- -c
- backup -d dbname
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
value: "mysql"
- name: DB_USERNAME
value: "username"
# Please use secret!
value: "user"
- name: DB_PASSWORD
value: ""
value: "password"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
hostPath:
path: /home/toto/backup # directory location on host
type: Directory # this field is optional
restartPolicy: Never
```
## Available image registries

View File

@@ -1,3 +1,9 @@
// Package cmd /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package cmd
import (
@@ -21,6 +27,8 @@ var BackupCmd = &cobra.Command{
func init() {
//Backup
BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled")
BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time")
BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
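// Note: --period takes a standard five-field cron expression;
// the default "0 1 * * *" runs the backup daily at 01:00.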

27 cmd/migrate.go Normal file
View File

@@ -0,0 +1,27 @@
// Package cmd /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package cmd
import (
"github.com/jkaninda/mysql-bkup/pkg"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
)
var MigrateCmd = &cobra.Command{
Use: "migrate",
Short: "Migrate database from a source database to a target database",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
pkg.StartMigration(cmd)
} else {
utils.Fatal("Error, no argument required")
}
},
}

View File

@@ -24,5 +24,7 @@ var RestoreCmd = &cobra.Command{
func init() {
//Restore
RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database")
RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
}

View File

@@ -1,7 +1,9 @@
// Package cmd /*
/*
Copyright © 2024 Jonas Kaninda
*/
// Package cmd /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package cmd
import (
@@ -30,13 +32,11 @@ func Execute() {
}
func init() {
rootCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
rootCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name")
rootCmd.PersistentFlags().IntP("port", "p", 3306, "Database port")
rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only")
rootCmd.AddCommand(VersionCmd)
rootCmd.AddCommand(BackupCmd)
rootCmd.AddCommand(RestoreCmd)
rootCmd.AddCommand(MigrateCmd)
}

View File

@@ -1,9 +1,11 @@
// Package cmd /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package cmd
/*
Copyright © 2024 Jonas Kaninda
*/
import (
"fmt"
"github.com/spf13/cobra"

View File

@@ -10,11 +10,11 @@ RUN go mod download
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup
FROM ubuntu:24.04
ENV DB_HOST=""
ENV DB_HOST="localhost"
ENV DB_NAME=""
ENV DB_USERNAME=""
ENV DB_PASSWORD=""
ENV DB_PORT="3306"
ENV DB_PORT=3306
ENV STORAGE=local
ENV AWS_S3_ENDPOINT=""
ENV AWS_S3_BUCKET_NAME=""
@@ -30,11 +30,15 @@ ENV SSH_PASSWORD=""
ENV SSH_HOST_NAME=""
ENV SSH_IDENTIFY_FILE=""
ENV SSH_PORT="22"
ENV TARGET_DB_HOST=""
ENV TARGET_DB_PORT=3306
ENV TARGET_DB_NAME=""
ENV TARGET_DB_USERNAME=""
ENV TARGET_DB_PASSWORD=""
ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="v1.2.2"
ENV VERSION="v1.2.5"
ENV BACKUP_CRON_EXPRESSION=""
ENV GNUPGHOME="/tmp/gnupg"
ARG WORKDIR="/app"
ARG WORKDIR="/config"
ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup"
ARG BACKUP_CRON="/etc/cron.d/backup_cron"
@@ -49,16 +53,14 @@ RUN apt-get clean && rm -rf /var/lib/apt/lists/*
RUN mkdir $WORKDIR
RUN mkdir $BACKUPDIR
RUN mkdir -p $BACKUP_TMP_DIR && \
mkdir -p $GNUPGHOME
RUN mkdir -p $BACKUP_TMP_DIR
RUN chmod 777 $WORKDIR
RUN chmod 777 $BACKUPDIR
RUN chmod 777 $BACKUP_TMP_DIR
RUN touch $BACKUP_CRON && \
touch $BACKUP_CRON_SCRIPT && \
chmod 777 $BACKUP_CRON && \
chmod 777 $BACKUP_CRON_SCRIPT && \
chmod 777 $GNUPGHOME
chmod 777 $BACKUP_CRON_SCRIPT
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
RUN chmod +x /usr/local/bin/mysql-bkup
@@ -67,19 +69,15 @@ RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
WORKDIR $WORKDIR
# Create backup shell script
COPY <<EOF /usr/local/bin/backup
#!/bin/sh
# shellcheck disable=SC2068
/usr/local/bin/mysql-bkup backup $@
EOF
# Create restore shell script
COPY <<EOF /usr/local/bin/restore
#!/bin/sh
# shellcheck disable=SC2068
/usr/local/bin/mysql-bkup restore $@
EOF
RUN chmod +x /usr/local/bin/backup && \
# Create backup script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
chmod +x /usr/local/bin/backup
# Create restore script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
chmod +x /usr/local/bin/restore
# Create migrate script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
chmod +x /usr/local/bin/migrate
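# These thin wrapper scripts are what let the Kubernetes examples below invoke
# `backup`, `restore`, and `migrate` via `command: /bin/sh -c ...` instead of the full binary path.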
WORKDIR $WORKDIR
ENTRYPOINT ["/usr/local/bin/mysql-bkup"]

View File

@@ -13,10 +13,11 @@
# you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
# You can create any custom variable you would like, and they will be accessible
# in the templates via {{ site.myvariable }}.
title: MySQL database backup
title: MySQL Backup Docker container image
email: hi@jonaskaninda.com
description: >- # this means to ignore newlines until "baseurl:"
MySQL Backup and Restore Docker container image. Backup database to AWS S3 storage or SSH remote server.
MySQL Backup is a Docker container image that can be used to back up and restore MySQL databases.
It supports local storage, AWS S3 or any S3 alternative for object storage, and SSH-compatible storage.
baseurl: "" # the subpath of your site, e.g. /blog
url: "jkaninda.github.io/mysql-bkup/" # the base hostname & protocol for your site, e.g. http://example.com

View File

@@ -104,7 +104,7 @@ spec:
command:
- /bin/sh
- -c
- mysql-bkup backup -s s3 --path /custom_path
- backup -s s3 --path /custom_path
env:
- name: DB_PORT
value: "3306"

View File

@@ -111,7 +111,7 @@ spec:
command:
- /bin/sh
- -c
- mysql-bkup backup -s ssh
- backup -s ssh
env:
- name: DB_PORT
value: "3306"

View File

@@ -28,10 +28,9 @@ spec:
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- bkup
- backup
- --storage
- s3
- /bin/sh
- -c
- backup --storage s3
resources:
limits:
memory: "128Mi"
@@ -82,6 +81,8 @@ spec:
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
@@ -138,6 +139,8 @@ spec:
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- restore
- --storage
@@ -192,6 +195,8 @@ spec:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
@@ -233,7 +238,7 @@ spec:
This image also supports the Kubernetes security context; you can run it in a rootless environment.
It has been tested on OpenShift and works well.
Deployment on Openshift is supported, you need to remove `securityContext` section on your yaml file.
Deployment on OpenShift is supported, you need to remove `securityContext` section on your yaml file.
```yaml
apiVersion: batch/v1
@@ -258,6 +263,8 @@ spec:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage

131 docs/how-tos/migrate.md Normal file
View File

@@ -0,0 +1,131 @@
---
title: Migrate database
layout: default
parent: How Tos
nav_order: 9
---
# Migrate database
To migrate a database, use the `migrate` command.
{: .note }
MySQL backup has another great feature: migrating your database from a source database to a target database.
Normally, moving a database from a source to a target requires two operations: backing up the source database, then restoring that backup into the target database.
The integrated `migrate` command combines both steps into a single operation.
{: .warning }
The `migrate` operation is irreversible; back up your target database before running it.
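Under the hood, `migrate` simply chains those two steps; the new `pkg/migrate.go` later in this diff implements it. A simplified sketch (`migrateSketch` is a hypothetical wrapper; the helpers are the ones from this diff):
```go
// migrateSketch mirrors the flow of pkg/migrate.go:
// dump the source database, then restore the dump into the target.
func migrateSketch(cmd *cobra.Command) {
	src := getDbConfig(cmd)    // source connection from DB_* variables
	tgt := getTargetDbConfig() // target connection from TARGET_DB_* variables
	file := fmt.Sprintf("%s_%s.sql", src.dbName, time.Now().Format("20060102_150405"))
	BackupDatabase(src, file, true) // uncompressed dump of the source
	RestoreDatabase(&dbConfig{      // restore into the target database
		dbHost:     tgt.targetDbHost,
		dbPort:     tgt.targetDbPort,
		dbName:     tgt.targetDbName,
		dbUserName: tgt.targetDbUserName,
		dbPassword: tgt.targetDbPassword,
	}, file)
}
```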
### Docker compose
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: migrate
volumes:
- ./backup:/backup
environment:
## Source database
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## Target database
- TARGET_DB_HOST=target-mysql
- TARGET_DB_PORT=3306
- TARGET_DB_NAME=dbname
- TARGET_DB_USERNAME=username
- TARGET_DB_PASSWORD=password
# The mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:
```
### Migrate database using Docker CLI
```
## Source database
DB_HOST=mysql
DB_PORT=3306
DB_NAME=dbname
DB_USERNAME=username
DB_PASSWORD=password
## Target database
TARGET_DB_HOST=target-mysql
TARGET_DB_PORT=3306
TARGET_DB_NAME=dbname
TARGET_DB_USERNAME=username
TARGET_DB_PASSWORD=password
```
```shell
docker run --rm --network your_network_name \
--env-file your-env \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup migrate
```
## Kubernetes
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: migrate-db
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- migrate
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
## Source Database
- name: DB_HOST
value: "mysql"
- name: DB_PORT
value: "3306"
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
- name: DB_PASSWORD
value: "password"
## Target Database
- name: TARGET_DB_HOST
value: "target-mysql"
- name: TARGET_DB_PORT
value: "3306"
- name: TARGET_DB_NAME
value: "dbname"
- name: TARGET_DB_USERNAME
value: "username"
- name: TARGET_DB_PASSWORD
value: "password"
restartPolicy: Never
```

View File

@@ -65,7 +65,7 @@ spec:
command:
- /bin/sh
- -c
- bkup restore -s s3 --path /custom_path -f store_20231219_022941.sql.gz
- restore -s s3 --path /custom_path -f store_20231219_022941.sql.gz
env:
- name: DB_PORT
value: "3306"

View File

@@ -63,7 +63,7 @@ spec:
command:
- /bin/sh
- -c
- bkup restore -s ssh -f store_20231219_022941.sql.gz
- restore -s ssh -f store_20231219_022941.sql.gz
env:
- name: DB_PORT
value: "3306"

View File

@@ -6,7 +6,7 @@ nav_order: 1
# About mysql-bkup
{:.no_toc}
MySQL Backup is a Docker container image that can be used to backup and restore MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH remote storage.
MySQL Backup is a Docker container image that can be used to back up, restore and migrate MySQL databases. It supports local storage, AWS S3 or any S3 alternative for object storage, and SSH remote storage.
It also supports __encrypting__ your backups using GPG.
We are open to receiving stars, PRs, and issues!
@@ -19,7 +19,8 @@ We are open to receiving stars, PRs, and issues!
The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
It handles __recurring__ backups of MySQL databases on Docker and can be deployed as a __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
It also supports __encrypting__ your backups using GPG.
It also supports database __encryption__ using GPG.
{: .note }
Code and documentation for `v1` version on [this branch][v1-branch].
@@ -68,7 +69,7 @@ services:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=postgres
- DB_HOST=mysql
- DB_NAME=foo
- DB_USERNAME=bar
- DB_PASSWORD=password
@@ -78,6 +79,49 @@ services:
networks:
web:
```
## Kubernetes
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup-job
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup -d dbname
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_HOST
value: "mysql"
- name: DB_USERNAME
value: "user"
- name: DB_PASSWORD
value: "password"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
hostPath:
path: /home/toto/backup # directory location on host
type: Directory # this field is optional
restartPolicy: Never
```
## Available image registries

View File

@@ -6,7 +6,7 @@ nav_order: 2
# Configuration reference
Backup and restore targets, schedule and retention are configured using environment variables or flags.
Backup, restore and migrate targets, schedule and retention are configured using environment variables or flags.
@@ -19,6 +19,7 @@ Backup and restore targets, schedule and retention are configured using environm
| mysql-bkup | bkup | CLI utility |
| backup | | Backup database operation |
| restore | | Restore database operation |
| migrate | | Migrate database from one instance to another |
| --storage | -s | Storage. local or s3 (default: local) |
| --file | -f | File name for restoration |
| --path | | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` |
@@ -35,7 +36,7 @@ Backup and restore targets, schedule and retention are configured using environm
## Environment variables
| Name | Requirement | Description |
|-------------------|--------------------------------------------------|------------------------------------------------------|
|------------------------|----------------------------------------------------|------------------------------------------------------|
| DB_PORT | Optional, default 3306 | Database port number |
| DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name |
@@ -48,14 +49,21 @@ Backup and restore targets, schedule and retention are configured using environm
| AWS_REGION | Optional, required for S3 storage | AWS Region |
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
| Gmysql_PASSPHRASE | Optional, required to encrypt and restore backup | Gmysql passphrase |
| BACKUP_CRON_EXPRESSION | Optional if it was provided from the --period flag | Backup cron expression for docker in scheduled mode |
| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
| SSH_USER | Optional, required for SSH storage | ssh remote user |
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
| TARGET_DB_HOST | Optional, required for database migration | Target database host |
| TARGET_DB_PORT | Optional, required for database migration | Target database port |
| TARGET_DB_NAME | Optional, required for database migration | Target database name |
| TARGET_DB_USERNAME | Optional, required for database migration | Target database username |
| TARGET_DB_PASSWORD | Optional, required for database migration | Target database password |
| TG_TOKEN | Optional, required for Telegram notification | Telegram token |
| TG_CHAT_ID | Optional, required for Telegram notification | Telegram Chat ID |
---
## Run in Scheduled mode

View File

@@ -1,34 +1,37 @@
apiVersion: batch/v1
kind: CronJob
kind: Job
metadata:
name: bkup-job
spec:
schedule: "0 1 * * *"
jobTemplate:
name: backup
spec:
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup -s s3 --path /custom_path
- backup --storage s3
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
value: "dbname"
- name: DB_USERNAME
value: ""
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: ACCESS_KEY
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
@@ -41,4 +44,4 @@ spec:
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: OnFailure
restartPolicy: Never

13 main.go
View File

@@ -1,12 +1,11 @@
// Package main /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package main
//main
/*****
* MySQL Backup & Restore
* @author Jonas Kaninda
* @license MIT License <https://opensource.org/licenses/MIT>
* @link https://github.com/jkaninda/mysql-bkup
**/
import "github.com/jkaninda/mysql-bkup/cmd"
func main() {

View File

@@ -1,7 +1,9 @@
// Package pkg /*
/*
Copyright © 2024 Jonas Kaninda
*/
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
@@ -17,11 +19,9 @@ import (
)
func StartBackup(cmd *cobra.Command) {
_, _ = cmd.Flags().GetString("operation")
utils.Welcome()
//Set env
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "dbname", "DB_NAME")
utils.GetEnv(cmd, "port", "DB_PORT")
utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
//Get flag value and set env
@@ -32,37 +32,38 @@ func StartBackup(cmd *cobra.Command) {
prune, _ := cmd.Flags().GetBool("prune")
disableCompression, _ = cmd.Flags().GetBool("disable-compression")
executionMode, _ = cmd.Flags().GetString("mode")
dbName = os.Getenv("DB_NAME")
gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
dbConf = getDbConfig(cmd)
//
if gpqPassphrase != "" {
encryption = true
}
//Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbConf.dbName, time.Now().Format("20060102_150405"))
if disableCompression {
backupFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
backupFileName = fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
}
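// Example: a database named "store" dumped at 2023-12-19 02:29:41 yields
// "store_20231219_022941.sql.gz" (the sample file name used in the restore docs).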
if executionMode == "default" {
switch storage {
case "s3":
s3Backup(backupFileName, disableCompression, prune, backupRetention, encryption)
s3Backup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
case "local":
localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
case "ssh", "remote":
sshBackup(backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
sshBackup(dbConf, backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
case "ftp":
utils.Fatal("Not supported storage type: %s", storage)
default:
localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
}
} else if executionMode == "scheduled" {
scheduledMode(storage)
scheduledMode(dbConf, storage)
} else {
utils.Fatal("Error, unknown execution mode!")
}
@@ -70,7 +71,7 @@ func StartBackup(cmd *cobra.Command) {
}
// Run in scheduled mode
func scheduledMode(storage string) {
func scheduledMode(db *dbConfig, storage string) {
fmt.Println()
fmt.Println("**********************************")
@@ -81,7 +82,7 @@ func scheduledMode(storage string) {
utils.Info("Storage type %s ", storage)
//Test database connection
utils.TestDatabaseConnection()
testDatabaseConnection(db)
utils.Info("Creating backup job...")
CreateCrontabScript(disableCompression, storage)
@@ -117,12 +118,7 @@ func scheduledMode(storage string) {
}
// BackupDatabase backup database
func BackupDatabase(backupFileName string, disableCompression bool) {
dbHost = os.Getenv("DB_HOST")
dbPassword = os.Getenv("DB_PASSWORD")
dbUserName = os.Getenv("DB_USERNAME")
dbName = os.Getenv("DB_NAME")
dbPort = os.Getenv("DB_PORT")
func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
storagePath = os.Getenv("STORAGE_PATH")
err := utils.CheckEnvVars(dbHVars)
@@ -132,7 +128,7 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
}
utils.Info("Starting database backup...")
utils.TestDatabaseConnection()
testDatabaseConnection(db)
// Back up the database
utils.Info("Backing up database...")
@@ -140,11 +136,11 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
if disableCompression {
// Execute mysqldump
cmd := exec.Command("mysqldump",
"-h", dbHost,
"-P", dbPort,
"-u", dbUserName,
"--password="+dbPassword,
dbName,
"-h", db.dbHost,
"-P", db.dbPort,
"-u", db.dbUserName,
"--password="+db.dbPassword,
db.dbName,
)
output, err := cmd.Output()
if err != nil {
@@ -166,7 +162,7 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
} else {
// Execute mysqldump
cmd := exec.Command("mysqldump", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName)
cmd := exec.Command("mysqldump", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, "--password="+db.dbPassword, db.dbName)
stdout, err := cmd.StdoutPipe()
if err != nil {
log.Fatal(err)
@@ -189,9 +185,9 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
}
}
func localBackup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
func localBackup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
utils.Info("Backup database to local storage")
BackupDatabase(backupFileName, disableCompression)
BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
@@ -199,6 +195,8 @@ func localBackup(backupFileName string, disableCompression bool, prune bool, bac
}
utils.Info("Backup name is %s", finalFileName)
moveToBackup(finalFileName, storagePath)
//Send notification
utils.NotifySuccess(finalFileName)
//Delete old backup
if prune {
deleteOldBackup(backupRetention)
@@ -207,12 +205,12 @@ func localBackup(backupFileName string, disableCompression bool, prune bool, bac
deleteTemp()
}
func s3Backup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
func s3Backup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
utils.Info("Backup database to s3 storage")
//Backup database
BackupDatabase(backupFileName, disableCompression)
BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
@@ -240,13 +238,15 @@ func s3Backup(backupFileName string, disableCompression bool, prune bool, backup
}
}
utils.Done("Uploading backup archive to remote storage S3 ... done ")
//Send notification
utils.NotifySuccess(finalFileName)
//Delete temp
deleteTemp()
}
func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
utils.Info("Backup database to Remote server")
//Backup database
BackupDatabase(backupFileName, disableCompression)
BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
@@ -273,6 +273,8 @@ func sshBackup(backupFileName, remotePath string, disableCompression bool, prune
}
utils.Done("Uploading backup archive to remote storage ... done ")
//Send notification
utils.NotifySuccess(finalFileName)
//Delete temp
deleteTemp()
}

View File

@@ -1,4 +1,64 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"os"
)
type Config struct {
}
type dbConfig struct {
dbHost string
dbPort string
dbName string
dbUserName string
dbPassword string
}
type targetDbConfig struct {
targetDbHost string
targetDbPort string
targetDbUserName string
targetDbPassword string
targetDbName string
}
func getDbConfig(cmd *cobra.Command) *dbConfig {
//Set env
utils.GetEnv(cmd, "dbname", "DB_NAME")
dConf := dbConfig{}
dConf.dbHost = os.Getenv("DB_HOST")
dConf.dbPort = os.Getenv("DB_PORT")
dConf.dbName = os.Getenv("DB_NAME")
dConf.dbUserName = os.Getenv("DB_USERNAME")
dConf.dbPassword = os.Getenv("DB_PASSWORD")
err := utils.CheckEnvVars(dbHVars)
if err != nil {
utils.Error("Please make sure all required environment variables for database are set")
utils.Fatal("Error checking environment variables: %s", err)
}
return &dConf
}
func getTargetDbConfig() *targetDbConfig {
tdbConfig := targetDbConfig{}
tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
tdbConfig.targetDbPort = os.Getenv("TARGET_DB_PORT")
tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME")
tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME")
tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD")
err := utils.CheckEnvVars(tdbRVars)
if err != nil {
utils.Error("Please make sure all required environment variables for the target database are set")
utils.Fatal("Error checking target database environment variables: %s", err)
}
return &tdbConfig
}
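The pattern this refactor introduces: each command resolves its connection settings once via `getDbConfig`/`getTargetDbConfig` and passes the resulting struct explicitly, instead of re-reading environment variables inside every helper. A minimal sketch using only names from this diff (`exampleFlow` itself is hypothetical):
```go
// exampleFlow is a hypothetical caller illustrating the new config flow.
func exampleFlow(cmd *cobra.Command) {
	db := getDbConfig(cmd)     // reads DB_HOST, DB_PORT, DB_NAME, DB_USERNAME, DB_PASSWORD
	testDatabaseConnection(db) // fails fast if MySQL is unreachable
	BackupDatabase(db, "example_20240910_230124.sql.gz", false) // helpers now take *dbConfig
}
```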

View File

@@ -1,3 +1,9 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
@@ -9,11 +15,17 @@ import (
func Decrypt(inputFile string, passphrase string) error {
utils.Info("Decrypting backup file: " + inputFile + " ...")
//Create gpg home dir
err := utils.MakeDir(gpgHome)
if err != nil {
return err
}
utils.SetEnv("GNUPGHOME", gpgHome)
cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
err = cmd.Run()
if err != nil {
return err
}
@@ -24,11 +36,17 @@ func Decrypt(inputFile string, passphrase string) error {
func Encrypt(inputFile string, passphrase string) error {
utils.Info("Encrypting backup...")
//Create gpg home dir
err := utils.MakeDir(gpgHome)
if err != nil {
return err
}
utils.SetEnv("GNUPGHOME", gpgHome)
cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
err = cmd.Run()
if err != nil {
return err
}
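A hedged round-trip sketch of the two helpers above (file names are illustrative; `gpg --symmetric` appends `.gpg` to the input, and `Decrypt` writes to the name with the last extension stripped):
```go
// Hypothetical round trip: encrypt a dump, then decrypt it back.
passphrase := os.Getenv("GPG_PASSPHRASE")
if err := Encrypt("backup.sql.gz", passphrase); err != nil { // produces backup.sql.gz.gpg
	utils.Fatal("Error encrypting backup: %v", err)
}
if err := Decrypt("backup.sql.gz.gpg", passphrase); err != nil { // restores backup.sql.gz
	utils.Fatal("Error decrypting backup: %v", err)
}
```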

View File

@@ -1,9 +1,17 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"bytes"
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"os"
"os/exec"
"path/filepath"
"time"
)
@@ -96,3 +104,24 @@ func deleteTemp() {
utils.Info("Deleting %s ... done", tmpPath)
}
}
// testDatabaseConnection tests the database connection
func testDatabaseConnection(db *dbConfig) {
utils.Info("Connecting to %s database ...", db.dbName)
cmd := exec.Command("mysql", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, "--password="+db.dbPassword, db.dbName, "-e", "quit")
// Capture the output
var out bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &out
err := cmd.Run()
if err != nil {
utils.Error("Error testing database connection: %v\nOutput: %s", err, out.String())
os.Exit(1)
}
utils.Info("Successfully connected to %s database", db.dbName)
}

40 pkg/migrate.go Normal file
View File

@@ -0,0 +1,40 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"time"
)
func StartMigration(cmd *cobra.Command) {
utils.Welcome()
utils.Info("Starting database migration...")
//Get DB config
dbConf = getDbConfig(cmd)
targetDbConf = getTargetDbConfig()
//Defining the target database variables
newDbConfig := dbConfig{}
newDbConfig.dbHost = targetDbConf.targetDbHost
newDbConfig.dbPort = targetDbConf.targetDbPort
newDbConfig.dbName = targetDbConf.targetDbName
newDbConfig.dbUserName = targetDbConf.targetDbUserName
newDbConfig.dbPassword = targetDbConf.targetDbPassword
//Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
//Backup source Database
BackupDatabase(dbConf, backupFileName, true)
//Restore source database into target database
utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
RestoreDatabase(&newDbConfig, backupFileName)
utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
utils.Info("Database migration completed.")
}

View File

@@ -1,3 +1,9 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
@@ -10,11 +16,9 @@ import (
)
func StartRestore(cmd *cobra.Command) {
utils.Welcome()
//Set env
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "dbname", "DB_NAME")
utils.GetEnv(cmd, "port", "DB_PORT")
//Get flag value and set env
s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
@@ -23,47 +27,45 @@ func StartRestore(cmd *cobra.Command) {
file = utils.GetEnv(cmd, "file", "FILE_NAME")
executionMode, _ = cmd.Flags().GetString("mode")
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
dbConf = getDbConfig(cmd)
switch storage {
case "s3":
restoreFromS3(file, bucket, s3Path)
restoreFromS3(dbConf, file, bucket, s3Path)
case "local":
utils.Info("Restore database from local")
copyToTmp(storagePath, file)
RestoreDatabase(file)
RestoreDatabase(dbConf, file)
case "ssh":
restoreFromRemote(file, remotePath)
restoreFromRemote(dbConf, file, remotePath)
case "ftp":
utils.Fatal("Restore from FTP is not yet supported")
default:
utils.Info("Restore database from local")
RestoreDatabase(file)
copyToTmp(storagePath, file)
RestoreDatabase(dbConf, file)
}
}
func restoreFromS3(file, bucket, s3Path string) {
func restoreFromS3(db *dbConfig, file, bucket, s3Path string) {
utils.Info("Restore database from s3")
err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
if err != nil {
utils.Fatal("Error download file from s3 %s %v", file, err)
}
RestoreDatabase(file)
RestoreDatabase(db, file)
}
func restoreFromRemote(file, remotePath string) {
func restoreFromRemote(db *dbConfig, file, remotePath string) {
utils.Info("Restore database from remote server")
err := CopyFromRemote(file, remotePath)
if err != nil {
utils.Fatal("Error download file from remote server: %s %v ", filepath.Join(remotePath, file), err)
}
RestoreDatabase(file)
RestoreDatabase(db, file)
}
// RestoreDatabase restore database
func RestoreDatabase(file string) {
dbHost = os.Getenv("DB_HOST")
dbPassword = os.Getenv("DB_PASSWORD")
dbUserName = os.Getenv("DB_USERNAME")
dbName = os.Getenv("DB_NAME")
dbPort = os.Getenv("DB_PORT")
func RestoreDatabase(db *dbConfig, file string) {
gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
if file == "" {
utils.Fatal("Error, file required")
@@ -93,13 +95,13 @@ func RestoreDatabase(file string) {
}
if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
utils.TestDatabaseConnection()
testDatabaseConnection(db)
utils.Info("Restoring database...")
extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
// Restore from compressed file / .sql.gz
if extension == ".gz" {
str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " --password=" + db.dbPassword + " " + db.dbName
_, err := exec.Command("bash", "-c", str).Output()
if err != nil {
utils.Fatal("Error, in restoring the database %v", err)
@@ -111,7 +113,7 @@ func RestoreDatabase(file string) {
} else if extension == ".sql" {
//Restore from sql file
str := "cat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
str := "cat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " --password=" + db.dbPassword + " " + db.dbName
_, err := exec.Command("bash", "-c", str).Output()
if err != nil {
utils.Fatal(fmt.Sprintf("Error in restoring the database %s", err))

View File

@@ -1,3 +1,9 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (

View File

@@ -1,9 +1,11 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
// Package pkg /*
/*
Copyright © 2024 Jonas Kaninda
*/
import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"

View File

@@ -1,19 +1,21 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
const cronLogFile = "/var/log/mysql-bkup.log"
const tmpPath = "/tmp/backup"
const backupCronFile = "/usr/local/bin/backup_cron.sh"
const algorithm = "aes256"
const gpgHome = "gnupg"
const gpgExtension = "gpg"
var (
storage = "local"
file = ""
dbPassword = ""
dbUserName = ""
dbName = ""
dbHost = ""
dbPort = "3306"
executionMode = "default"
storagePath = "/backup"
disableCompression = false
@@ -27,6 +29,16 @@ var dbHVars = []string{
"DB_USERNAME",
"DB_NAME",
}
var tdbRVars = []string{
"TARGET_DB_HOST",
"TARGET_DB_PORT",
"TARGET_DB_NAME",
"TARGET_DB_USERNAME",
"TARGET_DB_PASSWORD",
}
var dbConf *dbConfig
var targetDbConf *targetDbConfig
// sshHVars Required environment variables for SSH remote server storage
var sshHVars = []string{

View File

@@ -1,3 +1,9 @@
// Package utils /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package utils
const RestoreExample = "mysql-bkup restore --dbname database --file db_20231219_022941.sql.gz\n" +

View File

@@ -1,3 +1,9 @@
// Package utils /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package utils
import (
@@ -49,8 +55,13 @@ func Fatal(msg string, args ...any) {
formattedMessage := fmt.Sprintf(msg, args...)
if len(args) == 0 {
fmt.Printf("%s ERROR: %s\n", currentTime, msg)
NotifyError(msg)
} else {
fmt.Printf("%s ERROR: %s\n", currentTime, formattedMessage)
NotifyError(formattedMessage)
}
os.Exit(1)
os.Kill.Signal()
}

75 utils/notification.go Normal file
View File

@@ -0,0 +1,75 @@
package utils
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
)
func sendMessage(msg string) {
Info("Sending notification... ")
chatId := os.Getenv("TG_CHAT_ID")
body, _ := json.Marshal(map[string]string{
"chat_id": chatId,
"text": msg,
})
url := fmt.Sprintf("%s/sendMessage", getTgUrl())
// Create an HTTP post request
request, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
if err != nil {
panic(err)
}
request.Header.Add("Content-Type", "application/json")
client := &http.Client{}
response, err := client.Do(request)
if err != nil {
panic(err)
}
code := response.StatusCode
if code == 200 {
Info("Notification has been sent")
} else {
body, _ := ioutil.ReadAll(response.Body)
Error("Message not sent, error: %s", string(body))
}
}
func NotifySuccess(fileName string) {
var vars = []string{
"TG_TOKEN",
"TG_CHAT_ID",
}
//Telegram notification
err := CheckEnvVars(vars)
if err == nil {
message := "PostgreSQL Backup \n" +
"Database has been backed up \n" +
"Backup name is " + fileName
sendMessage(message)
}
}
func NotifyError(error string) {
var vars = []string{
"TG_TOKEN",
"TG_CHAT_ID",
}
//Telegram notification
err := CheckEnvVars(vars)
if err == nil {
message := "PostgreSQL Backup \n" +
"An error occurred during database backup \n" +
"Error: " + error
sendMessage(message)
}
}
func getTgUrl() string {
return fmt.Sprintf("https://api.telegram.org/bot%s", os.Getenv("TG_TOKEN"))
}
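Notifications are opt-in: `NotifySuccess` and `NotifyError` silently do nothing unless both `TG_TOKEN` and `TG_CHAT_ID` are set. A minimal usage sketch (the token and chat ID below are placeholders):
```go
// Hypothetical usage: with both variables set, results are pushed to Telegram.
os.Setenv("TG_TOKEN", "123456:ABC-DEF")    // placeholder bot token
os.Setenv("TG_CHAT_ID", "987654321")       // placeholder chat ID
NotifySuccess("db_20240910_230124.sql.gz") // sends the success message via sendMessage
```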

View File

@@ -1,3 +1,9 @@
// Package utils /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package utils
import (
@@ -122,7 +128,7 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
fmt.Println("Failed to download file", err)
return err
}
Info(fmt.Sprintf("Backup downloaded: ", file.Name(), " bytes size ", numBytes))
Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)
return nil
}

View File

@@ -1,19 +1,18 @@
// Package utils /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package utils
/*****
* MySQL Backup & Restore
* @author Jonas Kaninda
* @license MIT License <https://opensource.org/licenses/MIT>
* @link https://github.com/jkaninda/mysql-bkup
**/
import (
"bytes"
"fmt"
"github.com/spf13/cobra"
"io"
"io/fs"
"os"
"os/exec"
"strconv"
)
func FileExists(filename string) bool {
@@ -90,34 +89,6 @@ func IsDirEmpty(name string) (bool, error) {
return true, nil
}
// TestDatabaseConnection tests the database connection
func TestDatabaseConnection() {
dbHost := os.Getenv("DB_HOST")
dbPassword := os.Getenv("DB_PASSWORD")
dbUserName := os.Getenv("DB_USERNAME")
dbName := os.Getenv("DB_NAME")
dbPort := os.Getenv("DB_PORT")
if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
Fatal("Please make sure all required database environment variables are set")
} else {
Info("Connecting to database ...")
cmd := exec.Command("mysql", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName, "-e", "quit")
// Capture the output
var out bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &out
err := cmd.Run()
if err != nil {
Error("Error testing database connection: %v\nOutput: %s", err, out.String())
os.Exit(1)
}
Info("Successfully connected to database")
}
}
func GetEnv(cmd *cobra.Command, flagName, envName string) string {
value, _ := cmd.Flags().GetString(flagName)
if value != "" {
@@ -182,3 +153,39 @@ func CheckEnvVars(vars []string) error {
return nil
}
func Welcome() {
fmt.Println()
fmt.Println("**********************************")
fmt.Println(" MySQL Backup ")
fmt.Println(" @Copyright © 2024 jkaninda ")
fmt.Println("***********************************")
}
// MakeDir create directory
func MakeDir(dirPath string) error {
err := os.Mkdir(dirPath, 0700)
if err != nil {
return err
}
return nil
}
// MakeDirAll create directory
func MakeDirAll(dirPath string) error {
err := os.MkdirAll(dirPath, 0700)
if err != nil {
return err
}
return nil
}
func GetIntEnv(envName string) int {
val := os.Getenv(envName)
if val == "" {
return 0
}
ret, err := strconv.Atoi(val)
if err != nil {
Error("Error: %v", err)
}
return ret
}