Compare commits

...

30 Commits
v0.5 ... v1.0

Author SHA1 Message Date
204f66badf Merge pull request #58 from jkaninda/develop
fix: fix github action by getting tag name
2024-08-03 16:23:46 +02:00
e0b40ed433 fix: fix github action by getting tag name 2024-08-03 16:23:08 +02:00
07d717fad2 Merge pull request #57 from jkaninda/v1.0
docs: update readme
2024-08-03 16:15:08 +02:00
3bf4911dee docs: update readme 2024-08-03 16:14:34 +02:00
0b34a835f7 Merge pull request #56 from jkaninda/v1.0
docs: update readme
2024-08-03 16:09:33 +02:00
22bf95e6ca docs: update readme 2024-08-03 16:08:24 +02:00
445a104943 Merge pull request #55 from jkaninda/v1.0
Add SSH storage, add database backup encrypt and decrypt
2024-08-03 16:05:55 +02:00
caeba955c5 Add SSH storage, add database backup encrypt and decrypt 2024-08-03 16:03:17 +02:00
d906de6b54 Merge pull request #54 from jkaninda/develop
Add maintaining log in scheduled mode
2024-07-28 22:56:59 +02:00
c8e68af09f Add maintaining log in scheduled mode 2024-07-28 22:54:31 +02:00
082ef09500 Merge pull request #53 from jkaninda/develop
docs: add supported extensions to the doc
2024-02-25 21:35:33 +01:00
2e61054334 docs: add suported extensions to the doc 2024-02-25 21:35:05 +01:00
f394f28357 Merge pull request #52 from jkaninda/develop
refactor: clean up code
2024-02-25 14:39:03 +01:00
d8867a9baf refactor: clean up code 2024-02-25 14:38:32 +01:00
6ed9ff0a31 Merge pull request #51 from jkaninda/develop
refactor: refactoring of code
2024-02-20 07:58:08 +01:00
a4c37e1a4b refactor: refactoring of code 2024-02-20 07:57:21 +01:00
c6930a00ba Merge pull request #50 from jkaninda/develop
refactor: refactoring of code, improvement of deleteOldBackup function
2024-02-18 13:29:01 +01:00
00f2fca8e4 refactor: refactoring of code, improvement of deleteOldBackup function 2024-02-18 13:27:54 +01:00
9588c5bcee Merge pull request #49 from jkaninda/develop
Develop
2024-02-17 18:25:37 +01:00
e0457a4ed8 refactor: clean up code 2024-02-17 18:24:45 +01:00
d3efa3fc05 chore: update docker tag version 2024-02-17 18:23:27 +01:00
7bcde78136 feat: add backup prune, to delete old backup 2024-02-17 18:17:41 +01:00
b95601ab57 Merge pull request #48 from jkaninda/develop
chore: rename env variable in Dockerfile
2024-01-22 21:19:44 +01:00
facd57e2cd chore: rename env variable in Dockerfile 2024-01-22 21:18:51 +01:00
2fa7e50485 Merge pull request #47 from jkaninda/develop
docs: refactoring of code, update doc
2024-01-21 15:27:14 +01:00
f53c68cd5c New release 2024-01-21 15:26:20 +01:00
902695032c docs: add more details for env variables 2024-01-21 15:24:44 +01:00
620801cb99 refactor: refactoring of code, update docs 2024-01-21 15:18:35 +01:00
e19643ebcb Merge pull request #46 from jkaninda/refactor
docs: update docs
2024-01-20 14:34:36 +01:00
c87201d08d docs: update docs 2024-01-20 14:33:58 +01:00
51 changed files with 2721 additions and 720 deletions

View File

@@ -1,39 +0,0 @@
name: Build
on:
push:
branches: [ "main" ]
workflow_dispatch:
inputs:
docker_tag:
description: 'Docker tag'
required: true
default: 'latest'
type: string
env:
BUILDKIT_IMAGE: jkaninda/mysql-bkup
jobs:
docker:
runs-on: ubuntu-latest
steps:
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
-
name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Build and push
uses: docker/build-push-action@v3
with:
push: true
file: "./docker/Dockerfile"
platforms: linux/amd64,linux/arm64
tags: |
"${{env.BUILDKIT_IMAGE}}:v0.5"
"${{env.BUILDKIT_IMAGE}}:latest"

55
.github/workflows/deploy-docs.yml vendored Normal file
View File

@@ -0,0 +1,55 @@
name: Deploy Documentation site to GitHub Pages
on:
push:
branches: ['main']
paths:
- 'docs/**'
- '.github/workflows/deploy-docs.yml'
workflow_dispatch:
permissions:
contents: read
pages: write
id-token: write
concurrency:
group: 'pages'
cancel-in-progress: true
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Ruby
uses: ruby/setup-ruby@v1
with:
ruby-version: '3.2'
bundler-cache: true
cache-version: 0
working-directory: docs
- name: Setup Pages
id: pages
uses: actions/configure-pages@v2
- name: Build with Jekyll
working-directory: docs
run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
env:
JEKYLL_ENV: production
- name: Upload artifact
uses: actions/upload-pages-artifact@v1
with:
path: 'docs/_site/'
deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
needs: build
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v1

49
.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,49 @@
name: Release
on:
push:
tags:
- v**
env:
BUILDKIT_IMAGE: jkaninda/mysql-bkup
jobs:
docker:
runs-on: ubuntu-latest
permissions:
packages: write
contents: read
steps:
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
-
name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Log in to GHCR
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
-
name: Get the tag name
id: get_tag_name
run: echo "TAG_NAME=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
-
name: Build and push
uses: docker/build-push-action@v3
with:
push: true
file: "./docker/Dockerfile"
platforms: linux/amd64,linux/arm64,linux/arm/v7
tags: |
"${{env.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
"${{env.BUILDKIT_IMAGE}}:latest"
"ghcr.io/${{env.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
"ghcr.io/${{env.BUILDKIT_IMAGE}}:latest"
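For reference, the `Get the tag name` step above uses plain shell parameter expansion to derive the image tag from the Git ref; a minimal sketch of the same expansion outside the workflow (the ref value is illustrative):

```sh
# Illustrative only: reproduce the workflow's tag extraction locally.
GITHUB_REF="refs/tags/v1.0"          # GitHub sets this for a tag push
TAG_NAME="${GITHUB_REF#refs/tags/}"  # strip the leading "refs/tags/"
echo "$TAG_NAME"                     # prints: v1.0
```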

1
.gitignore vendored
View File

@@ -8,3 +8,4 @@ test.md
mysql-bkup
/.DS_Store
/.idea
bin

46
Makefile Normal file
View File

@@ -0,0 +1,46 @@
BINARY_NAME=mysql-bkup
include .env
export
run:
go run . backup
build:
go build -o bin/${BINARY_NAME} .
compile:
GOOS=darwin GOARCH=arm64 go build -o bin/${BINARY_NAME}-darwin-arm64 .
GOOS=darwin GOARCH=amd64 go build -o bin/${BINARY_NAME}-darwin-amd64 .
GOOS=linux GOARCH=arm64 go build -o bin/${BINARY_NAME}-linux-arm64 .
GOOS=linux GOARCH=amd64 go build -o bin/${BINARY_NAME}-linux-amd64 .
docker-build:
docker build -f docker/Dockerfile -t jkaninda/mysql-bkup:latest .
docker-run: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --prune --keep-last 2
docker-restore: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup restore -f ${FILE_NAME}
docker-run-scheduled: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --mode scheduled --period "* * * * *"
docker-run-scheduled-s3: docker-build
docker run --rm --network web --user 1000:1000 --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
docker-run-s3: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "AWS_S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage s3 --path /custom-path
docker-restore-s3: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup restore --storage s3 -f ${FILE_NAME} --path /custom-path
docker-run-ssh: docker-build
docker run --rm --network web -v "${SSH_IDENTIFY_FILE_LOCAL}:" --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage ssh
docker-restore-ssh: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" jkaninda/mysql-bkup bkup restore --storage ssh -f ${FILE_NAME}
run-docs:
cd docs && bundle exec jekyll serve -H 0.0.0.0 -t
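The Makefile starts with `include .env`, so its targets expect a local `.env` file that defines the variables they reference; a minimal sketch with placeholder values (adapt the names to the storage backends you actually use):

```sh
# .env — placeholder values consumed by the Makefile targets above
DB_HOST=mysql
DB_NAME=mydatabase
DB_USERNAME=username
DB_PASSWORD=password
GPG_PASSPHRASE=my-passphrase
FILE_NAME=mydatabase_20240101_000000.sql.gz
# S3 targets
ACCESS_KEY=xxxx
SECRET_KEY=xxxx
AWS_S3_BUCKET_NAME=backups
AWS_S3_ENDPOINT=https://s3.amazonaws.com
# SSH targets
SSH_USER=user
SSH_HOST_NAME=hostname
SSH_PORT=22
SSH_REMOTE_PATH=/home/user/backups
SSH_IDENTIFY_FILE=/tmp/id_ed25519
```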

371
README.md
View File

@@ -1,22 +1,25 @@
# MySQL Backup
MySQL Backup and Restoration tool. Backup database to AWS S3 storage or any S3 Alternatives for Object Storage.
mysql-bkup is a Docker container image that can be used to back up and restore a MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
It also supports __encrypting__ your backups using GPG.
[![Build](https://github.com/jkaninda/mysql-bkup/actions/workflows/build.yml/badge.svg)](https://github.com/jkaninda/mysql-bkup/actions/workflows/build.yml)
The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
It handles __recurring__ backups of a MySQL database on Docker and can be deployed as a __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
It also supports __encrypting__ your backups using GPG.
[![Build](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml/badge.svg)](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
[![Go Report](https://goreportcard.com/badge/github.com/jkaninda/mysql-bkup)](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
![Docker Image Size (latest by date)](https://img.shields.io/docker/image-size/jkaninda/mysql-bkup?style=flat-square)
![Docker Pulls](https://img.shields.io/docker/pulls/jkaninda/mysql-bkup?style=flat-square)
<p align="center">
<a href="https://github.com/jkaninda/mysql-bkup">
<img src="https://www.mysql.com/common/logos/logo-mysql-170x115.png" alt="Logo">
</a>
</p>
> Runs on:
- Docker
- Kubernetes
> Links:
## Documentation is found at <https://jkaninda.github.io/mysql-bkup>
## Links:
- [Docker Hub](https://hub.docker.com/r/jkaninda/mysql-bkup)
- [Github](https://github.com/jkaninda/mysql-bkup)
@@ -24,297 +27,69 @@ MySQL Backup and Restoration tool. Backup database to AWS S3 storage or any S3 A
- [PostgreSQL](https://github.com/jkaninda/pg-bkup)
## Storage:
- local
- s3
- Object storage
- Local
- AWS S3 or any S3 Alternatives for Object Storage
- SSH
## Volumes:
## Quickstart
- /s3mnt => S3 mounting path
- /backup => local storage mounting path
### Simple backup using Docker CLI
## Usage
To run a one-time backup, bind your local volume to `/backup` in the container and run the `mysql-bkup backup` command:
| Options | Shorts | Usage |
|-----------------------|--------|--------------------------------------------------------------------|
| mysql-bkup | bkup | CLI utility |
| backup | | Backup database operation |
| restore | | Restore database operation |
| history | | Show the history of backup |
| --storage | -s | Set storage. local or s3 (default: local) |
| --file | -f | Set file name for restoration |
| --path | | Set s3 path without file name. eg: /custom_path |
| --dbname | -d | Set database name |
| --port | -p | Set database port (default: 3306) |
| --mode | -m | Set execution mode. default or scheduled (default: default) |
| --disable-compression | | Disable database backup compression |
| --period | | Set crontab period for scheduled mode only. (default: "0 1 * * *") |
| --timeout | -t | Set timeout (default: 60s) |
| --help | -h | Print this help message and exit |
| --version | -V | Print version information and exit |
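For instance, several of these options can be combined in a single invocation; a hypothetical scheduled S3 backup using the flags from the table above (database name and path are placeholders):

```sh
# Hypothetical combination of the documented flags
bkup backup --dbname mydatabase --storage s3 --path /custom_path \
  --mode scheduled --period "0 1 * * *"
```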
## Note:
Creating a dedicated user with read-only access for backup tasks is recommended!
> create read-only user
```sh
mysql -u root -p
```shell
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup mysql-bkup backup -d database_name
```
```sql
CREATE USER read_only_user IDENTIFIED BY 'your_strong_password';
Alternatively, pass a `--env-file` in order to use a full config as described below.
```
```sql
GRANT SELECT, SHOW VIEW ON *.* TO read_only_user;
```
```sql
FLUSH PRIVILEGES;
```
### Simple backup in docker compose file
## Backup database :
Simple backup usage
```sh
bkup backup --dbname database_name
```
```sh
bkup backup -d database_name
```
### S3
```sh
bkup backup --storage s3 --dbname database_name
```
## Docker run:
```sh
docker run --rm --network your_network_name --name mysql-bkup -v $PWD/backup:/backup/ -e "DB_HOST=database_host_name" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" jkaninda/mysql-bkup:latest bkup backup -d database_name
```
## Docker compose file:
```yaml
version: '3'
services:
mariadb:
container_name: mariadb
image: mariadb
environment:
MYSQL_DATABASE: mariadb
MYSQL_USER: mariadb
MYSQL_PASSWORD: password
MYSQL_ROOT_PASSWORD: password
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- bkup backup -d database_name
- mysql-bkup backup
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mariadb
- DB_USERNAME=mariadb
- DB_PORT=5432
- DB_HOST=postgres
- DB_NAME=foo
- DB_USERNAME=bar
- DB_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
## Restore database :
## Deploy on Kubernetes
Simple database restore operation usage
For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as CronJob.
```sh
bkup restore --dbname database_name --file database_20231217_115621.sql
```
```sh
bkup restore -f database_20231217_115621.sql
```
### S3
```sh
bkup restore --storage s3 --file database_20231217_115621.sql
```
## Docker run:
```sh
docker run --rm --network your_network_name --name mysql-bkup -v $PWD/backup:/backup/ -e "DB_HOST=database_host_name" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" jkaninda/mysql-bkup bkup backup -d database_name -f db_20231219_022941.sql.gz
```
## Docker compose file:
```yaml
version: '3'
services:
mariadb:
container_name: mariadb
image: mariadb:latest
environment:
MYSQL_DATABASE: mariadb
MYSQL_USER: mariadb
MYSQL_PASSWORD: password
MYSQL_ROOT_PASSWORD: password
mysql-bkup:
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- bkup restore --file database_20231217_115621.sql --dbname database_name
volumes:
- ./backup:/backup
environment:
#- FILE_NAME=mariadb_20231217_040238.sql # Optional if file name is set from command
- DB_PORT=3306
- DB_HOST=mariadb
- DB_NAME=mariadb
- DB_USERNAME=mariadb
- DB_PASSWORD=password
```
## Run
```sh
docker-compose up -d
```
## Backup to S3
```sh
docker run --rm --privileged --device /dev/fuse --name mysql-bkup -e "DB_HOST=db_hostname" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" -e "ACCESS_KEY=your_access_key" -e "SECRET_KEY=your_secret_key" -e "BUCKETNAME=your_bucket_name" -e "S3_ENDPOINT=https://s3.us-west-2.amazonaws.com" jkaninda/mysql-bkup bkup backup -s s3 -d database_name
```
> To change the S3 backup path, add this flag: `--path /myPath`. The default path is `/mysql_bkup`.
Simple S3 backup usage
```sh
bkup backup --storage s3 --dbname mydatabase
```
```yaml
version: '3'
services:
mysql-bkup:
image: jkaninda/mysql-bkup
container_name: mysql-bkup
privileged: true
devices:
- "/dev/fuse"
command:
- /bin/sh
- -c
- mysql-bkup restore --storage s3 -f database_20231217_115621.sql.gz
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=mariadb
- DB_USERNAME=mariadb
- DB_PASSWORD=password
- ACCESS_KEY=${ACCESS_KEY}
- SECRET_KEY=${SECRET_KEY}
- BUCKETNAME=${BUCKETNAME}
- S3_ENDPOINT=${S3_ENDPOINT}
```
## Run in Scheduled mode
This tool can be run as a CronJob in Kubernetes for regular backups, which makes deployment on Kubernetes easy since Kubernetes provides CronJob resources.
For Docker, you need to run it in scheduled mode by adding the `--mode scheduled` flag and specifying the backup schedule with the `--period "0 1 * * *"` flag.
Make an automated backup on Docker
## Syntax of crontab (field description)
The syntax is:
- 1: Minute (0-59)
- 2: Hour (0-23)
- 3: Day of month (1-31)
- 4: Month (1-12 [12 == December])
- 5: Day of the week (0-7 [7 or 0 == Sunday])
Easy to remember format:
```conf
* * * * * command to be executed
```
```conf
- - - - -
| | | | |
| | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
| | | ------- Month (1 - 12)
| | --------- Day of month (1 - 31)
| ----------- Hour (0 - 23)
------------- Minute (0 - 59)
```
> At every 30th minute
```conf
*/30 * * * *
```
> “At minute 0.” every hour
```conf
0 * * * *
```
> “At 01:00.” every day
```conf
0 1 * * *
```
## Example of scheduled mode
> Docker run :
```sh
docker run --rm --name mysql-bkup -v $BACKUP_DIR:/backup/ -e "DB_HOST=$DB_HOST" -e "DB_USERNAME=$DB_USERNAME" -e "DB_PASSWORD=$DB_PASSWORD" jkaninda/mysql-bkup bkup backup --dbname $DB_NAME --mode scheduled --period "0 1 * * *"
```
> With Docker compose
```yaml
version: "3"
services:
mysql-bkup:
image: jkaninda/mysql-bkup
container_name: mysql-bkup
privileged: true
devices:
- "/dev/fuse"
command:
- /bin/sh
- -c
- bkup backup --storage s3 --path /mys3_custome_path --dbname database_name --mode scheduled --period "*/30 * * * *"
environment:
- DB_PORT=3306
- DB_HOST=mysqlhost
- DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD}
- ACCESS_KEY=${ACCESS_KEY}
- SECRET_KEY=${SECRET_KEY}
- BUCKETNAME=${BUCKETNAME}
- S3_ENDPOINT=${S3_ENDPOINT}
```
## Kubernetes CronJob
For Kubernetes you don't need to run it in scheduled mode.
Simple Kubernetes CronJob usage:
### Simple Kubernetes CronJob usage:
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: mysql-bkup-job
name: bkup-job
spec:
schedule: "0 1 * * *"
jobTemplate:
@@ -324,15 +99,13 @@ spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
securityContext:
privileged: true
command:
- /bin/sh
- -c
- bkup backup -s s3 --path /custom_path
- mysql-bkup backup -s s3 --path /custom_path
env:
- name: DB_PORT
value: "3306"
value: "5432"
- name: DB_HOST
value: ""
- name: DB_NAME
@@ -341,22 +114,48 @@ spec:
value: ""
# Please use secret!
- name: DB_PASSWORD
value: "password"
- name: ACCESS_KEY
value: ""
- name: SECRET_KEY
value: ""
- name: BUCKETNAME
value: ""
- name: S3_ENDPOINT
value: "https://s3.us-west-2.amazonaws.com"
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: Never
```
## Available image registries
## Contributing
This Docker image is published to both Docker Hub and the GitHub container registry.
Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`:
```
docker pull jkaninda/mysql-bkup:v1.0
docker pull ghcr.io/jkaninda/mysql-bkup:v1.0
```
Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
## Supported Engines
This image is developed and tested against the Docker CE engine and Kubernetes exclusively.
While it may work against different implementations, there are no guarantees about support for non-Docker engines.
## References
We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
- The original image is based on `ubuntu` and requires additional tools, making it heavy.
- This image is written in Go.
- `arm64` and `arm/v7` architectures are supported.
- Docker in Swarm mode is supported.
- Kubernetes is supported.
Contributions are welcome! If you encounter any issues or have suggestions for improvements, please create an issue or submit a pull request.
Make sure to follow the existing coding style and provide tests for your changes.
## License

View File

@@ -1,14 +0,0 @@
#!/usr/bin/env bash
if [ $# -eq 0 ]
then
tag='latest'
else
tag=$1
fi
#go build
CGO_ENABLED=0 GOOS=linux go build
docker build -f docker/Dockerfile -t jkaninda/mysql-bkup:$tag .
#docker compose up -d --force-recreate

View File

@@ -21,8 +21,10 @@ var BackupCmd = &cobra.Command{
func init() {
//Backup
BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Set execution mode. default or scheduled")
BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Set schedule period time")
BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled")
BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time")
BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "Delete files created more than specified days ago, default 7 days")
BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
}

View File

@@ -1,14 +0,0 @@
package cmd
import (
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
)
var HistoryCmd = &cobra.Command{
Use: "history",
Short: "Show the history of backup",
Run: func(cmd *cobra.Command, args []string) {
utils.ShowHistory()
},
}

View File

@@ -1,11 +1,10 @@
// Package cmd /*
/*
Copyright © 2024 Jonas Kaninda <jonaskaninda@gmail.com>
Copyright © 2024 Jonas Kaninda
*/
package cmd
import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"os"
@@ -18,19 +17,8 @@ var rootCmd = &cobra.Command{
Long: `MySQL Database backup and restoration tool. Backup database to AWS S3 storage or any S3 Alternatives for Object Storage.`,
Example: utils.MainExample,
Version: appVersion,
//TODO: To remove
//For old user || To remove
Run: func(cmd *cobra.Command, args []string) {
if operation != "" {
if operation == "backup" || operation == "restore" {
fmt.Println(utils.Notice)
utils.Fatal("New config required, please check --help")
}
}
},
}
var operation = ""
var s3Path = "/mysql-bkup"
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
@@ -42,21 +30,13 @@ func Execute() {
}
func init() {
rootCmd.PersistentFlags().StringP("storage", "s", "local", "Set storage. local or s3")
rootCmd.PersistentFlags().StringP("path", "P", s3Path, "Set s3 path, without file name. for S3 storage only")
rootCmd.PersistentFlags().StringP("dbname", "d", "", "Set database name")
rootCmd.PersistentFlags().IntP("timeout", "t", 30, "Set timeout")
rootCmd.PersistentFlags().IntP("port", "p", 3306, "Set database port")
rootCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
rootCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name")
rootCmd.PersistentFlags().IntP("port", "p", 3306, "Database port")
rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only")
rootCmd.PersistentFlags().StringP("mode", "m", "default", "Set execution mode. default or scheduled")
rootCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Set schedule period time")
rootCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
rootCmd.PersistentFlags().StringP("file", "f", "", "File name of database")
rootCmd.AddCommand(VersionCmd)
rootCmd.AddCommand(BackupCmd)
rootCmd.AddCommand(RestoreCmd)
rootCmd.AddCommand(S3MountCmd)
rootCmd.AddCommand(HistoryCmd)
}

View File

@@ -1,14 +0,0 @@
package cmd
import (
"github.com/jkaninda/mysql-bkup/pkg"
"github.com/spf13/cobra"
)
var S3MountCmd = &cobra.Command{
Use: "s3mount",
Short: "Mount AWS S3 storage",
Run: func(cmd *cobra.Command, args []string) {
pkg.S3Mount()
},
}

View File

@@ -1,7 +1,7 @@
package cmd
/*
Copyright © 2024 Jonas Kaninda <jonaskaninda@gmail.com>
Copyright © 2024 Jonas Kaninda
*/
import (

View File

@@ -1,4 +1,4 @@
FROM golang:1.21.0 AS build
FROM golang:1.22.5 AS build
WORKDIR /app
# Copy the source code.
@@ -16,35 +16,51 @@ ENV DB_USERNAME=""
ENV DB_PASSWORD=""
ENV DB_PORT="3306"
ENV STORAGE=local
ENV BUCKETNAME=""
ENV ACCESS_KEY=""
ENV SECRET_KEY=""
ENV S3_ENDPOINT=https://s3.amazonaws.com
ENV AWS_S3_ENDPOINT=""
ENV AWS_S3_BUCKET_NAME=""
ENV AWS_ACCESS_KEY=""
ENV AWS_SECRET_KEY=""
ENV AWS_REGION="us-west-2"
ENV AWS_DISABLE_SSL="false"
ENV GPG_PASSPHRASE=""
ENV SSH_USER=""
ENV SSH_REMOTE_PATH=""
ENV SSH_PASSWORD=""
ENV SSH_HOST_NAME=""
ENV SSH_IDENTIFY_FILE=""
ENV SSH_PORT="22"
ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="v0.5"
LABEL authors="Jonas Kaninda"
ENV VERSION="v1.0"
ARG WORKDIR="/app"
ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup"
ARG BACKUP_CRON="/etc/cron.d/backup_cron"
ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
LABEL author="Jonas Kaninda"
RUN apt-get update -qq
#RUN apt-get install build-essential libcurl4-openssl-dev libxml2-dev mime-support -y
RUN apt install s3fs mysql-client supervisor cron -y
RUN apt install mysql-client supervisor cron gnupg -y
# Clear cache
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
RUN mkdir /s3mnt
RUN mkdir /tmp/s3cache
RUN chmod 777 /s3mnt
RUN chmod 777 /tmp/s3cache
RUN mkdir $WORKDIR
RUN mkdir $BACKUPDIR
RUN mkdir -p $BACKUP_TMP_DIR
RUN chmod 777 $WORKDIR
RUN chmod 777 $BACKUPDIR
RUN chmod 777 $BACKUP_TMP_DIR
RUN touch $BACKUP_CRON && \
touch $BACKUP_CRON_SCRIPT && \
chmod 777 $BACKUP_CRON && \
chmod 777 $BACKUP_CRON_SCRIPT
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
RUN chmod +x /usr/local/bin/mysql-bkup
RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/mysql_bkup
ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
RUN mkdir /backup
WORKDIR /backup
WORKDIR $WORKDIR

3
docs/.gitignore vendored Normal file
View File

@@ -0,0 +1,3 @@
_site
.sass-cache
.jekyll-metadata

24
docs/404.html Normal file
View File

@@ -0,0 +1,24 @@
---
layout: default
---
<style type="text/css" media="screen">
.container {
margin: 10px auto;
max-width: 600px;
text-align: center;
}
h1 {
margin: 30px 0;
font-size: 4em;
line-height: 1;
letter-spacing: -1px;
}
</style>
<div class="container">
<h1>404</h1>
<p><strong>Page not found :(</strong></p>
<p>The requested page could not be found.</p>
</div>

12
docs/Dockerfile Normal file
View File

@@ -0,0 +1,12 @@
FROM ruby:3.3.4
ENV LC_ALL C.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
WORKDIR /usr/src/app
COPY . ./
RUN gem install bundler && bundle install
EXPOSE 4000

43
docs/Gemfile Normal file
View File

@@ -0,0 +1,43 @@
source "https://rubygems.org"
# Hello! This is where you manage which Jekyll version is used to run.
# When you want to use a different version, change it below, save the
# file and run `bundle install`. Run Jekyll with `bundle exec`, like so:
#
# bundle exec jekyll serve
#
# This will help ensure the proper Jekyll version is running.
# Happy Jekylling!
gem "jekyll", "~> 3.10.0"
# This is the default theme for new Jekyll sites. You may change this to anything you like.
gem "minima", "~> 2.0"
# If you want to use GitHub Pages, remove the "gem "jekyll"" above and
# uncomment the line below. To upgrade, run `bundle update github-pages`.
# gem "github-pages", group: :jekyll_plugins
# If you have any plugins, put them here!
group :jekyll_plugins do
gem "jekyll-feed", "~> 0.6"
end
# Windows and JRuby does not include zoneinfo files, so bundle the tzinfo-data gem
# and associated library.
platforms :mingw, :x64_mingw, :mswin, :jruby do
gem "tzinfo", ">= 1", "< 3"
gem "tzinfo-data"
end
# Performance-booster for watching directories on Windows
gem "wdm", "~> 0.1.0", :install_if => Gem.win_platform?
# kramdown v2 ships without the gfm parser by default. If you're using
# kramdown v1, comment out this line.
gem "kramdown-parser-gfm"
# Lock `http_parser.rb` gem to `v0.6.x` on JRuby builds since newer versions of the gem
# do not have a Java counterpart.
gem "http_parser.rb", "~> 0.6.0", :platforms => [:jruby]
gem "just-the-docs"

116
docs/Gemfile.lock Normal file
View File

@@ -0,0 +1,116 @@
GEM
remote: https://rubygems.org/
specs:
addressable (2.8.7)
public_suffix (>= 2.0.2, < 7.0)
colorator (1.1.0)
concurrent-ruby (1.3.3)
csv (3.3.0)
em-websocket (0.5.3)
eventmachine (>= 0.12.9)
http_parser.rb (~> 0)
eventmachine (1.2.7)
ffi (1.17.0)
ffi (1.17.0-aarch64-linux-gnu)
ffi (1.17.0-aarch64-linux-musl)
ffi (1.17.0-arm-linux-gnu)
ffi (1.17.0-arm-linux-musl)
ffi (1.17.0-arm64-darwin)
ffi (1.17.0-x86-linux-gnu)
ffi (1.17.0-x86-linux-musl)
ffi (1.17.0-x86_64-darwin)
ffi (1.17.0-x86_64-linux-gnu)
ffi (1.17.0-x86_64-linux-musl)
forwardable-extended (2.6.0)
http_parser.rb (0.8.0)
i18n (1.14.5)
concurrent-ruby (~> 1.0)
jekyll (3.10.0)
addressable (~> 2.4)
colorator (~> 1.0)
csv (~> 3.0)
em-websocket (~> 0.5)
i18n (>= 0.7, < 2)
jekyll-sass-converter (~> 1.0)
jekyll-watch (~> 2.0)
kramdown (>= 1.17, < 3)
liquid (~> 4.0)
mercenary (~> 0.3.3)
pathutil (~> 0.9)
rouge (>= 1.7, < 4)
safe_yaml (~> 1.0)
webrick (>= 1.0)
jekyll-feed (0.17.0)
jekyll (>= 3.7, < 5.0)
jekyll-include-cache (0.2.1)
jekyll (>= 3.7, < 5.0)
jekyll-sass-converter (1.5.2)
sass (~> 3.4)
jekyll-seo-tag (2.8.0)
jekyll (>= 3.8, < 5.0)
jekyll-watch (2.2.1)
listen (~> 3.0)
just-the-docs (0.8.2)
jekyll (>= 3.8.5)
jekyll-include-cache
jekyll-seo-tag (>= 2.0)
rake (>= 12.3.1)
kramdown (2.4.0)
rexml
kramdown-parser-gfm (1.1.0)
kramdown (~> 2.0)
liquid (4.0.4)
listen (3.9.0)
rb-fsevent (~> 0.10, >= 0.10.3)
rb-inotify (~> 0.9, >= 0.9.10)
mercenary (0.3.6)
minima (2.5.1)
jekyll (>= 3.5, < 5.0)
jekyll-feed (~> 0.9)
jekyll-seo-tag (~> 2.1)
pathutil (0.16.2)
forwardable-extended (~> 2.6)
public_suffix (6.0.1)
rake (13.2.1)
rb-fsevent (0.11.2)
rb-inotify (0.11.1)
ffi (~> 1.0)
rexml (3.3.2)
strscan
rouge (3.30.0)
safe_yaml (1.0.5)
sass (3.7.4)
sass-listen (~> 4.0.0)
sass-listen (4.0.0)
rb-fsevent (~> 0.9, >= 0.9.4)
rb-inotify (~> 0.9, >= 0.9.7)
strscan (3.1.0)
wdm (0.1.1)
webrick (1.8.1)
PLATFORMS
aarch64-linux-gnu
aarch64-linux-musl
arm-linux-gnu
arm-linux-musl
arm64-darwin
ruby
x86-linux-gnu
x86-linux-musl
x86_64-darwin
x86_64-linux-gnu
x86_64-linux-musl
DEPENDENCIES
http_parser.rb (~> 0.6.0)
jekyll (~> 3.10.0)
jekyll-feed (~> 0.6)
just-the-docs
kramdown-parser-gfm
minima (~> 2.0)
tzinfo (>= 1, < 3)
tzinfo-data
wdm (~> 0.1.0)
BUNDLED WITH
2.5.16

70
docs/_config.yml Normal file
View File

@@ -0,0 +1,70 @@
# Welcome to Jekyll!
#
# This config file is meant for settings that affect your whole blog, values
# which you are expected to set up once and rarely edit after that. If you find
# yourself editing this file very often, consider using Jekyll's data files
# feature for the data you need to update frequently.
#
# For technical reasons, this file is *NOT* reloaded automatically when you use
# 'bundle exec jekyll serve'. If you change this file, please restart the server process.
# Site settings
# These are used to personalize your new site. If you look in the HTML files,
# you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
# You can create any custom variable you would like, and they will be accessible
# in the templates via {{ site.myvariable }}.
title: MySQL database backup
email: hi@jonaskaninda.com
description: >- # this means to ignore newlines until "baseurl:"
MySQL Backup and Restore Docker container image. Backup database to AWS S3 storage or SSH remote server.
baseurl: "" # the subpath of your site, e.g. /blog
url: "jkaninda.github.io/mysql-bkup/" # the base hostname & protocol for your site, e.g. http://example.com
twitter_username: jonaskaninda
github_username: jkaninda
callouts_level: quiet
callouts:
highlight:
color: yellow
important:
title: Important
color: blue
new:
title: New
color: green
note:
title: Note
color: purple
warning:
title: Warning
color: red
# Build settings
markdown: kramdown
theme: just-the-docs
plugins:
- jekyll-feed
aux_links:
'GitHub Repository':
- https://github.com/jkaninda/mysql-bkup
nav_external_links:
- title: GitHub Repository
url: https://github.com/jkaninda/mysql-bkup
footer_content: >-
Copyright &copy; 2024 <a target="_blank" href="https://www.jonaskaninda.com">Jonas Kaninda</a>.
Distributed under the <a href="https://github.com/jkaninda/mysql-bkup/tree/main/LICENSE">MIT License.</a><br>
Something missing, unclear or not working? Open <a href="https://github.com/jkaninda/mysql-bkup/issues">an issue</a>.
# Exclude from processing.
# The following items will not be processed, by default. Create a custom list
# to override the default setting.
# exclude:
# - Gemfile
# - Gemfile2.lock
# - node_modules
# - vendor/bundle/
# - vendor/cache/
# - vendor/gems/
# - vendor/ruby/

View File

@@ -0,0 +1,25 @@
---
layout: post
title: "Welcome to Jekyll!"
date: 2024-07-29 03:36:13 +0200
categories: jekyll update
---
You'll find this post in your `_posts` directory. Go ahead and edit it and re-build the site to see your changes. You can rebuild the site in many different ways, but the most common way is to run `jekyll serve`, which launches a web server and auto-regenerates your site when a file is updated.
To add new posts, simply add a file in the `_posts` directory that follows the convention `YYYY-MM-DD-name-of-post.ext` and includes the necessary front matter. Take a look at the source for this post to get an idea about how it works.
Jekyll also offers powerful support for code snippets:
{% highlight ruby %}
def print_hi(name)
puts "Hi, #{name}"
end
print_hi('Tom')
#=> prints 'Hi, Tom' to STDOUT.
{% endhighlight %}
Check out the [Jekyll docs][jekyll-docs] for more info on how to get the most out of Jekyll. File all bugs/feature requests at [Jekyll's GitHub repo][jekyll-gh]. If you have questions, you can ask them on [Jekyll Talk][jekyll-talk].
[jekyll-docs]: https://jekyllrb.com/docs/home
[jekyll-gh]: https://github.com/jekyll/jekyll
[jekyll-talk]: https://talk.jekyllrb.com/

13
docs/docker-compose.yml Normal file
View File

@@ -0,0 +1,13 @@
services:
jekyll:
build:
context: ./
ports:
- 4000:4000
environment:
- JEKYLL_ENV=development
volumes:
- .:/usr/src/app
stdin_open: true
tty: true
command: bundle exec jekyll serve -H 0.0.0.0 -t

View File

@@ -0,0 +1,139 @@
---
title: Backup to AWS S3
layout: default
parent: How Tos
nav_order: 2
---
# Backup to AWS S3
{: .note }
As described in the local backup section, to change the storage of your backup and use S3 as storage, you need to add `--storage s3` (`-s s3`).
You can also specify a specific folder where you want to save your data by adding the `--path /my-custom-path` flag.
## Backup to S3
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup --storage s3 -d database --path /my-custom-path
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS configurations
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION="us-west-2"
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
## If you are using an S3 alternative such as MinIO and your MinIO instance is not secured, change this to true
- AWS_DISABLE_SSL="false"
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
### Recurring backups to S3
As explained above, you just need to add the AWS environment variables and specify the storage type `--storage s3`.
If you need recurring backups, you can use `--mode scheduled` and specify the backup schedule by adding the `--period "0 1 * * *"` flag as described below.
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS configurations
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION="us-west-2"
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
## If you are using an S3 alternative such as MinIO and your MinIO instance is not secured, change this to true
- AWS_DISABLE_SSL="false"
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
## Deploy on Kubernetes
For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as CronJob.
### Simple Kubernetes CronJob usage:
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: bkup-job
spec:
schedule: "0 1 * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup -s s3 --path /custom_path
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: OnFailure
```

View File

@@ -0,0 +1,146 @@
---
title: Backup to SSH
layout: default
parent: How Tos
nav_order: 3
---
# Backup to SSH remote server
As described in the S3 backup section, to change the storage of your backup and use SSH as storage, you need to add `--storage ssh` or `--storage remote`.
You need to set the full remote path by adding the `--path /home/jkaninda/backups` flag or using the `SSH_REMOTE_PATH` environment variable.
{: .note }
These environment variables are required for SSH backup: `SSH_HOST_NAME`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, `SSH_PORT`, or `SSH_PASSWORD` if you don't use a private key to access your server.
Accessing the remote server using a password is not recommended; use a private key instead.
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup --storage remote -d database
volumes:
- ./id_ed25519:/tmp/id_ed25519"
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## SSH config
- SSH_HOST_NAME="hostname"
- SSH_PORT=22
- SSH_USER=user
- SSH_REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
## We advise you to use a private key instead of a password
#- SSH_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
### Recurring backups to SSH remote server
As explained above, you just need to add the required environment variables and specify the storage type `--storage ssh`.
You can use `--mode scheduled` and specify the backup schedule by adding the `--period "0 1 * * *"` flag as described below.
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup -d database --storage ssh --mode scheduled --period "0 1 * * *"
volumes:
- ./id_ed25519:/tmp/id_ed25519"
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## SSH config
- SSH_HOST_NAME="hostname"
- SSH_PORT=22
- SSH_USER=user
- SSH_REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
## We advise you to use a private key instead of a password
#- SSH_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
## Deploy on Kubernetes
For Kubernetes, you don't need to run it in scheduled mode.
You can deploy it as CronJob.
Simple Kubernetes CronJob usage:
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: bkup-job
spec:
schedule: "0 1 * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup -s ssh --path /custom_path
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: ""
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "xxx"
- name: SSH_REMOTE_PATH
value: "/home/jkaninda/backups"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: SSH_IDENTIFY_FILE
value: "/home/jkaninda/backups"
restartPolicy: OnFailure
```

89
docs/how-tos/backup.md Normal file
View File

@@ -0,0 +1,89 @@
---
title: Backup
layout: default
parent: How Tos
nav_order: 1
---
# Backup database
To backup the database, you need to add `backup` subcommand to `mysql-bkup` or `bkup`.
{: .note }
The default storage is local storage mounted to __/backup__. The backup is compressed by default using gzip. The flag __`disable-compression`__ can be used when you need to disable backup compression.
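For example, to keep the dump as a plain `.sql` file rather than the default gzip-compressed archive, add the flag to the backup command; a sketch using placeholder connection values:

```shell
# One-time backup without gzip compression (placeholder credentials)
docker run --rm --network your_network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=mysql" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup mysql-bkup backup -d database --disable-compression
```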
{: .warning }
Creating a dedicated user with read-only access for backup tasks is recommended!
The backup process can be run in scheduled mode for recurring backups.
It handles __recurring__ backups of a MySQL database on Docker and can be deployed as a __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup -d database
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
### Backup using Docker CLI
```shell
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup mysql-bkup backup -d database_name
```
If you need recurring backups, you can use `--mode scheduled` and specify the backup schedule by adding the `--period "0 1 * * *"` flag as described below.
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup -d database --mode scheduled --period "0 1 * * *"
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```

View File

@@ -0,0 +1,54 @@
---
title: Encrypt backups using GPG
layout: default
parent: How Tos
nav_order: 7
---
# Encrypt backup
The image supports encrypting backups using GPG out of the box. If the `GPG_PASSPHRASE` environment variable is set, the backup archive will be encrypted using the given passphrase and saved as a `.sql.gpg` file (or `.sql.gz.gpg` for compressed backups).
{: .warning }
To restore an encrypted backup, you need to provide the same GPG passphrase used during the backup process.
To decrypt manually, you need to install `gnupg`.
### Decrypt backup
```shell
gpg --batch --passphrase "my-passphrase" \
--output database_20240730_044201.sql.gz \
--decrypt database_20240730_044201.sql.gz.gpg
```
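For the opposite direction, GPG's symmetric mode can produce an equivalent encrypted archive manually; this is only a sketch of a comparable command, not necessarily the exact invocation the image performs internally:

```shell
gpg --batch --passphrase "my-passphrase" \
    --symmetric --cipher-algo aes256 \
    --output database_20240730_044201.sql.gz.gpg \
    database_20240730_044201.sql.gz
```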
### Backup
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup -d database
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## Required to encrypt backup
- GPG_PASSPHRASE=my-secure-passphrase
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```

8
docs/how-tos/index.md Normal file
View File

@@ -0,0 +1,8 @@
---
title: How Tos
layout: default
nav_order: 3
has_children: true
---
## How Tos

View File

@@ -0,0 +1,51 @@
---
title: Restore database from AWS S3
layout: default
parent: How Tos
nav_order: 5
---
# Restore database from S3 storage
To restore the database, you need to add `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
{: .note }
It supports __.sql__ and __.sql.gz__ compressed files.
### Restore
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS configurations
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION="us-west-2"
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
## If you are using an S3 alternative such as MinIO and your MinIO instance is not secured, change this to true
- AWS_DISABLE_SSL="false"
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```

View File

@@ -0,0 +1,50 @@
---
title: Restore database from SSH
layout: default
parent: How Tos
nav_order: 6
---
# Restore database from SSH remote server
To restore the database from your remote server, you need to add `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
{: .note }
It supports __.sql__ and __.sql.gz__ compressed files.
### Restore
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=postgres
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## SSH config
- SSH_HOST_NAME="hostname"
- SSH_PORT=22
- SSH_USER=user
- SSH_REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
## We advise you to use a private key instead of a password
#- SSH_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```

43
docs/how-tos/restore.md Normal file
View File

@@ -0,0 +1,43 @@
---
title: Restore database
layout: default
parent: How Tos
nav_order: 4
---
# Restore database
To restore the database, you need to add `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
{: .note }
It supports __.sql__ and __.sql.gz__ compressed files.
### Restore
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup restore -d database -f store_20231219_022941.sql.gz
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```

103
docs/index.md Normal file
View File

@@ -0,0 +1,103 @@
---
title: Overview
layout: home
nav_order: 1
---
# About mysql-bkup
{:.no_toc}
mysql-bkup is a Docker container image that can be used to back up and restore a MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
It also supports __encrypting__ your backups using GPG.
We are open to receiving stars, PRs, and issues!
{: .fs-6 .fw-300 }
---
The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
It handles __recurring__ backups of a MySQL database on Docker and can be deployed as a __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
It also supports __encrypting__ your backups using GPG.
{: .note }
Code and documentation for the `v1` version are on [this branch][v1-branch].
[v1-branch]: https://github.com/jkaninda/mysql-bkup
---
## Quickstart
### Simple backup using Docker CLI
To run a one-time backup, bind your local volume to `/backup` in the container and run the `mysql-bkup backup` command:
```shell
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup mysql-bkup backup -d database_name
```
Alternatively, pass a `--env-file` in order to use a full config as described below.
### Simple backup in docker compose file
```yaml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=postgres
- DB_NAME=foo
- DB_USERNAME=bar
- DB_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
## Available image registries
This Docker image is published to both Docker Hub and the GitHub container registry.
Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`:
```
docker pull jkaninda/mysql-bkup:v1.0
docker pull ghcr.io/jkaninda/mysql-bkup:v1.0
```
Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
## Supported Engines
This image is developed and tested against the Docker CE engine and Kubernetes exclusively.
While it may work against different implementations, there are no guarantees about support for non-Docker engines.
## References
We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
- The original image is based on `ubuntu` and requires additional tools, making it heavy.
- This image is written in Go.
- `arm64` and `arm/v7` architectures are supported.
- Docker in Swarm mode is supported.
- Kubernetes is supported.

358
docs/old-version/index.md Normal file
View File

@@ -0,0 +1,358 @@
---
layout: page
title: Old version
permalink: /old-version/
---
This is the documentation of mysql-bkup for all old versions below `v1.0`.
In the old version, S3 storage was mounted using s3fs, so we decided to migrate to the official AWS SDK.
## Storage:
- local
- s3
- Object storage
## Volumes:
- /s3mnt => S3 mounting path
- /backup => local storage mounting path
### Usage
| Options | Shorts | Usage |
|-----------------------|--------|------------------------------------------------------------------------|
| mysql-bkup | bkup | CLI utility |
| backup | | Backup database operation |
| restore | | Restore database operation |
| history | | Show the history of backup |
| --storage | -s | Storage. local or s3 (default: local) |
| --file | -f | File name to restore |
| --path | | S3 path without file name. eg: /custom_path |
| --dbname | -d | Database name |
| --port | -p | Database port (default: 3306) |
| --mode | -m | Execution mode. default or scheduled (default: default) |
| --disable-compression | | Disable database backup compression |
| --prune | | Delete old backup, default disabled |
| --keep-last | | Delete old backup created more than specified days ago, default 7 days |
| --period | | Crontab period for scheduled mode only. (default: "0 1 * * *") |
| --help | -h | Print this help message and exit |
| --version | -V | Print version information and exit |
## Environment variables
| Name | Requirement | Description |
|-------------|--------------------------------------------------|------------------------------------------------------|
| DB_PORT | Optional, default 3306 | Database port number |
| DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name |
| DB_USERNAME | Required | Database user name |
| DB_PASSWORD | Required | Database password |
| ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
| SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
| BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
| S3_ENDPOINT | Optional, required for S3 storage | AWS S3 Endpoint |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
## Note:
Creating a dedicated user with read-only access for backup tasks is recommended!
> create read-only user
## Backup database :
Simple backup usage
```sh
bkup backup
```
### S3
```sh
mysql-bkup backup --storage s3
```
## Docker run:
```sh
docker run --rm --network your_network_name \
--name mysql-bkup -v $PWD/backup:/backup/ \
-e "DB_HOST=database_host_name" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" jkaninda/mysql-bkup:v0.7 mysql-bkup backup -d database_name
```
## Docker compose file:
```yaml
version: '3'
services:
  mysql:
    image: mysql:8.0
    container_name: mysql
    restart: unless-stopped
    volumes:
      - ./mysql:/var/lib/mysql
    environment:
      MYSQL_DATABASE: bkup
      MYSQL_USER: bkup
      MYSQL_PASSWORD: password
      MYSQL_ROOT_PASSWORD: password
  mysql-bkup:
    image: jkaninda/mysql-bkup:v0.7
    container_name: mysql-bkup
    depends_on:
      - mysql
    command:
      - /bin/sh
      - -c
      - mysql-bkup backup -d bkup
    volumes:
      - ./backup:/backup
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=bkup
      - DB_USERNAME=bkup
      - DB_PASSWORD=password
```
## Restore database
Simple database restore operation usage
```sh
mysql-bkup restore --file database_20231217_115621.sql --dbname database_name
```
```sh
mysql-bkup restore -f database_20231217_115621.sql -d database_name
```
### S3
```sh
mysql-bkup restore --storage s3 --file database_20231217_115621.sql --dbname database_name
```
## Docker run:
```sh
docker run --rm --network your_network_name \
--name mysql-bkup \
-v $PWD/backup:/backup/ \
-e "DB_HOST=database_host_name" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup:v0.7 mysql-bkup restore -d database_name -f store_20231219_022941.sql.gz
```
## Docker compose file:
```yaml
version: '3'
services:
  mysql-bkup:
    image: jkaninda/mysql-bkup:v0.7
    container_name: mysql-bkup
    command:
      - /bin/sh
      - -c
      - mysql-bkup restore --file database_20231217_115621.sql -d database_name
    volumes:
      - ./backup:/backup
    environment:
      #- FILE_NAME=database_20231217_040238.sql.gz # Optional if the file name is set from the command
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_USERNAME=user_name
      - DB_PASSWORD=password
```
## Run
```sh
docker-compose up -d
```
## Backup to S3
```sh
docker run --rm --privileged \
--device /dev/fuse --name mysql-bkup \
-e "DB_HOST=db_hostname" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
-e "ACCESS_KEY=your_access_key" \
-e "SECRET_KEY=your_secret_key" \
-e "BUCKETNAME=your_bucket_name" \
-e "S3_ENDPOINT=https://s3.us-west-2.amazonaws.com" \
jkaninda/mysql-bkup:v0.7 mysql-bkup backup -s s3 -d database_name
```
> To change the S3 backup path, add the flag `--path /my_customPath`. The default path is `/mysql-bkup`.
Simple S3 backup usage
```sh
mysql-bkup backup --storage s3 --dbname mydatabase
```
```yaml
mysql-bkup:
  image: jkaninda/mysql-bkup:v0.7
  container_name: mysql-bkup
  privileged: true
  devices:
    - "/dev/fuse"
  command:
    - /bin/sh
    - -c
    - mysql-bkup restore --storage s3 -f database_20231217_115621.sql.gz --dbname database_name
  environment:
    - DB_PORT=3306
    - DB_HOST=mysql
    - DB_USERNAME=user_name
    - DB_PASSWORD=password
    - ACCESS_KEY=${ACCESS_KEY}
    - SECRET_KEY=${SECRET_KEY}
    - BUCKET_NAME=${BUCKET_NAME}
    - S3_ENDPOINT=${S3_ENDPOINT}
```
## Run in Scheduled mode
This tool can be run as a CronJob in Kubernetes for regular backups, which makes deployment on Kubernetes easy since Kubernetes provides a CronJob resource.
For Docker, you need to run it in scheduled mode by adding the `--mode scheduled` flag and specifying the backup period with the `--period "0 1 * * *"` flag.
This lets you make automated backups on Docker, as shown in the examples below.
## Syntax of crontab (field description)
The syntax is:
- 1: Minute (0-59)
- 2: Hour (0-23)
- 3: Day of the month (1-31)
- 4: Month (1-12 [12 == December])
- 5: Day of the week (0-7 [7 or 0 == Sunday])
Easy to remember format:
```conf
* * * * * command to be executed
```
```conf
- - - - -
| | | | |
| | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
| | | ------- Month (1 - 12)
| | --------- Day of month (1 - 31)
| ----------- Hour (0 - 23)
------------- Minute (0 - 59)
```
> At every 30th minute
```conf
*/30 * * * *
```
> At minute 0 of every hour
```conf
0 * * * *
```
> At 01:00 every day
```conf
0 1 * * *
```
## Example of scheduled mode
> Docker run :
```sh
docker run --rm --name mysql-bkup \
-v $BACKUP_DIR:/backup/ \
-e "DB_HOST=$DB_HOST" \
-e "DB_USERNAME=$DB_USERNAME" \
-e "DB_PASSWORD=$DB_PASSWORD" jkaninda/mysql-bkup:v0.7 mysql-bkup backup --dbname $DB_NAME --mode scheduled --period "0 1 * * *"
```
> With Docker compose
```yaml
version: "3"
services:
mysql-bkup:
image: jkaninda/mysql-bkup:v0.7
container_name: mysql-bkup
privileged: true
devices:
- "/dev/fuse"
command:
- /bin/sh
- -c
- mysql-bkup backup --storage s3 --path /mys3_custom_path --dbname database_name --mode scheduled --period "*/30 * * * *"
environment:
- DB_PORT=3306
- DB_HOST=postgreshost
- DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD}
- ACCESS_KEY=${ACCESS_KEY}
- SECRET_KEY=${SECRET_KEY}
- BUCKET_NAME=${BUCKET_NAME}
- S3_ENDPOINT=${S3_ENDPOINT}
```
## Kubernetes CronJob
For Kubernetes, you don't need to run it in scheduled mode.
Simple Kubernetes CronJob usage:
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: bkup-job
spec:
  schedule: "0 1 * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: mysql-bkup
              image: jkaninda/mysql-bkup:v0.7
              securityContext:
                privileged: true
              command:
                - /bin/sh
                - -c
                - mysql-bkup backup -s s3 --path /custom_path
              env:
                - name: DB_PORT
                  value: "3306"
                - name: DB_HOST
                  value: ""
                - name: DB_NAME
                  value: ""
                - name: DB_USERNAME
                  value: ""
                # Please use secret!
                - name: DB_PASSWORD
                  value: ""
                - name: ACCESS_KEY
                  value: ""
                - name: SECRET_KEY
                  value: ""
                - name: BUCKET_NAME
                  value: ""
                - name: S3_ENDPOINT
                  value: "https://s3.us-west-2.amazonaws.com"
          restartPolicy: Never
```
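To deploy and verify the CronJob, the standard `kubectl` commands can be used (a sketch; the manifest file name is illustrative, the CronJob name matches the manifest above):
```sh
# Apply the CronJob manifest
kubectl apply -f bkup-job.yaml
# Trigger a one-off run to verify that the backup works
kubectl create job --from=cronjob/bkup-job bkup-job-manual-test
kubectl logs -f job/bkup-job-manual-test
```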
## Authors
**Jonas Kaninda**
- <https://github.com/jkaninda>

105
docs/reference/index.md Normal file
View File

@@ -0,0 +1,105 @@
---
title: Configuration Reference
layout: default
nav_order: 2
---
# Configuration reference
Backup and restore targets, the schedule, and the retention period are configured using environment variables or command-line flags.
### CLI utility Usage
| Options | Shorts | Usage |
|-----------------------|--------|----------------------------------------------------------------------------------------|
| mysql-bkup | bkup | CLI utility |
| backup | | Backup database operation |
| restore | | Restore database operation |
| --storage | -s | Storage. local or s3 (default: local) |
| --file | -f | File name for restoration |
| --path | | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` |
| --dbname | -d | Database name |
| --port | -p | Database port (default: 3306) |
| --mode | -m | Execution mode. default or scheduled (default: default) |
| --disable-compression | | Disable database backup compression |
| --prune | | Delete old backup, default disabled |
| --keep-last | | Delete old backup created more than specified days ago, default 7 days |
| --period | | Crontab period for scheduled mode only. (default: "0 1 * * *") |
| --help | -h | Print this help message and exit |
| --version | -V | Print version information and exit |
## Environment variables
| Name | Requirement | Description |
|-------------------|--------------------------------------------------|------------------------------------------------------|
| DB_PORT | Optional, default 3306 | Database port number |
| DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name |
| DB_USERNAME | Required | Database user name |
| DB_PASSWORD | Required | Database password |
| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
| AWS_REGION | Optional, required for S3 storage | AWS Region |
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
| GPG_PASSPHRASE    | Optional, required to encrypt and restore backup | GPG passphrase                                        |
| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
| SSH_USER | Optional, required for SSH storage | ssh remote user |
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
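For example, the SSH variables above can be combined to back up to a remote server (a sketch; host, port, paths and credentials are placeholders):
```sh
docker run --rm --network your_network_name \
  --name mysql-bkup \
  -e "DB_HOST=mysql" \
  -e "DB_NAME=database_name" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  -e "SSH_HOST_NAME=192.168.1.10" \
  -e "SSH_PORT=22" \
  -e "SSH_USER=user" \
  -e "SSH_PASSWORD=password" \
  -e "SSH_REMOTE_PATH=/home/user/backup" \
  jkaninda/mysql-bkup mysql-bkup backup --storage ssh -d database_name
```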
---
## Run in Scheduled mode
This image can be run as a CronJob in Kubernetes for regular backups, which makes deployment on Kubernetes easy since Kubernetes provides a CronJob resource.
For Docker, you need to run it in scheduled mode by adding the `--mode scheduled` flag and specifying the backup period with the `--period "0 1 * * *"` flag, as in the example below.
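A sketch, following the conventions of the other examples in this documentation (host, credentials and period are placeholders):
```sh
docker run --rm --network your_network_name \
  --name mysql-bkup \
  -v "$PWD/backup:/backup" \
  -e "DB_HOST=mysql" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup mysql-bkup backup -d database_name --mode scheduled --period "*/30 * * * *"
```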
## Syntax of crontab (field description)
The syntax is:
- 1: Minute (0-59)
- 2: Hour (0-23)
- 3: Day of the month (1-31)
- 4: Month (1-12 [12 == December])
- 5: Day of the week (0-7 [7 or 0 == Sunday])
Easy to remember format:
```conf
* * * * * command to be executed
```
```conf
- - - - -
| | | | |
| | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
| | | ------- Month (1 - 12)
| | --------- Day of month (1 - 31)
| ----------- Hour (0 - 23)
------------- Minute (0 - 59)
```
> At every 30th minute
```conf
*/30 * * * *
```
> At minute 0 of every hour
```conf
0 * * * *
```
> At 01:00 every day
```conf
0 1 * * *
```

View File

@@ -1,21 +1,31 @@
version: "3"
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
privileged: true
devices:
- "/dev/fuse"
command:
- /bin/sh
- -c
- bkup backup --storage s3 --path /mys3_custom_path --dbname database_name
- mysql-bkup backup --storage s3 -d my-database
environment:
- DB_PORT=3306
- DB_HOST=mysqlhost
- DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD}
- ACCESS_KEY=${ACCESS_KEY}
- SECRET_KEY=${SECRET_KEY}
- BUCKETNAME=${BUCKETNAME}
- S3_ENDPOINT=https://s3.us-west-2.amazonaws.com
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS configurations
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION="us-west-2"
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
## If you are using an S3 alternative such as MinIO and your MinIO instance is not secured, change this to true
- AWS_DISABLE_SSL="false"
# The mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:

View File

@@ -6,11 +6,11 @@ services:
command:
- /bin/sh
- -c
- bkup backup --dbname database_name --mode scheduled --period "0 1 * * *"
- mysql-bkup backup --dbname database_name --mode scheduled --period "0 1 * * *"
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysqlhost
- DB_HOST=mysql
- DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD}

View File

@@ -1,21 +1,31 @@
version: "3"
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
privileged: true
devices:
- "/dev/fuse"
command:
- /bin/sh
- -c
- bkup backup --storage s3 --path /mys3_custom_path --dbname database_name --mode scheduled --period "0 1 * * *"
- mysql-bkup backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
environment:
- DB_PORT=3306
- DB_HOST=mysqlhost
- DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD}
- ACCESS_KEY=${ACCESS_KEY}
- SECRET_KEY=${SECRET_KEY}
- BUCKETNAME=${BUCKETNAME}
- S3_ENDPOINT=https://s3.us-west-2.amazonaws.com
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS configurations
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION="us-west-2"
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
## If you are using an S3 alternative such as MinIO and your MinIO instance is not secured, change this to true
- AWS_DISABLE_SSL="false"
# The mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:

View File

@@ -6,11 +6,11 @@ services:
command:
- /bin/sh
- -c
- bkup backup --dbname database_name
- mysql-bkup backup --dbname database_name
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysqlhost
- DB_HOST=mysql
- DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD}

View File

@@ -1,7 +1,7 @@
apiVersion: batch/v1
piVersion: batch/v1
kind: CronJob
metadata:
name: db-bkup-job
name: bkup-job
spec:
schedule: "0 1 * * *"
jobTemplate:
@@ -11,12 +11,10 @@ spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
securityContext:
privileged: true
command:
- /bin/sh
- -c
- bkup backup --storage s3 --path /custom_path
- mysql-bkup backup -s s3 --path /custom_path
env:
- name: DB_PORT
value: "3306"
@@ -28,13 +26,19 @@ spec:
value: ""
# Please use secret!
- name: DB_PASSWORD
value: "password"
value: ""
- name: ACCESS_KEY
value: ""
- name: SECRET_KEY
value: ""
- name: BUCKETNAME
value: ""
- name: S3_ENDPOINT
value: "https://s3.us-west-2.amazonaws.com"
restartPolicy: Never
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: OnFailure

18
go.mod
View File

@@ -1,10 +1,22 @@
module github.com/jkaninda/mysql-bkup
go 1.21.0
go 1.22.5
require github.com/spf13/pflag v1.0.5
require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/spf13/cobra v1.8.0 // indirect
github.com/aws/aws-sdk-go v1.55.3
github.com/bramvdbogaerde/go-scp v1.5.0
github.com/hpcloud/tail v1.0.0
github.com/spf13/cobra v1.8.0
golang.org/x/crypto v0.18.0
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
)
require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
golang.org/x/sys v0.22.0 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
)

28
go.sum
View File

@@ -1,10 +1,38 @@
github.com/aws/aws-sdk-go v1.55.3 h1:0B5hOX+mIx7I5XPOrjrHlKSDQV/+ypFZpIHOx5LOk3E=
github.com/aws/aws-sdk-go v1.55.3/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/bramvdbogaerde/go-scp v1.5.0 h1:a9BinAjTfQh273eh7vd3qUgmBC+bx+3TRDtkZWmIpzM=
github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -1,22 +1,23 @@
// Package pkg /*
/*
Copyright © 2024 Jonas Kaninda <jonaskaninda.gmail.com>
Copyright © 2024 Jonas Kaninda
*/
package pkg
import (
"fmt"
"github.com/hpcloud/tail"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"log"
"os"
"os/exec"
"path/filepath"
"time"
)
func StartBackup(cmd *cobra.Command) {
_, _ = cmd.Flags().GetString("operation")
//Set env
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "dbname", "DB_NAME")
@@ -24,21 +25,41 @@ func StartBackup(cmd *cobra.Command) {
utils.GetEnv(cmd, "period", "SCHEDULE_PERIOD")
//Get flag value and set env
s3Path = utils.GetEnv(cmd, "path", "S3_PATH")
s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
file = utils.GetEnv(cmd, "file", "FILE_NAME")
backupRetention, _ := cmd.Flags().GetInt("keep-last")
prune, _ := cmd.Flags().GetBool("prune")
disableCompression, _ = cmd.Flags().GetBool("disable-compression")
executionMode, _ = cmd.Flags().GetString("mode")
dbName = os.Getenv("DB_NAME")
gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
//
if gpqPassphrase != "" {
encryption = true
}
//Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
if disableCompression {
backupFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
}
if executionMode == "default" {
if storage == "s3" {
utils.Info("Backup database to s3 storage")
s3Backup(disableCompression, s3Path)
} else {
utils.Info("Backup database to local storage")
BackupDatabase(disableCompression)
switch storage {
case "s3":
s3Backup(backupFileName, s3Path, disableCompression, prune, backupRetention, encryption)
case "local":
localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
case "ssh", "remote":
sshBackup(backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
case "ftp":
utils.Fatal("Not supported storage type: %s", storage)
default:
localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
}
} else if executionMode == "scheduled" {
scheduledMode()
} else {
@@ -55,7 +76,6 @@ func scheduledMode() {
fmt.Println(" Starting MySQL Bkup... ")
fmt.Println("***********************************")
utils.Info("Running in Scheduled mode")
utils.Info("Log file in /var/log/mysql-bkup.log")
utils.Info("Execution period ", os.Getenv("SCHEDULE_PERIOD"))
//Test database connexion
@@ -64,35 +84,65 @@ func scheduledMode() {
utils.Info("Creating backup job...")
CreateCrontabScript(disableCompression, storage)
supervisorConfig := "/etc/supervisor/supervisord.conf"
// Start Supervisor
supervisordCmd := exec.Command("supervisord", "-c", "/etc/supervisor/supervisord.conf")
if err := supervisordCmd.Run(); err != nil {
utils.Fatalf("Error starting supervisord: %v\n", err)
cmd := exec.Command("supervisord", "-c", supervisorConfig)
err := cmd.Start()
if err != nil {
utils.Fatal(fmt.Sprintf("Failed to start supervisord: %v", err))
}
utils.Info("Backup job started")
defer func() {
if err := cmd.Process.Kill(); err != nil {
utils.Info("Failed to kill supervisord process: %v", err)
} else {
utils.Info("Supervisor stopped.")
}
}()
if _, err := os.Stat(cronLogFile); os.IsNotExist(err) {
utils.Fatal(fmt.Sprintf("Log file %s does not exist.", cronLogFile))
}
t, err := tail.TailFile(cronLogFile, tail.Config{Follow: true})
if err != nil {
utils.Fatal("Failed to tail file: %v", err)
}
// Read and print new lines from the log file
for line := range t.Lines {
fmt.Println(line.Text)
}
}
// BackupDatabase backup database
func BackupDatabase(disableCompression bool) {
func BackupDatabase(backupFileName string, disableCompression bool) {
dbHost = os.Getenv("DB_HOST")
dbPassword := os.Getenv("DB_PASSWORD")
dbUserName := os.Getenv("DB_USERNAME")
dbPassword = os.Getenv("DB_PASSWORD")
dbUserName = os.Getenv("DB_USERNAME")
dbName = os.Getenv("DB_NAME")
dbPort = os.Getenv("DB_PORT")
storagePath = os.Getenv("STORAGE_PATH")
if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
utils.Fatal("Please make sure all required environment variables for database are set")
} else {
// dbHVars Required environment variables for database
var dbHVars = []string{
"DB_HOST",
"DB_PASSWORD",
"DB_USERNAME",
"DB_NAME",
}
err := utils.CheckEnvVars(dbHVars)
if err != nil {
utils.Error("Please make sure all required environment variables for database are set")
utils.Fatal("Error checking environment variables: %s", err)
}
utils.Info("Starting database backup...")
utils.TestDatabaseConnection()
// Backup Database database
utils.Info("Backing up database...")
//Generate file name
bkFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
// Verify is compression is disabled
if disableCompression {
//Generate file name
bkFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
// Execute mysqldump
cmd := exec.Command("mysqldump",
"-h", dbHost,
@@ -107,7 +157,7 @@ func BackupDatabase(disableCompression bool) {
}
// save output
file, err := os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName))
file, err := os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
if err != nil {
log.Fatal(err)
}
@@ -128,7 +178,7 @@ func BackupDatabase(disableCompression bool) {
}
gzipCmd := exec.Command("gzip")
gzipCmd.Stdin = stdout
gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName))
gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
gzipCmd.Start()
if err != nil {
log.Fatal(err)
@@ -143,20 +193,93 @@ func BackupDatabase(disableCompression bool) {
}
historyFile, err := os.OpenFile(fmt.Sprintf("%s/history.txt", storagePath), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
}
func localBackup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
utils.Info("Backup database to local storage")
BackupDatabase(backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension)
}
utils.Info("Backup name is %s", finalFileName)
moveToBackup(finalFileName, storagePath)
//Delete old backup
if prune {
deleteOldBackup(backupRetention)
}
}
func s3Backup(backupFileName string, s3Path string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
utils.Info("Backup database to s3 storage")
//Backup database
BackupDatabase(backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
}
utils.Info("Uploading backup file to S3 storage...")
utils.Info("Backup name is %s", finalFileName)
err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
if err != nil {
log.Fatal(err)
}
defer historyFile.Close()
if _, err := historyFile.WriteString(bkFileName + "\n"); err != nil {
log.Fatal(err)
}
}
utils.Fatal("Error uploading file to S3: %s ", err)
}
func s3Backup(disableCompression bool, s3Path string) {
// Backup Database to S3 storage
MountS3Storage(s3Path)
BackupDatabase(disableCompression)
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
if err != nil {
fmt.Println("Error deleting file: ", err)
}
// Delete old backup
if prune {
err := utils.DeleteOldBackup(bucket, s3Path, backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from S3: %s ", err)
}
}
utils.Done("Database has been backed up and uploaded to s3 ")
}
func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
utils.Info("Backup database to Remote server")
//Backup database
BackupDatabase(backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
}
utils.Info("Uploading backup file to remote server...")
utils.Info("Backup name is %s", finalFileName)
err := CopyToRemote(finalFileName, remotePath)
if err != nil {
utils.Fatal("Error uploading file to the remote server: %s ", err)
}
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
fmt.Println("Error deleting file: ", err)
}
if prune {
//TODO: Delete old backup from remote server
utils.Info("Deleting old backup from a remote server is not implemented yet")
}
utils.Done("Database has been backed up and uploaded to remote server ")
}
func encryptBackup(backupFileName string) {
gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
if err != nil {
utils.Fatal("Error during encrypting backup %s", err)
}
}
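A sketch of how encryption is enabled from the user's side: per the check above, setting `GPG_PASSPHRASE` to any non-empty value turns encryption on (the passphrase and database name below are placeholders):
```sh
export GPG_PASSPHRASE="my-secure-passphrase"
mysql-bkup backup -d database_name
# The resulting backup is suffixed with .gpg, e.g. database_20240803_150405.sql.gz.gpg
```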

4
pkg/config.go Normal file
View File

@@ -0,0 +1,4 @@
package pkg
type Config struct {
}

48
pkg/encrypt.go Normal file
View File

@@ -0,0 +1,48 @@
package pkg
import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"os"
"os/exec"
"strings"
)
func Decrypt(inputFile string, passphrase string) error {
utils.Info("Decrypting backup file: " + inputFile + " ...")
cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
return err
}
utils.Info("Backup file decrypted successful!")
return nil
}
func Encrypt(inputFile string, passphrase string) error {
utils.Info("Encrypting backup...")
cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
return err
}
utils.Info("Backup file encrypted successful!")
return nil
}
func RemoveLastExtension(filename string) string {
if idx := strings.LastIndex(filename, "."); idx != -1 {
return filename[:idx]
}
return filename
}
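For reference, the `Decrypt` wrapper above is equivalent to running `gpg` by hand with the same flags (the file name below is illustrative):
```sh
# Decrypt an encrypted backup using the same passphrase that produced it
gpg --batch --passphrase "$GPG_PASSPHRASE" \
  --output database_20240803_150405.sql.gz \
  --decrypt database_20240803_150405.sql.gz.gpg
```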

74
pkg/helper.go Normal file
View File

@@ -0,0 +1,74 @@
package pkg
import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"os"
"path/filepath"
"time"
)
func copyToTmp(sourcePath string, backupFileName string) {
//Copy backup from storage to /tmp
err := utils.CopyFile(filepath.Join(sourcePath, backupFileName), filepath.Join(tmpPath, backupFileName))
if err != nil {
utils.Fatal(fmt.Sprintf("Error copying file %s %s", backupFileName, err))
}
}
func moveToBackup(backupFileName string, destinationPath string) {
//Copy backup from tmp folder to storage destination
err := utils.CopyFile(filepath.Join(tmpPath, backupFileName), filepath.Join(destinationPath, backupFileName))
if err != nil {
utils.Fatal(fmt.Sprintf("Error copying file %s %s", backupFileName, err))
}
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
if err != nil {
fmt.Println("Error deleting file:", err)
}
utils.Done("Database has been backed up and copied to %s", filepath.Join(destinationPath, backupFileName))
}
func deleteOldBackup(retentionDays int) {
utils.Info("Deleting old backups...")
storagePath = os.Getenv("STORAGE_PATH")
// Define the directory path
backupDir := storagePath + "/"
// Get current time
currentTime := time.Now()
// Delete file
deleteFile := func(filePath string) error {
err := os.Remove(filePath)
if err != nil {
utils.Fatal(fmt.Sprintf("Error: %s", err))
} else {
utils.Done("File %s has been deleted successfully", filePath)
}
return err
}
// Walk through the directory and delete files modified more than specified days ago
err := filepath.Walk(backupDir, func(filePath string, fileInfo os.FileInfo, err error) error {
if err != nil {
return err
}
// Check if it's a regular file and if it was modified more than specified days ago
if fileInfo.Mode().IsRegular() {
timeDiff := currentTime.Sub(fileInfo.ModTime())
if timeDiff.Hours() > 24*float64(retentionDays) {
err := deleteFile(filePath)
if err != nil {
return err
}
}
}
return nil
})
if err != nil {
utils.Fatal(fmt.Sprintf("Error: %s", err))
return
}
}
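A sketch of how this retention logic is reached from the CLI, using the `--prune` and `--keep-last` flags listed in the option tables earlier in this document (values are placeholders):
```sh
# After a successful backup, delete local backups older than 7 days
mysql-bkup backup -d database_name --prune --keep-last 7
```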

View File

@@ -17,70 +17,118 @@ func StartRestore(cmd *cobra.Command) {
utils.GetEnv(cmd, "port", "DB_PORT")
//Get flag value and set env
s3Path = utils.GetEnv(cmd, "path", "S3_PATH")
s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
file = utils.GetEnv(cmd, "file", "FILE_NAME")
executionMode, _ = cmd.Flags().GetString("mode")
if storage == "s3" {
utils.Info("Restore database from s3")
s3Restore(file, s3Path)
} else {
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
switch storage {
case "s3":
restoreFromS3(file, bucket, s3Path)
case "local":
utils.Info("Restore database from local")
copyToTmp(storagePath, file)
RestoreDatabase(file)
case "ssh":
restoreFromRemote(file, remotePath)
case "ftp":
utils.Fatal("Restore from FTP is not yet supported")
default:
utils.Info("Restore database from local")
RestoreDatabase(file)
}
}
func restoreFromS3(file, bucket, s3Path string) {
utils.Info("Restore database from s3")
err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
if err != nil {
utils.Fatal(fmt.Sprintf("Error download file from s3 %s %s", file, err))
}
RestoreDatabase(file)
}
func restoreFromRemote(file, remotePath string) {
utils.Info("Restore database from remote server")
err := CopyFromRemote(file, remotePath)
if err != nil {
utils.Fatal(fmt.Sprintf("Error download file from remote server: ", filepath.Join(remotePath, file), err))
}
RestoreDatabase(file)
}
// RestoreDatabase restore database
func RestoreDatabase(file string) {
dbHost = os.Getenv("DB_HOST")
dbPassword = os.Getenv("DB_PASSWORD")
dbUserName = os.Getenv("DB_USERNAME")
dbName = os.Getenv("DB_NAME")
dbPort = os.Getenv("DB_PORT")
storagePath = os.Getenv("STORAGE_PATH")
gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
if file == "" {
utils.Fatal("Error required --file")
utils.Fatal("Error, file required")
}
// dbHVars Required environment variables for database
var dbHVars = []string{
"DB_HOST",
"DB_PASSWORD",
"DB_USERNAME",
"DB_NAME",
}
err := utils.CheckEnvVars(dbHVars)
if err != nil {
utils.Error("Please make sure all required environment variables for database are set")
utils.Fatal("Error checking environment variables: %s", err)
}
if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" || file == "" {
utils.Fatal("Please make sure all required environment variables are set")
} else {
extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
if extension == ".gpg" {
if gpgPassphrase == "" {
utils.Fatal("Error: GPG passphrase is required, your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE environment variable is required.")
if utils.FileExists(fmt.Sprintf("%s/%s", storagePath, file)) {
} else {
//Decrypt file
err := Decrypt(filepath.Join(tmpPath, file), gpgPassphrase)
if err != nil {
utils.Fatal("Error decrypting file ", file, err)
}
//Update file name
file = RemoveLastExtension(file)
}
}
if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
err := os.Setenv("mysqlPASSWORD", dbPassword)
if err != nil {
return
}
utils.TestDatabaseConnection()
extension := filepath.Ext(fmt.Sprintf("%s/%s", storagePath, file))
extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
// Restore from compressed file / .sql.gz
if extension == ".gz" {
str := "zcat " + fmt.Sprintf("%s/%s", storagePath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
_, err := exec.Command("bash", "-c", str).Output()
if err != nil {
utils.Fatal("Error, in restoring the database")
utils.Fatal(fmt.Sprintf("Error, in restoring the database %s", err))
}
utils.Done("Database has been restored")
} else if extension == ".sql" {
//Restore from sql file
str := "cat " + fmt.Sprintf("%s/%s", storagePath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
str := "cat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
_, err := exec.Command("bash", "-c", str).Output()
if err != nil {
utils.Fatal("Error, in restoring the database", err)
utils.Fatal(fmt.Sprintf("Error in restoring the database %s", err))
}
utils.Done("Database has been restored")
} else {
utils.Fatal("Unknown file extension ", extension)
utils.Fatal(fmt.Sprintf("Unknown file extension %s", extension))
}
} else {
utils.Fatal("File not found in ", fmt.Sprintf("%s/%s", storagePath, file))
}
utils.Fatal(fmt.Sprintf("File not found in %s", fmt.Sprintf("%s/%s", tmpPath, file)))
}
}
func s3Restore(file, s3Path string) {
// Restore database from S3
MountS3Storage(s3Path)
RestoreDatabase(file)
}

View File

@@ -1,77 +0,0 @@
// Package pkg /*
/*
Copyright © 2024 Jonas Kaninda <jonaskaninda.gmail.com>
*/
package pkg
import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"os"
"os/exec"
)
var (
accessKey = ""
secretKey = ""
bucketName = ""
s3Endpoint = ""
)
func S3Mount() {
MountS3Storage(s3Path)
}
// MountS3Storage Mount s3 storage using s3fs
func MountS3Storage(s3Path string) {
accessKey = os.Getenv("ACCESS_KEY")
secretKey = os.Getenv("SECRET_KEY")
bucketName = os.Getenv("BUCKETNAME")
s3Endpoint = os.Getenv("S3_ENDPOINT")
if accessKey == "" || secretKey == "" || bucketName == "" {
utils.Fatal("Please make sure all environment variables are set")
} else {
storagePath := fmt.Sprintf("%s%s", s3MountPath, s3Path)
err := os.Setenv("STORAGE_PATH", storagePath)
if err != nil {
return
}
//Write file
err = utils.WriteToFile(s3fsPasswdFile, fmt.Sprintf("%s:%s", accessKey, secretKey))
if err != nil {
utils.Fatal("Error creating file")
}
//Change file permission
utils.ChangePermission(s3fsPasswdFile, 0600)
//Mount object storage
utils.Info("Mounting Object storage in ", s3MountPath)
if isEmpty, _ := utils.IsDirEmpty(s3MountPath); isEmpty {
cmd := exec.Command("s3fs", bucketName, s3MountPath,
"-o", "passwd_file="+s3fsPasswdFile,
"-o", "use_cache=/tmp/s3cache",
"-o", "allow_other",
"-o", "url="+s3Endpoint,
"-o", "use_path_request_style",
)
if err := cmd.Run(); err != nil {
utils.Fatal("Error mounting Object storage:", err)
}
if err := os.MkdirAll(storagePath, os.ModePerm); err != nil {
utils.Fatalf("Error creating directory %v %v", storagePath, err)
}
} else {
utils.Info("Object storage already mounted in " + s3MountPath)
if err := os.MkdirAll(storagePath, os.ModePerm); err != nil {
utils.Fatal("Error creating directory "+storagePath, err)
}
}
}
}

117
pkg/scp.go Normal file
View File

@@ -0,0 +1,117 @@
package pkg
import (
"context"
"errors"
"fmt"
"github.com/bramvdbogaerde/go-scp"
"github.com/bramvdbogaerde/go-scp/auth"
"github.com/jkaninda/mysql-bkup/utils"
"golang.org/x/crypto/ssh"
"golang.org/x/exp/slog"
"os"
"path/filepath"
)
func CopyToRemote(fileName, remotePath string) error {
sshUser := os.Getenv("SSH_USER")
sshPassword := os.Getenv("SSH_PASSWORD")
sshHostName := os.Getenv("SSH_HOST_NAME")
sshPort := os.Getenv("SSH_PORT")
sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
// SSSHVars Required environment variables for SSH remote server storage
var sshHVars = []string{
"SSH_USER",
"SSH_REMOTE_PATH",
"SSH_HOST_NAME",
"SSH_PORT",
}
err := utils.CheckEnvVars(sshHVars)
if err != nil {
slog.Error(fmt.Sprintf("Error checking environment variables\n: %s", err))
os.Exit(1)
}
clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
} else {
if sshPassword == "" {
return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n")
}
slog.Warn("Accessing the remote server using password, password is not recommended\n")
clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
}
// Create a new SCP client
client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
// Connect to the remote server
err = client.Connect()
if err != nil {
return errors.New("Couldn't establish a connection to the remote server\n")
}
// Open a file
file, _ := os.Open(filepath.Join(tmpPath, fileName))
// Close client connection after the file has been copied
defer client.Close()
// Close the file after it has been copied
defer file.Close()
// the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
err = client.CopyFromFile(context.Background(), *file, filepath.Join(remotePath, fileName), "0655")
if err != nil {
fmt.Println("Error while copying file ")
return err
}
return nil
}
func CopyFromRemote(fileName, remotePath string) error {
sshUser := os.Getenv("SSH_USER")
sshPassword := os.Getenv("SSH_PASSWORD")
sshHostName := os.Getenv("SSH_HOST_NAME")
sshPort := os.Getenv("SSH_PORT")
sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
} else {
if sshPassword == "" {
return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n")
}
slog.Warn("Accessing the remote server using password, password is not recommended\n")
clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
}
// Create a new SCP client
client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
// Connect to the remote server
err := client.Connect()
if err != nil {
return errors.New("Couldn't establish a connection to the remote server\n")
}
// Close client connection after the file has been copied
defer client.Close()
file, err := os.OpenFile(filepath.Join(tmpPath, fileName), os.O_RDWR|os.O_CREATE, 0777)
if err != nil {
fmt.Println("Couldn't open the output file")
}
defer file.Close()
// the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
err = client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName))
if err != nil {
fmt.Println("Error while copying file ", err)
return err
}
return nil
}

View File

@@ -2,7 +2,7 @@ package pkg
// Package pkg /*
/*
Copyright © 2024 Jonas Kaninda <jonaskaninda.gmail.com>
Copyright © 2024 Jonas Kaninda
*/
import (
"fmt"
@@ -11,14 +11,11 @@ import (
"os/exec"
)
const cronLogFile = "/var/log/mysql-bkup.log"
const backupCronFile = "/usr/local/bin/backup_cron.sh"
func CreateCrontabScript(disableCompression bool, storage string) {
//task := "/usr/local/bin/backup_cron.sh"
touchCmd := exec.Command("touch", backupCronFile)
if err := touchCmd.Run(); err != nil {
utils.Fatalf("Error creating file %s: %v\n", backupCronFile, err)
utils.Fatal("Error creating file %s: %v\n", backupCronFile, err)
}
var disableC = ""
if disableCompression {
@@ -40,31 +37,36 @@ bkup backup --dbname %s --port %s %v
}
if err := utils.WriteToFile(backupCronFile, scriptContent); err != nil {
utils.Fatalf("Error writing to %s: %v\n", backupCronFile, err)
utils.Fatal("Error writing to %s: %v\n", backupCronFile, err)
}
chmodCmd := exec.Command("chmod", "+x", "/usr/local/bin/backup_cron.sh")
if err := chmodCmd.Run(); err != nil {
utils.Fatalf("Error changing permissions of %s: %v\n", backupCronFile, err)
utils.Fatal("Error changing permissions of %s: %v\n", backupCronFile, err)
}
lnCmd := exec.Command("ln", "-s", "/usr/local/bin/backup_cron.sh", "/usr/local/bin/backup_cron")
if err := lnCmd.Run(); err != nil {
utils.Fatalf("Error creating symbolic link: %v\n", err)
utils.Fatal("Error creating symbolic link: %v\n", err)
}
touchLogCmd := exec.Command("touch", cronLogFile)
if err := touchLogCmd.Run(); err != nil {
utils.Fatal("Error creating file %s: %v\n", cronLogFile, err)
}
cronJob := "/etc/cron.d/backup_cron"
touchCronCmd := exec.Command("touch", cronJob)
if err := touchCronCmd.Run(); err != nil {
utils.Fatalf("Error creating file %s: %v\n", cronJob, err)
utils.Fatal("Error creating file %s: %v\n", cronJob, err)
}
cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
`, os.Getenv("SCHEDULE_PERIOD"), cronLogFile)
if err := utils.WriteToFile(cronJob, cronContent); err != nil {
utils.Fatalf("Error writing to %s: %v\n", cronJob, err)
utils.Fatal("Error writing to %s: %v\n", cronJob, err)
}
utils.ChangePermission("/etc/cron.d/backup_cron", 0644)
@@ -72,5 +74,5 @@ bkup backup --dbname %s --port %s %v
if err := crontabCmd.Run(); err != nil {
utils.Fatal("Error updating crontab: ", err)
}
utils.Info("Starting backup in scheduled mode")
utils.Info("Backup job created.")
}

View File

@@ -1,16 +1,21 @@
package pkg
const s3MountPath string = "/s3mnt"
const s3fsPasswdFile string = "/etc/passwd-s3fs"
const cronLogFile = "/var/log/mysql-bkup.log"
const tmpPath = "/tmp/backup"
const backupCronFile = "/usr/local/bin/backup_cron.sh"
const algorithm = "aes256"
const gpgExtension = "gpg"
var (
storage = "local"
file = ""
s3Path = "/mysql-bkup"
dbPassword = ""
dbUserName = ""
dbName = ""
dbHost = ""
dbPort = "3306"
executionMode = "default"
storagePath = "/backup"
disableCompression = false
encryption = false
)

View File

@@ -1,11 +1,5 @@
package utils
const Notice = "Please remove --operation flag.\n" +
"Use: \n" +
"- backup for database backup operation [eg: bkup backup -d database_name ...]\n" +
"- restore for database restore operation [eg. bkup restore -d database_name ...]\n" +
"Example: bkup backup --storage s3 ...( instead of < bkup --operation backup >)\n" +
"We are sorry for this inconvenient\n"
const RestoreExample = "mysql-bkup restore --dbname database --file db_20231219_022941.sql.gz\n" +
"bkup restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
const BackupExample = "mysql-bkup backup --dbname database --disable-compression\n" +

171
utils/s3.go Normal file
View File

@@ -0,0 +1,171 @@
package utils
import (
"bytes"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"golang.org/x/exp/slog"
"log"
"net/http"
"os"
"path/filepath"
"strconv"
"time"
)
// CreateSession creates a new AWS session
func CreateSession() (*session.Session, error) {
// AwsVars Required environment variables for AWS S3 storage
var awsVars = []string{
"AWS_S3_ENDPOINT",
"AWS_S3_BUCKET_NAME",
"AWS_ACCESS_KEY",
"AWS_SECRET_KEY",
"AWS_REGION",
"AWS_REGION",
"AWS_REGION",
}
endPoint := GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
accessKey := GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
secretKey := GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
_ = GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
region := os.Getenv("AWS_REGION")
awsDisableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
if err != nil {
Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
}
err = CheckEnvVars(awsVars)
if err != nil {
slog.Error(fmt.Sprintf("Error checking environment variables\n: %s", err))
os.Exit(1)
}
// S3 Config
s3Config := &aws.Config{
Credentials: credentials.NewStaticCredentials(accessKey, secretKey, ""),
Endpoint: aws.String(endPoint),
Region: aws.String(region),
DisableSSL: aws.Bool(awsDisableSsl),
S3ForcePathStyle: aws.Bool(true),
}
return session.NewSession(s3Config)
}
// UploadFileToS3 uploads a file to S3 with a given prefix
func UploadFileToS3(filePath, key, bucket, prefix string) error {
sess, err := CreateSession()
if err != nil {
return err
}
svc := s3.New(sess)
file, err := os.Open(filepath.Join(filePath, key))
if err != nil {
return err
}
defer file.Close()
fileInfo, err := file.Stat()
if err != nil {
return err
}
objectKey := filepath.Join(prefix, key)
buffer := make([]byte, fileInfo.Size())
file.Read(buffer)
fileBytes := bytes.NewReader(buffer)
fileType := http.DetectContentType(buffer)
_, err = svc.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(objectKey),
Body: fileBytes,
ContentLength: aws.Int64(fileInfo.Size()),
ContentType: aws.String(fileType),
})
if err != nil {
return err
}
return nil
}
func DownloadFile(destinationPath, key, bucket, prefix string) error {
sess, err := CreateSession()
if err != nil {
return err
}
Info("Download backup from S3 storage...")
file, err := os.Create(filepath.Join(destinationPath, key))
if err != nil {
fmt.Println("Failed to create file", err)
return err
}
defer file.Close()
objectKey := filepath.Join(prefix, key)
downloader := s3manager.NewDownloader(sess)
numBytes, err := downloader.Download(file,
&s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(objectKey),
})
if err != nil {
fmt.Println("Failed to download file", err)
return err
}
Info(fmt.Sprintf("Backup downloaded: ", file.Name(), " bytes size ", numBytes))
return nil
}
func DeleteOldBackup(bucket, prefix string, retention int) error {
sess, err := CreateSession()
if err != nil {
return err
}
svc := s3.New(sess)
// Get the current time and the cutoff time for the configured retention period
now := time.Now()
backupRetentionDays := now.AddDate(0, 0, -retention)
// List objects in the bucket
listObjectsInput := &s3.ListObjectsV2Input{
Bucket: aws.String(bucket),
Prefix: aws.String(prefix),
}
err = svc.ListObjectsV2Pages(listObjectsInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
for _, object := range page.Contents {
if object.LastModified.Before(backupRetentionDays) {
// Object is older than retention days, delete it
_, err := svc.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(bucket),
Key: object.Key,
})
if err != nil {
log.Printf("Failed to delete object %s: %v", *object.Key, err)
} else {
fmt.Printf("Deleted object %s\n", *object.Key)
}
}
}
return !lastPage
})
if err != nil {
log.Fatalf("Failed to list objects: %v", err)
}
fmt.Println("Finished deleting old files.")
return nil
}
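A sketch of the environment this session setup expects. Note that `AWS_DISABLE_SSL` must be set to a value `strconv.ParseBool` accepts, since an empty value makes the parse fail; all values below are placeholders:
```sh
export AWS_S3_ENDPOINT="https://s3.amazonaws.com"
export AWS_S3_BUCKET_NAME="backup"
export AWS_ACCESS_KEY="xxxx"
export AWS_SECRET_KEY="xxxx"
export AWS_REGION="us-west-2"
export AWS_DISABLE_SSL="false"
mysql-bkup backup --storage s3 --path /custom_path -d database_name
```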

View File

@@ -7,25 +7,51 @@ package utils
* @link https://github.com/jkaninda/mysql-bkup
**/
import (
"bytes"
"fmt"
"github.com/spf13/cobra"
"golang.org/x/exp/slog"
"io"
"io/fs"
"os"
"os/exec"
)
func Info(v ...any) {
fmt.Println("⒤ ", fmt.Sprint(v...))
func Info(msg string, args ...any) {
if len(args) == 0 {
slog.Info(msg)
} else {
slog.Info(fmt.Sprintf(msg, args...))
}
func Done(v ...any) {
fmt.Println("✔ ", fmt.Sprint(v...))
}
func Fatal(v ...any) {
fmt.Println("✘ ", fmt.Sprint(v...))
os.Exit(1)
func Worn(msg string, args ...any) {
if len(args) == 0 {
slog.Warn(msg)
} else {
slog.Warn(fmt.Sprintf(msg, args...))
}
}
func Error(msg string, args ...any) {
if len(args) == 0 {
slog.Error(msg)
} else {
slog.Error(fmt.Sprintf(msg, args...))
}
}
func Done(msg string, args ...any) {
if len(args) == 0 {
slog.Info(msg)
} else {
slog.Info(fmt.Sprintf(msg, args...))
}
}
func Fatal(msg string, args ...any) {
// Fatal logs an error message and exits the program.
if len(args) == 0 {
slog.Error(msg)
} else {
slog.Error(fmt.Sprintf(msg, args...))
}
func Fatalf(msg string, v ...any) {
fmt.Printf("✘ "+msg, v...)
os.Exit(1)
}
@@ -47,9 +73,45 @@ func WriteToFile(filePath, content string) error {
_, err = file.WriteString(content)
return err
}
func DeleteFile(filePath string) error {
err := os.Remove(filePath)
if err != nil {
return fmt.Errorf("failed to delete file: %v", err)
}
return nil
}
func CopyFile(src, dst string) error {
// Open the source file for reading
sourceFile, err := os.Open(src)
if err != nil {
return fmt.Errorf("failed to open source file: %v", err)
}
defer sourceFile.Close()
// Create the destination file
destinationFile, err := os.Create(dst)
if err != nil {
return fmt.Errorf("failed to create destination file: %v", err)
}
defer destinationFile.Close()
// Copy the content from source to destination
_, err = io.Copy(destinationFile, sourceFile)
if err != nil {
return fmt.Errorf("failed to copy file: %v", err)
}
// Flush the buffer to ensure all data is written
err = destinationFile.Sync()
if err != nil {
return fmt.Errorf("failed to sync destination file: %v", err)
}
return nil
}
func ChangePermission(filePath string, mod int) {
if err := os.Chmod(filePath, fs.FileMode(mod)); err != nil {
Fatalf("Error changing permissions of %s: %v\n", filePath, err)
Fatal("Error changing permissions of %s: %v\n", filePath, err)
}
}
@@ -69,15 +131,31 @@ func IsDirEmpty(name string) (bool, error) {
// TestDatabaseConnection tests the database connection
func TestDatabaseConnection() {
Info("Testing database connection...")
// Test database connection
cmd := exec.Command("mysql", "-h", os.Getenv("DB_HOST"), "-P", os.Getenv("DB_PORT"), "-u", os.Getenv("DB_USERNAME"), "--password="+os.Getenv("DB_PASSWORD"), os.Getenv("DB_NAME"), "-e", "quit")
dbHost := os.Getenv("DB_HOST")
dbPassword := os.Getenv("DB_PASSWORD")
dbUserName := os.Getenv("DB_USERNAME")
dbName := os.Getenv("DB_NAME")
dbPort := os.Getenv("DB_PORT")
if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
Fatal("Please make sure all required database environment variables are set")
} else {
Info("Connecting to database ...")
cmd := exec.Command("mysql", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName, "-e", "quit")
// Capture the output
var out bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &out
err := cmd.Run()
if err != nil {
Fatal("Error testing database connection:", err)
slog.Error(fmt.Sprintf("Error testing database connection: %v\nOutput: %s\n", err, out.String()))
os.Exit(1)
}
Info("Successfully connected to database")
}
}
func GetEnv(cmd *cobra.Command, flagName, envName string) string {
value, _ := cmd.Flags().GetString(flagName)
@@ -109,6 +187,37 @@ func SetEnv(key, value string) {
return
}
}
func GetEnvVariable(envName, oldEnvName string) string {
value := os.Getenv(envName)
if value == "" {
value = os.Getenv(oldEnvName)
if value != "" {
err := os.Setenv(envName, value)
if err != nil {
return value
}
Worn("%s is deprecated, please use %s instead!\n", oldEnvName, envName)
}
}
return value
}
func ShowHistory() {
}
// CheckEnvVars checks if all the specified environment variables are set
func CheckEnvVars(vars []string) error {
missingVars := []string{}
for _, v := range vars {
if os.Getenv(v) == "" {
missingVars = append(missingVars, v)
}
}
if len(missingVars) > 0 {
return fmt.Errorf("missing environment variables: %v", missingVars)
}
return nil
}