diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml
new file mode 100644
index 0000000..17ae6ed
--- /dev/null
+++ b/.github/workflows/deploy-docs.yml
@@ -0,0 +1,55 @@
+name: Deploy Documentation site to GitHub Pages
+
+on:
+ push:
+ branches: ['main']
+ paths:
+ - 'docs/**'
+ - '.github/workflows/deploy-docs.yml'
+ workflow_dispatch:
+
+permissions:
+ contents: read
+ pages: write
+ id-token: write
+
+concurrency:
+ group: 'pages'
+ cancel-in-progress: true
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: Setup Ruby
+ uses: ruby/setup-ruby@v1
+ with:
+ ruby-version: '3.2'
+ bundler-cache: true
+ cache-version: 0
+ working-directory: docs
+ - name: Setup Pages
+ id: pages
+ uses: actions/configure-pages@v2
+ - name: Build with Jekyll
+ working-directory: docs
+ run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
+ env:
+ JEKYLL_ENV: production
+ - name: Upload artifact
+ uses: actions/upload-pages-artifact@v1
+ with:
+ path: 'docs/_site/'
+
+ deploy:
+ environment:
+ name: github-pages
+ url: ${{ steps.deployment.outputs.page_url }}
+ runs-on: ubuntu-latest
+ needs: build
+ steps:
+ - name: Deploy to GitHub Pages
+ id: deployment
+ uses: actions/deploy-pages@v1
\ No newline at end of file
diff --git a/.github/workflows/build.yml b/.github/workflows/release.yml
similarity index 56%
rename from .github/workflows/build.yml
rename to .github/workflows/release.yml
index 6d35878..206af8e 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/release.yml
@@ -1,19 +1,16 @@
-name: Build
+name: Release
on:
push:
- branches: [ "main" ]
- workflow_dispatch:
- inputs:
- docker_tag:
- description: 'Docker tag'
- required: true
- default: 'latest'
- type: string
+ tags:
+ - v**
env:
BUILDKIT_IMAGE: jkaninda/mysql-bkup
jobs:
docker:
runs-on: ubuntu-latest
+ permissions:
+ packages: write
+ contents: read
steps:
-
name: Set up QEMU
@@ -27,13 +24,22 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
+ - name: Log in to GHCR
+ uses: docker/login-action@v2
+ with:
+ registry: ghcr.io
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
-
name: Build and push
uses: docker/build-push-action@v3
with:
push: true
file: "./docker/Dockerfile"
- platforms: linux/amd64,linux/arm64
+ platforms: linux/amd64,linux/arm64,linux/arm/v7
tags: |
- "${{env.BUILDKIT_IMAGE}}:v0.7"
+ "${{env.BUILDKIT_IMAGE}}:${{env.GITHUB_REF_NAME}}"
"${{env.BUILDKIT_IMAGE}}:latest"
+ "ghcr.io/${{env.BUILDKIT_IMAGE}}:${{env.GITHUB_REF_NAME}}"
+ "ghcr.io/${{env.BUILDKIT_IMAGE}}:latest"
+
diff --git a/Makefile b/Makefile
index be1549f..834a9c9 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@ BINARY_NAME=mysql-bkup
include .env
export
run:
- go run .
+ go run . backup
build:
go build -o bin/${BINARY_NAME} .
@@ -17,16 +17,30 @@ docker-build:
docker build -f docker/Dockerfile -t jkaninda/mysql-bkup:latest .
docker-run: docker-build
- docker run --rm --network internal --privileged --device /dev/fuse --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" jkaninda/mysql-bkup bkup backup --prune --keep-last 2
+ docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --prune --keep-last 2
+docker-restore: docker-build
+ docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup restore -f ${FILE_NAME}
docker-run-scheduled: docker-build
- docker run --rm --network internal --privileged --device /dev/fuse --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -v "./backup:/backup" jkaninda/mysql-bkup bkup backup --prune --keep-last=2 --mode scheduled --period "* * * * *"
+ docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --mode scheduled --period "* * * * *"
docker-run-scheduled-s3: docker-build
- docker run --rm --network internal --privileged --device /dev/fuse --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" jkaninda/mysql-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
+ docker run --rm --network web --user 1000:1000 --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
+
+docker-run-s3: docker-build
+ docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "AWS_S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage s3 --path /custom-path
+
docker-restore-s3: docker-build
- docker run --rm --network internal --privileged --device /dev/fuse --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "FILE_NAME=${FILE_NAME}" jkaninda/mysql-bkup bkup restore --storage s3 --path /custom-path
+ docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup restore --storage s3 -f ${FILE_NAME} --path /custom-path
+docker-run-ssh: docker-build
+ docker run --rm --network web -v "${SSH_IDENTIFY_FILE_LOCAL}:" --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage ssh
+
+docker-restore-ssh: docker-build
+ docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" jkaninda/mysql-bkup bkup restore --storage ssh -f ${FILE_NAME}
+
+run-docs:
+ cd docs && bundle exec jekyll serve -H 0.0.0.0 -t
\ No newline at end of file
diff --git a/README.md b/README.md
index a06d830..a8919d1 100644
--- a/README.md
+++ b/README.md
@@ -1,338 +1,95 @@
# MySQL Backup
-MySQL Backup and Restoration tool. Backup database to AWS S3 storage or any S3 Alternatives for Object Storage.
+mysql-bkup is a Docker container image that can be used to backup and restore MySQL databases. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
+It also supports __encrypting__ your backups using GPG.
-[](https://github.com/jkaninda/mysql-bkup/actions/workflows/build.yml)
+The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
+It handles __recurring__ backups of MySQL databases on Docker and can be deployed as a __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
+
+[](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
[](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)


-
-
-
-
-
-
-> Runs on:
- Docker
- Kubernetes
-> Links:
+## Documentation
+
+Documentation is available at https://jkaninda.github.io/mysql-bkup
+
+## Links:
+
- [Docker Hub](https://hub.docker.com/r/jkaninda/mysql-bkup)
- [Github](https://github.com/jkaninda/mysql-bkup)
-## PostgreSQL solution :
-
-- [PostgreSQL](https://github.com/jkaninda/pg-bkup)
-
+## PostgreSQL solution :
+
+- [PostgreSQL](https://github.com/jkaninda/pg-bkup)
## Storage:
-- local
-- s3
-- Object storage
+- Local
+- AWS S3 or any S3 Alternatives for Object Storage
+- SSH
-## Volumes:
+## Quickstart
-- /s3mnt => S3 mounting path
-- /backup => local storage mounting path
+### Simple backup using Docker CLI
-## Usage
+To run a one-time backup, bind your local volume to `/backup` in the container and run the `mysql-bkup backup` command:
-| Options | Shorts | Usage |
-|-----------------------|--------|------------------------------------------------------------------------|
-| mysql-bkup | bkup | CLI utility |
-| backup | | Backup database operation |
-| restore | | Restore database operation |
-| history | | Show the history of backup |
-| --storage | -s | Set storage. local or s3 (default: local) |
-| --file | -f | Set file name for restoration |
-| --path | | Set s3 path without file name. eg: /custom_path |
-| --dbname | -d | Set database name |
-| --port | -p | Set database port (default: 3306) |
-| --mode | -m | Set execution mode. default or scheduled (default: default) |
-| --disable-compression | | Disable database backup compression |
-| --prune | | Delete old backup, default disabled |
-| --keep-last | | Delete old backup created more than specified days ago, default 7 days |
-| --period | | Set crontab period for scheduled mode only. (default: "0 1 * * *") |
-| --timeout | -t | Set timeout (default: 60s) |
-| --help | -h | Print this help message and exit |
-| --version | -V | Print version information and exit |
-
-
-## Environment variables
-
-| Name | Requirement | Description |
-|-------------|--------------------------------------------------|-----------------------------------------------------|
-| DB_PORT | Optional, default 3306 | Database port number |
-| DB_HOST | Required | Database host |
-| DB_NAME | Optional if it was provided from the -d flag | Database name |
-| DB_USERNAME | Required | Database user name |
-| DB_PASSWORD | Required | Database password |
-| ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
-| SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
-| BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
-| S3_ENDPOINT | Optional, required for S3 storage | AWS S3 Endpoint |
-| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz |
-
-## Note:
-
-Creating a user for backup tasks who has read-only access is recommended!
-
-> create read-only user
-
-```sh
-mysql -u root -p
+```shell
+ docker run --rm --network your_network_name \
+ -v $PWD/backup:/backup/ \
+ -e "DB_HOST=dbhost" \
+ -e "DB_USERNAME=username" \
+ -e "DB_PASSWORD=password" \
+ jkaninda/mysql-bkup mysql-bkup backup -d database_name
```
-```sql
-CREATE USER read_only_user IDENTIFIED BY 'your_strong_password';
+Alternatively, pass a `--env-file` in order to use a full config as described below.
-```
-```sql
-GRANT SELECT, SHOW VIEW ON *.* TO read_only_user;
-```
-```sql
-FLUSH PRIVILEGES;
-```
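+A minimal sketch of the same run driven by an env file (the file name `backup.env` and its contents are illustrative):
+
+```shell
+# backup.env -- assumed contents, matching the variables used above
+# DB_HOST=dbhost
+# DB_USERNAME=username
+# DB_PASSWORD=password
+docker run --rm --network your_network_name \
+  -v $PWD/backup:/backup/ \
+  --env-file backup.env \
+  jkaninda/mysql-bkup mysql-bkup backup -d database_name
+```
+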
+### Simple backup in a docker compose file
-## Backup database :
-
-Simple backup usage
-
-```sh
-mysql-bkup backup --dbname database_name
-```
-```sh
-mysql-bkup backup -d database_name
-```
-### S3
-
-```sh
-mysql-bkup backup --storage s3 --dbname database_name
-```
-## Docker run:
-
-```sh
-docker run --rm --network your_network_name --name mysql-bkup -v $PWD/backup:/backup/ -e "DB_HOST=database_host_name" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" jkaninda/mysql-bkup:latest mysql-bkup backup -d database_name
-```
-
-## Docker compose file:
```yaml
-version: '3'
services:
- mariadb:
- container_name: mariadb
- image: mariadb
- environment:
- MYSQL_DATABASE: mariadb
- MYSQL_USER: mariadb
- MYSQL_PASSWORD: password
- MYSQL_ROOT_PASSWORD: password
mysql-bkup:
+ # In production, it is advised to lock your image tag to a proper
+ # release version instead of using `latest`.
+ # Check https://github.com/jkaninda/mysql-bkup/releases
+ # for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- - mysql-bkup backup -d database_name
+ - mysql-bkup backup
volumes:
- ./backup:/backup
environment:
- - DB_PORT=3306
- - DB_HOST=mariadb
- - DB_USERNAME=mariadb
+      - DB_PORT=3306
+      - DB_HOST=mysql
+ - DB_NAME=foo
+ - DB_USERNAME=bar
- DB_PASSWORD=password
+      # mysql-bkup container must be connected to the same network as your database
+ networks:
+ - web
+networks:
+ web:
```
-## Restore database :
+## Deploy on Kubernetes
-Simple database restore operation usage
+For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as a CronJob.
-```sh
-mysql-bkup restore --dbname database_name --file database_20231217_115621.sql
-```
-
-```sh
-mysql-bkup restore -f database_20231217_115621.sql
-```
-### S3
-
-```sh
-mysql-bkup restore --storage s3 --file database_20231217_115621.sql
-```
-
-## Docker run:
-
-```sh
-docker run --rm --network your_network_name --name mysql-bkup -v $PWD/backup:/backup/ -e "DB_HOST=database_host_name" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" jkaninda/mysql-bkup mysql-bkup backup -d database_name -f db_20231219_022941.sql.gz
-```
-
-## Docker compose file:
-
-```yaml
-version: '3'
-services:
- mariadb:
- container_name: mariadb
- image: mariadb:latest
- environment:
- MYSQL_DATABASE: mariadb
- MYSQL_USER: mariadb
- MYSQL_PASSWORD: password
- MYSQL_ROOT_PASSWORD: password
- mysql-bkup:
- image: jkaninda/mysql-bkup
- container_name: mysql-bkup
- command:
- - /bin/sh
- - -c
- - mysql-bkup restore --file database_20231217_115621.sql --dbname database_name
- volumes:
- - ./backup:/backup
- environment:
- #- FILE_NAME=mariadb_20231217_040238.sql # Optional if file name is set from command
- - DB_PORT=3306
- - DB_HOST=mariadb
- - DB_NAME=mariadb
- - DB_USERNAME=mariadb
- - DB_PASSWORD=password
-```
-## Run
-
-```sh
-docker-compose up -d
-```
-## Backup to S3
-
-```sh
-docker run --rm --privileged --device /dev/fuse --name mysql-bkup -e "DB_HOST=db_hostname" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" -e "ACCESS_KEY=your_access_key" -e "SECRET_KEY=your_secret_key" -e "BUCKETNAME=your_bucket_name" -e "S3_ENDPOINT=https://s3.us-west-2.amazonaws.com" jkaninda/mysql-bkup mysql-bkup backup -s s3 -d database_name
-```
-> To change s3 backup path add this flag : --path /myPath . default path is /mysql_bkup
-
-Simple S3 backup usage
-
-```sh
-bkup backup --storage s3 --dbname mydatabase
-```
-```yaml
-version: '3'
-services:
- mysql-bkup:
- image: jkaninda/mysql-bkup
- container_name: mysql-bkup
- privileged: true
- devices:
- - "/dev/fuse"
- command:
- - /bin/sh
- - -c
- - mysql-bkup restore --storage s3 -f database_20231217_115621.sql.gz
- environment:
- - DB_PORT=3306
- - DB_HOST=mysql
- - DB_NAME=mariadb
- - DB_USERNAME=mariadb
- - DB_PASSWORD=password
- - ACCESS_KEY=${ACCESS_KEY}
- - SECRET_KEY=${SECRET_KEY}
- - BUCKET_NAME=${BUCKET_NAME}
- - S3_ENDPOINT=${S3_ENDPOINT}
-
-```
-## Run in Scheduled mode
-
-This tool can be run as CronJob in Kubernetes for a regular backup which makes deployment on Kubernetes easy as Kubernetes has CronJob resources.
-For Docker, you need to run it in scheduled mode by adding `--mode scheduled` flag and specify the periodical backup time by adding `--period "0 1 * * *"` flag.
-
-Make an automated backup on Docker
-
-## Syntax of crontab (field description)
-
-The syntax is:
-
-- 1: Minute (0-59)
-- 2: Hours (0-23)
-- 3: Day (0-31)
-- 4: Month (0-12 [12 == December])
-- 5: Day of the week(0-7 [7 or 0 == sunday])
-
-Easy to remember format:
-
-```conf
-* * * * * command to be executed
-```
-
-```conf
-- - - - -
-| | | | |
-| | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
-| | | ------- Month (1 - 12)
-| | --------- Day of month (1 - 31)
-| ----------- Hour (0 - 23)
-------------- Minute (0 - 59)
-```
-
-> At every 30th minute
-
-```conf
-*/30 * * * *
-```
-> “At minute 0.” every hour
-```conf
-0 * * * *
-```
-
-> “At 01:00.” every day
-
-```conf
-0 1 * * *
-```
-
-## Example of scheduled mode
-
-> Docker run :
-
-```sh
-docker run --rm --name mysql-bkup -v $BACKUP_DIR:/backup/ -e "DB_HOST=$DB_HOST" -e "DB_USERNAME=$DB_USERNAME" -e "DB_PASSWORD=$DB_PASSWORD" jkaninda/mysql-bkup mysql-bkup backup --dbname $DB_NAME --mode scheduled --period "0 1 * * *"
-```
-
-> With Docker compose
-
-```yaml
-version: "3"
-services:
- mysql-bkup:
- image: jkaninda/mysql-bkup
- container_name: mysql-bkup
- privileged: true
- devices:
- - "/dev/fuse"
- command:
- - /bin/sh
- - -c
- - mysql-bkup backup --storage s3 --path /mys3_custome_path --dbname database_name --mode scheduled --period "*/30 * * * *"
- environment:
- - DB_PORT=3306
- - DB_HOST=mysqlhost
- - DB_USERNAME=userName
- - DB_PASSWORD=${DB_PASSWORD}
- - ACCESS_KEY=${ACCESS_KEY}
- - SECRET_KEY=${SECRET_KEY}
- - BUCKET_NAME=${BUCKET_NAME}
- - S3_ENDPOINT=${S3_ENDPOINT}
-```
-
-
-## Kubernetes CronJob
-For Kubernetes you don't need to run it in scheduled mode.
-
-Simple Kubernetes CronJob usage:
+### Simple Kubernetes CronJob usage:
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
- name: mysql-bkup-job
+ name: bkup-job
spec:
schedule: "0 1 * * *"
jobTemplate:
@@ -342,15 +99,13 @@ spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
- securityContext:
- privileged: true
command:
- /bin/sh
- -c
- mysql-bkup backup -s s3 --path /custom_path
env:
- name: DB_PORT
             value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
@@ -359,22 +114,48 @@ spec:
value: ""
# Please use secret!
- name: DB_PASSWORD
- value: "password"
- - name: ACCESS_KEY
value: ""
- - name: SECRET_KEY
- value: ""
- - name: BUCKET_NAME
- value: ""
- - name: S3_ENDPOINT
- value: "https://s3.us-west-2.amazonaws.com"
+ - name: AWS_S3_ENDPOINT
+ value: "https://s3.amazonaws.com"
+ - name: AWS_S3_BUCKET_NAME
+ value: "xxx"
+ - name: AWS_REGION
+ value: "us-west-2"
+ - name: AWS_ACCESS_KEY
+ value: "xxxx"
+ - name: AWS_SECRET_KEY
+ value: "xxxx"
+ - name: AWS_DISABLE_SSL
+ value: "false"
restartPolicy: Never
```
+## Available image registries
-## Contributing
+This Docker image is published to both Docker Hub and the GitHub container registry.
+Depending on your preferences and needs, you can reference either `jkaninda/mysql-bkup` or `ghcr.io/jkaninda/mysql-bkup`:
+
+```
+docker pull jkaninda/mysql-bkup:v1.0
+docker pull ghcr.io/jkaninda/mysql-bkup:v1.0
+```
+
+Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
+
+## Supported Engines
+
+This image is developed and tested against the Docker CE engine and Kubernetes exclusively.
+While it may work against different implementations, there are no guarantees about support for non-Docker engines.
+
+## References
+
+We decided to publish this image as a simpler and more lightweight alternative, for the following reasons:
+
+- The original image is based on `ubuntu` and requires additional tools, making it heavy.
+- This image is written in Go.
+- `arm64` and `arm/v7` architectures are supported.
+- Docker in Swarm mode is supported.
+- Kubernetes is supported.
-Contributions are welcome! If you encounter any issues or have suggestions for improvements, please create an issue or submit a pull request.
-Make sure to follow the existing coding style and provide tests for your changes.
## License
diff --git a/cmd/backup.go b/cmd/backup.go
index 6f598b1..1319e7e 100644
--- a/cmd/backup.go
+++ b/cmd/backup.go
@@ -21,8 +21,8 @@ var BackupCmd = &cobra.Command{
func init() {
//Backup
- BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Set execution mode. default or scheduled")
- BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Set schedule period time")
+ BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled")
+ BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time")
BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "Delete files created more than specified days ago, default 7 days")
BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
diff --git a/cmd/history.go b/cmd/history.go
deleted file mode 100644
index 2f4c67e..0000000
--- a/cmd/history.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package cmd
-
-import (
- "github.com/jkaninda/mysql-bkup/utils"
- "github.com/spf13/cobra"
-)
-
-var HistoryCmd = &cobra.Command{
- Use: "history",
- Short: "Show the history of backup",
- Run: func(cmd *cobra.Command, args []string) {
- utils.ShowHistory()
- },
-}
diff --git a/cmd/root.go b/cmd/root.go
index 6e07b06..2237b0a 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -19,7 +19,6 @@ var rootCmd = &cobra.Command{
Version: appVersion,
}
var operation = ""
-var s3Path = "/mysql-bkup"
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
@@ -31,16 +30,13 @@ func Execute() {
}
func init() {
- rootCmd.PersistentFlags().StringP("storage", "s", "local", "Set storage. local or s3")
- rootCmd.PersistentFlags().StringP("path", "P", s3Path, "Set s3 path, without file name. for S3 storage only")
- rootCmd.PersistentFlags().StringP("dbname", "d", "", "Set database name")
- rootCmd.PersistentFlags().IntP("timeout", "t", 30, "Set timeout")
- rootCmd.PersistentFlags().IntP("port", "p", 3306, "Set database port")
+ rootCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
+ rootCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
+ rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name")
+ rootCmd.PersistentFlags().IntP("port", "p", 3306, "Database port")
rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only")
rootCmd.AddCommand(VersionCmd)
rootCmd.AddCommand(BackupCmd)
rootCmd.AddCommand(RestoreCmd)
- rootCmd.AddCommand(S3MountCmd)
- rootCmd.AddCommand(HistoryCmd)
}
diff --git a/cmd/s3mount.go b/cmd/s3mount.go
deleted file mode 100644
index 61f2c00..0000000
--- a/cmd/s3mount.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package cmd
-
-import (
- "github.com/jkaninda/mysql-bkup/pkg"
- "github.com/spf13/cobra"
-)
-
-var S3MountCmd = &cobra.Command{
- Use: "s3mount",
- Short: "Mount AWS S3 storage",
- Run: func(cmd *cobra.Command, args []string) {
- pkg.S3Mount()
- },
-}
diff --git a/docker/Dockerfile b/docker/Dockerfile
index f189182..309e6f3 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.21.0 AS build
+FROM golang:1.22.5 AS build
WORKDIR /app
# Copy the source code.
@@ -16,35 +16,51 @@ ENV DB_USERNAME=""
ENV DB_PASSWORD=""
ENV DB_PORT="3306"
ENV STORAGE=local
-ENV BUCKET_NAME=""
-ENV ACCESS_KEY=""
-ENV SECRET_KEY=""
-ENV S3_ENDPOINT=https://s3.amazonaws.com
+ENV AWS_S3_ENDPOINT=""
+ENV AWS_S3_BUCKET_NAME=""
+ENV AWS_ACCESS_KEY=""
+ENV AWS_SECRET_KEY=""
+ENV AWS_REGION="us-west-2"
+ENV AWS_DISABLE_SSL="false"
+ENV GPG_PASSPHRASE=""
+ENV SSH_USER=""
+ENV SSH_REMOTE_PATH=""
+ENV SSH_PASSWORD=""
+ENV SSH_HOST_NAME=""
+ENV SSH_IDENTIFY_FILE=""
+ENV SSH_PORT="22"
ARG DEBIAN_FRONTEND=noninteractive
-ENV VERSION="v0.7"
-LABEL authors="Jonas Kaninda"
+ENV VERSION="v1.0"
+ARG WORKDIR="/app"
+ARG BACKUPDIR="/backup"
+ARG BACKUP_TMP_DIR="/tmp/backup"
+ARG BACKUP_CRON="/etc/cron.d/backup_cron"
+ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
+LABEL author="Jonas Kaninda"
RUN apt-get update -qq
#RUN apt-get install build-essential libcurl4-openssl-dev libxml2-dev mime-support -y
-RUN apt install s3fs mysql-client supervisor cron -y
+RUN apt install mysql-client supervisor cron gnupg -y
# Clear cache
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
-RUN mkdir /s3mnt
-RUN mkdir /tmp/s3cache
-RUN chmod 777 /s3mnt
-RUN chmod 777 /tmp/s3cache
+RUN mkdir $WORKDIR
+RUN mkdir $BACKUPDIR
+RUN mkdir -p $BACKUP_TMP_DIR
+RUN chmod 777 $WORKDIR
+RUN chmod 777 $BACKUPDIR
+RUN chmod 777 $BACKUP_TMP_DIR
+RUN touch $BACKUP_CRON && \
+ touch $BACKUP_CRON_SCRIPT && \
+ chmod 777 $BACKUP_CRON && \
+ chmod 777 $BACKUP_CRON_SCRIPT
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
RUN chmod +x /usr/local/bin/mysql-bkup
RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
-RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/mysql_bkup
-
ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
-
-RUN mkdir /backup
-WORKDIR /backup
\ No newline at end of file
+WORKDIR $WORKDIR
\ No newline at end of file
diff --git a/docs/.gitignore b/docs/.gitignore
new file mode 100644
index 0000000..45c1505
--- /dev/null
+++ b/docs/.gitignore
@@ -0,0 +1,3 @@
+_site
+.sass-cache
+.jekyll-metadata
diff --git a/docs/404.html b/docs/404.html
new file mode 100644
index 0000000..c472b4e
--- /dev/null
+++ b/docs/404.html
@@ -0,0 +1,24 @@
+---
+layout: default
+---
+
+<style type="text/css" media="screen">
+  .container {
+    margin: 10px auto;
+    max-width: 600px;
+    text-align: center;
+  }
+  h1 {
+    margin: 30px 0;
+    font-size: 4em;
+    line-height: 1;
+    letter-spacing: -1px;
+  }
+</style>
+
+<div class="container">
+  <h1>404</h1>
+
+  <p><strong>Page not found :(</strong></p>
+  <p>The requested page could not be found.</p>
+</div>
diff --git a/docs/Dockerfile b/docs/Dockerfile
new file mode 100644
index 0000000..5e1108f
--- /dev/null
+++ b/docs/Dockerfile
@@ -0,0 +1,12 @@
+FROM ruby:3.3.4
+
+ENV LC_ALL=C.UTF-8
+ENV LANG=en_US.UTF-8
+ENV LANGUAGE=en_US.UTF-8
+
+WORKDIR /usr/src/app
+
+COPY . ./
+RUN gem install bundler && bundle install
+
+EXPOSE 4000
\ No newline at end of file
diff --git a/docs/Gemfile b/docs/Gemfile
new file mode 100644
index 0000000..b499c80
--- /dev/null
+++ b/docs/Gemfile
@@ -0,0 +1,43 @@
+source "https://rubygems.org"
+
+# Hello! This is where you manage which Jekyll version is used to run.
+# When you want to use a different version, change it below, save the
+# file and run `bundle install`. Run Jekyll with `bundle exec`, like so:
+#
+# bundle exec jekyll serve
+#
+# This will help ensure the proper Jekyll version is running.
+# Happy Jekylling!
+gem "jekyll", "~> 3.10.0"
+
+# This is the default theme for new Jekyll sites. You may change this to anything you like.
+gem "minima", "~> 2.0"
+
+# If you want to use GitHub Pages, remove the "gem "jekyll"" above and
+# uncomment the line below. To upgrade, run `bundle update github-pages`.
+# gem "github-pages", group: :jekyll_plugins
+
+# If you have any plugins, put them here!
+group :jekyll_plugins do
+ gem "jekyll-feed", "~> 0.6"
+end
+
+# Windows and JRuby does not include zoneinfo files, so bundle the tzinfo-data gem
+# and associated library.
+platforms :mingw, :x64_mingw, :mswin, :jruby do
+ gem "tzinfo", ">= 1", "< 3"
+ gem "tzinfo-data"
+end
+
+# Performance-booster for watching directories on Windows
+gem "wdm", "~> 0.1.0", :install_if => Gem.win_platform?
+
+# kramdown v2 ships without the gfm parser by default. If you're using
+# kramdown v1, comment out this line.
+gem "kramdown-parser-gfm"
+
+# Lock `http_parser.rb` gem to `v0.6.x` on JRuby builds since newer versions of the gem
+# do not have a Java counterpart.
+gem "http_parser.rb", "~> 0.6.0", :platforms => [:jruby]
+gem "just-the-docs"
+
diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock
new file mode 100644
index 0000000..1bf9a5d
--- /dev/null
+++ b/docs/Gemfile.lock
@@ -0,0 +1,116 @@
+GEM
+ remote: https://rubygems.org/
+ specs:
+ addressable (2.8.7)
+ public_suffix (>= 2.0.2, < 7.0)
+ colorator (1.1.0)
+ concurrent-ruby (1.3.3)
+ csv (3.3.0)
+ em-websocket (0.5.3)
+ eventmachine (>= 0.12.9)
+ http_parser.rb (~> 0)
+ eventmachine (1.2.7)
+ ffi (1.17.0)
+ ffi (1.17.0-aarch64-linux-gnu)
+ ffi (1.17.0-aarch64-linux-musl)
+ ffi (1.17.0-arm-linux-gnu)
+ ffi (1.17.0-arm-linux-musl)
+ ffi (1.17.0-arm64-darwin)
+ ffi (1.17.0-x86-linux-gnu)
+ ffi (1.17.0-x86-linux-musl)
+ ffi (1.17.0-x86_64-darwin)
+ ffi (1.17.0-x86_64-linux-gnu)
+ ffi (1.17.0-x86_64-linux-musl)
+ forwardable-extended (2.6.0)
+ http_parser.rb (0.8.0)
+ i18n (1.14.5)
+ concurrent-ruby (~> 1.0)
+ jekyll (3.10.0)
+ addressable (~> 2.4)
+ colorator (~> 1.0)
+ csv (~> 3.0)
+ em-websocket (~> 0.5)
+ i18n (>= 0.7, < 2)
+ jekyll-sass-converter (~> 1.0)
+ jekyll-watch (~> 2.0)
+ kramdown (>= 1.17, < 3)
+ liquid (~> 4.0)
+ mercenary (~> 0.3.3)
+ pathutil (~> 0.9)
+ rouge (>= 1.7, < 4)
+ safe_yaml (~> 1.0)
+ webrick (>= 1.0)
+ jekyll-feed (0.17.0)
+ jekyll (>= 3.7, < 5.0)
+ jekyll-include-cache (0.2.1)
+ jekyll (>= 3.7, < 5.0)
+ jekyll-sass-converter (1.5.2)
+ sass (~> 3.4)
+ jekyll-seo-tag (2.8.0)
+ jekyll (>= 3.8, < 5.0)
+ jekyll-watch (2.2.1)
+ listen (~> 3.0)
+ just-the-docs (0.8.2)
+ jekyll (>= 3.8.5)
+ jekyll-include-cache
+ jekyll-seo-tag (>= 2.0)
+ rake (>= 12.3.1)
+ kramdown (2.4.0)
+ rexml
+ kramdown-parser-gfm (1.1.0)
+ kramdown (~> 2.0)
+ liquid (4.0.4)
+ listen (3.9.0)
+ rb-fsevent (~> 0.10, >= 0.10.3)
+ rb-inotify (~> 0.9, >= 0.9.10)
+ mercenary (0.3.6)
+ minima (2.5.1)
+ jekyll (>= 3.5, < 5.0)
+ jekyll-feed (~> 0.9)
+ jekyll-seo-tag (~> 2.1)
+ pathutil (0.16.2)
+ forwardable-extended (~> 2.6)
+ public_suffix (6.0.1)
+ rake (13.2.1)
+ rb-fsevent (0.11.2)
+ rb-inotify (0.11.1)
+ ffi (~> 1.0)
+ rexml (3.3.2)
+ strscan
+ rouge (3.30.0)
+ safe_yaml (1.0.5)
+ sass (3.7.4)
+ sass-listen (~> 4.0.0)
+ sass-listen (4.0.0)
+ rb-fsevent (~> 0.9, >= 0.9.4)
+ rb-inotify (~> 0.9, >= 0.9.7)
+ strscan (3.1.0)
+ wdm (0.1.1)
+ webrick (1.8.1)
+
+PLATFORMS
+ aarch64-linux-gnu
+ aarch64-linux-musl
+ arm-linux-gnu
+ arm-linux-musl
+ arm64-darwin
+ ruby
+ x86-linux-gnu
+ x86-linux-musl
+ x86_64-darwin
+ x86_64-linux-gnu
+ x86_64-linux-musl
+
+DEPENDENCIES
+ http_parser.rb (~> 0.6.0)
+ jekyll (~> 3.10.0)
+ jekyll-feed (~> 0.6)
+ just-the-docs
+ kramdown-parser-gfm
+ minima (~> 2.0)
+ tzinfo (>= 1, < 3)
+ tzinfo-data
+ wdm (~> 0.1.0)
+
+BUNDLED WITH
+ 2.5.16
diff --git a/docs/_config.yml b/docs/_config.yml
new file mode 100644
index 0000000..94ba758
--- /dev/null
+++ b/docs/_config.yml
@@ -0,0 +1,70 @@
+# Welcome to Jekyll!
+#
+# This config file is meant for settings that affect your whole blog, values
+# which you are expected to set up once and rarely edit after that. If you find
+# yourself editing this file very often, consider using Jekyll's data files
+# feature for the data you need to update frequently.
+#
+# For technical reasons, this file is *NOT* reloaded automatically when you use
+# 'bundle exec jekyll serve'. If you change this file, please restart the server process.
+
+# Site settings
+# These are used to personalize your new site. If you look in the HTML files,
+# you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
+# You can create any custom variable you would like, and they will be accessible
+# in the templates via {{ site.myvariable }}.
+title: MySQL database backup
+email: hi@jonaskaninda.com
+description: >- # this means to ignore newlines until "baseurl:"
+ MySQL Backup and Restore Docker container image. Backup database to AWS S3 storage or SSH remote server.
+
+baseurl: "" # the subpath of your site, e.g. /blog
+url: "jkaninda.github.io/mysql-bkup/" # the base hostname & protocol for your site, e.g. http://example.com
+twitter_username: jonaskaninda
+github_username: jkaninda
+
+callouts_level: quiet
+callouts:
+ highlight:
+ color: yellow
+ important:
+ title: Important
+ color: blue
+ new:
+ title: New
+ color: green
+ note:
+ title: Note
+ color: purple
+ warning:
+ title: Warning
+ color: red
+# Build settings
+markdown: kramdown
+theme: just-the-docs
+plugins:
+ - jekyll-feed
+aux_links:
+ 'GitHub Repository':
+ - https://github.com/jkaninda/mysql-bkup
+
+nav_external_links:
+ - title: GitHub Repository
+ url: https://github.com/jkaninda/mysql-bkup
+
+footer_content: >-
+ Copyright © 2024 Jonas Kaninda.
+ Distributed under the MIT License.
+ Something missing, unclear or not working? Open an issue.
+
+# Exclude from processing.
+# The following items will not be processed, by default. Create a custom list
+# to override the default setting.
+# exclude:
+# - Gemfile
+# - Gemfile2.lock
+# - node_modules
+# - vendor/bundle/
+# - vendor/cache/
+# - vendor/gems/
+# - vendor/ruby/
diff --git a/docs/_posts/2024-07-29-welcome-to-jekyll.markdown b/docs/_posts/2024-07-29-welcome-to-jekyll.markdown
new file mode 100644
index 0000000..6c2334f
--- /dev/null
+++ b/docs/_posts/2024-07-29-welcome-to-jekyll.markdown
@@ -0,0 +1,25 @@
+---
+layout: post
+title: "Welcome to Jekyll!"
+date: 2024-07-29 03:36:13 +0200
+categories: jekyll update
+---
+You’ll find this post in your `_posts` directory. Go ahead and edit it and re-build the site to see your changes. You can rebuild the site in many different ways, but the most common way is to run `jekyll serve`, which launches a web server and auto-regenerates your site when a file is updated.
+
+To add new posts, simply add a file in the `_posts` directory that follows the convention `YYYY-MM-DD-name-of-post.ext` and includes the necessary front matter. Take a look at the source for this post to get an idea about how it works.
+
+Jekyll also offers powerful support for code snippets:
+
+{% highlight ruby %}
+def print_hi(name)
+ puts "Hi, #{name}"
+end
+print_hi('Tom')
+#=> prints 'Hi, Tom' to STDOUT.
+{% endhighlight %}
+
+Check out the [Jekyll docs][jekyll-docs] for more info on how to get the most out of Jekyll. File all bugs/feature requests at [Jekyll’s GitHub repo][jekyll-gh]. If you have questions, you can ask them on [Jekyll Talk][jekyll-talk].
+
+[jekyll-docs]: https://jekyllrb.com/docs/home
+[jekyll-gh]: https://github.com/jekyll/jekyll
+[jekyll-talk]: https://talk.jekyllrb.com/
diff --git a/docs/docker-compose.yml b/docs/docker-compose.yml
new file mode 100644
index 0000000..5ceb7d5
--- /dev/null
+++ b/docs/docker-compose.yml
@@ -0,0 +1,13 @@
+services:
+ jekyll:
+ build:
+ context: ./
+ ports:
+ - 4000:4000
+ environment:
+ - JEKYLL_ENV=development
+ volumes:
+ - .:/usr/src/app
+ stdin_open: true
+ tty: true
+ command: bundle exec jekyll serve -H 0.0.0.0 -t
\ No newline at end of file
diff --git a/docs/how-tos/backup-to-s3.md b/docs/how-tos/backup-to-s3.md
new file mode 100644
index 0000000..6208332
--- /dev/null
+++ b/docs/how-tos/backup-to-s3.md
@@ -0,0 +1,139 @@
+---
+title: Backup to AWS S3
+layout: default
+parent: How Tos
+nav_order: 2
+---
+# Backup to AWS S3
+
+{: .note }
+As described in the local backup section, you can change the storage of your backup to S3 by adding the `--storage s3` (`-s s3`) flag.
+You can also specify a custom folder where you want to save your data by adding the `--path /my-custom-path` flag.
+
+
+## Backup to S3
+
+```yml
+services:
+ mysql-bkup:
+ # In production, it is advised to lock your image tag to a proper
+ # release version instead of using `latest`.
+ # Check https://github.com/jkaninda/mysql-bkup/releases
+ # for a list of available releases.
+ image: jkaninda/mysql-bkup
+ container_name: mysql-bkup
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup backup --storage s3 -d database --path /my-custom-path
+ environment:
+ - DB_PORT=3306
+ - DB_HOST=mysql
+ - DB_NAME=database
+ - DB_USERNAME=username
+ - DB_PASSWORD=password
+ ## AWS configurations
+ - AWS_S3_ENDPOINT=https://s3.amazonaws.com
+ - AWS_S3_BUCKET_NAME=backup
+      - AWS_REGION=us-west-2
+      - AWS_ACCESS_KEY=xxxx
+      - AWS_SECRET_KEY=xxxxx
+      ## If you use an S3 alternative such as MinIO and your instance is not secured with TLS, set this to true
+      - AWS_DISABLE_SSL=false
+
+      # mysql-bkup container must be connected to the same network as your database
+ networks:
+ - web
+networks:
+ web:
+```
+
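+Once the container has run, the dump should show up under the configured path in your bucket. For example, with the AWS CLI (bucket and path as in the example above):
+
+```shell
+aws s3 ls s3://backup/my-custom-path/
+```
+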
+### Recurring backups to S3
+
+As explained above, you just need to add the AWS environment variables and specify the storage type `--storage s3`.
+If you need recurring backups, use `--mode scheduled` and specify the backup schedule with the `--period "0 1 * * *"` flag, as described below.
+
+```yml
+services:
+ mysql-bkup:
+ # In production, it is advised to lock your image tag to a proper
+ # release version instead of using `latest`.
+ # Check https://github.com/jkaninda/mysql-bkup/releases
+ # for a list of available releases.
+ image: jkaninda/mysql-bkup
+ container_name: mysql-bkup
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
+ environment:
+ - DB_PORT=3306
+ - DB_HOST=mysql
+ - DB_NAME=database
+ - DB_USERNAME=username
+ - DB_PASSWORD=password
+ ## AWS configurations
+ - AWS_S3_ENDPOINT=https://s3.amazonaws.com
+ - AWS_S3_BUCKET_NAME=backup
+      - AWS_REGION=us-west-2
+      - AWS_ACCESS_KEY=xxxx
+      - AWS_SECRET_KEY=xxxxx
+      ## If you use an S3 alternative such as MinIO and your instance is not secured with TLS, set this to true
+      - AWS_DISABLE_SSL=false
+      # mysql-bkup container must be connected to the same network as your database
+ networks:
+ - web
+networks:
+ web:
+```
+
+## Deploy on Kubernetes
+
+For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as a CronJob.
+
+### Simple Kubernetes CronJob usage:
+
+```yaml
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: bkup-job
+spec:
+ schedule: "0 1 * * *"
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ containers:
+ - name: mysql-bkup
+ image: jkaninda/mysql-bkup
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup backup -s s3 --path /custom_path
+ env:
+ - name: DB_PORT
+ value: "3306"
+ - name: DB_HOST
+ value: ""
+ - name: DB_NAME
+ value: ""
+ - name: DB_USERNAME
+ value: ""
+ # Please use secret!
+ - name: DB_PASSWORD
+ value: ""
+ - name: AWS_S3_ENDPOINT
+ value: "https://s3.amazonaws.com"
+ - name: AWS_S3_BUCKET_NAME
+ value: "xxx"
+ - name: AWS_REGION
+ value: "us-west-2"
+ - name: AWS_ACCESS_KEY
+ value: "xxxx"
+ - name: AWS_SECRET_KEY
+ value: "xxxx"
+ - name: AWS_DISABLE_SSL
+ value: "false"
+ restartPolicy: OnFailure
+```
\ No newline at end of file
diff --git a/docs/how-tos/backup-to-ssh.md b/docs/how-tos/backup-to-ssh.md
new file mode 100644
index 0000000..1581ee4
--- /dev/null
+++ b/docs/how-tos/backup-to-ssh.md
@@ -0,0 +1,146 @@
+---
+title: Backup to SSH
+layout: default
+parent: How Tos
+nav_order: 3
+---
+# Backup to SSH remote server
+
+
+As described in the S3 backup section, you can change the storage of your backup to an SSH remote server by adding the `--storage ssh` or `--storage remote` flag.
+Provide the full remote path by adding the `--path /home/jkaninda/backups` flag or using the `SSH_REMOTE_PATH` environment variable.
+
+{: .note }
+These environment variables are required for SSH backup: `SSH_HOST_NAME`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_PORT`, and `SSH_IDENTIFY_FILE` (or `SSH_PASSWORD` if you don't use a private key to access your server).
+Accessing the remote server with a password is not recommended; use a private key instead.
+
+```yml
+services:
+ mysql-bkup:
+ # In production, it is advised to lock your image tag to a proper
+ # release version instead of using `latest`.
+ # Check https://github.com/jkaninda/mysql-bkup/releases
+ # for a list of available releases.
+ image: jkaninda/mysql-bkup
+ container_name: mysql-bkup
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup backup --storage remote -d database
+ volumes:
+      - ./id_ed25519:/tmp/id_ed25519
+ environment:
+ - DB_PORT=3306
+ - DB_HOST=mysql
+ - DB_NAME=database
+ - DB_USERNAME=username
+ - DB_PASSWORD=password
+ ## SSH config
+      - SSH_HOST_NAME=hostname
+ - SSH_PORT=22
+ - SSH_USER=user
+ - SSH_REMOTE_PATH=/home/jkaninda/backups
+ - SSH_IDENTIFY_FILE=/tmp/id_ed25519
+      ## We advise you to use a private key instead of a password
+      #- SSH_PASSWORD=password
+
+      # mysql-bkup container must be connected to the same network as your database
+ networks:
+ - web
+networks:
+ web:
+```
+
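+After a run, you can confirm the upload on the remote server. For example (host and path as in the example above):
+
+```shell
+ssh user@hostname ls /home/jkaninda/backups
+```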
+
+### Recurring backups to SSH remote server
+
+As explained above, you just need to add the required environment variables and specify the storage type `--storage ssh`.
+Use `--mode scheduled` and specify the backup schedule with the `--period "0 1 * * *"` flag, as described below.
+
+```yml
+services:
+ mysql-bkup:
+ # In production, it is advised to lock your image tag to a proper
+ # release version instead of using `latest`.
+ # Check https://github.com/jkaninda/mysql-bkup/releases
+ # for a list of available releases.
+ image: jkaninda/mysql-bkup
+ container_name: mysql-bkup
+ command:
+ - /bin/sh
+ - -c
+      - mysql-bkup backup -d database --storage ssh --mode scheduled --period "0 1 * * *"
+ volumes:
+      - ./id_ed25519:/tmp/id_ed25519
+ environment:
+ - DB_PORT=3306
+ - DB_HOST=mysql
+ - DB_NAME=database
+ - DB_USERNAME=username
+ - DB_PASSWORD=password
+ ## SSH config
+      - SSH_HOST_NAME=hostname
+ - SSH_PORT=22
+ - SSH_USER=user
+ - SSH_REMOTE_PATH=/home/jkaninda/backups
+ - SSH_IDENTIFY_FILE=/tmp/id_ed25519
+      ## We advise you to use a private key instead of a password
+      #- SSH_PASSWORD=password
+      # mysql-bkup container must be connected to the same network as your database
+ networks:
+ - web
+networks:
+ web:
+```
+
+## Deploy on Kubernetes
+
+For Kubernetes, you don't need to run it in scheduled mode.
+You can deploy it as a CronJob.
+
+Simple Kubernetes CronJob usage:
+
+```yaml
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: bkup-job
+spec:
+ schedule: "0 1 * * *"
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ containers:
+ - name: mysql-bkup
+ image: jkaninda/mysql-bkup
+ command:
+ - /bin/sh
+ - -c
+            - mysql-bkup backup -s ssh --path /home/jkaninda/backups
+ env:
+ - name: DB_PORT
+ value: "3306"
+ - name: DB_HOST
+ value: ""
+ - name: DB_NAME
+ value: ""
+ - name: DB_USERNAME
+ value: ""
+ # Please use secret!
+ - name: DB_PASSWORD
+ value: ""
+ - name: SSH_HOST_NAME
+ value: ""
+ - name: SSH_PORT
+ value: "22"
+ - name: SSH_USER
+ value: "xxx"
+ - name: SSH_REMOTE_PATH
+ value: "/home/jkaninda/backups"
+          - name: SSH_IDENTIFY_FILE
+            value: "/tmp/id_ed25519"
+ restartPolicy: OnFailure
+```
\ No newline at end of file
diff --git a/docs/how-tos/backup.md b/docs/how-tos/backup.md
new file mode 100644
index 0000000..9fbf3dd
--- /dev/null
+++ b/docs/how-tos/backup.md
@@ -0,0 +1,89 @@
+---
+title: Backup
+layout: default
+parent: How Tos
+nav_order: 1
+---
+
+# Backup database
+
+To backup the database, you need to add the `backup` subcommand to `mysql-bkup` or `bkup`.
+
+{: .note }
+The default storage is local storage mounted to __/backup__. The backup is compressed by default using gzip. The __`--disable-compression`__ flag can be used to disable backup compression.
+
+{: .warning }
+Creating a user for backup tasks who has read-only access is recommended!
+
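+A minimal example of creating such a user, taken from the previous README (depending on your dump options, `mysqldump` may need additional privileges such as `LOCK TABLES`):
+
+```sql
+CREATE USER read_only_user IDENTIFIED BY 'your_strong_password';
+GRANT SELECT, SHOW VIEW ON *.* TO read_only_user;
+FLUSH PRIVILEGES;
+```
+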
+The backup process can run in scheduled mode for recurring backups.
+It handles __recurring__ backups of MySQL databases on Docker and can be deployed as a __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
+
+```yml
+services:
+ mysql-bkup:
+ # In production, it is advised to lock your image tag to a proper
+ # release version instead of using `latest`.
+ # Check https://github.com/jkaninda/mysql-bkup/releases
+ # for a list of available releases.
+ image: jkaninda/mysql-bkup
+ container_name: mysql-bkup
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup backup -d database
+ volumes:
+ - ./backup:/backup
+ environment:
+ - DB_PORT=3306
+ - DB_HOST=mysql
+ - DB_NAME=database
+ - DB_USERNAME=username
+ - DB_PASSWORD=password
+      # mysql-bkup container must be connected to the same network as your database
+ networks:
+ - web
+networks:
+ web:
+```
+
+### Backup using Docker CLI
+
+```shell
+ docker run --rm --network your_network_name \
+ -v $PWD/backup:/backup/ \
+ -e "DB_HOST=dbhost" \
+ -e "DB_USERNAME=username" \
+ -e "DB_PASSWORD=password" \
+ jkaninda/mysql-bkup mysql-bkup backup -d database_name
+```
+
+If you need recurring backups, use `--mode scheduled` and specify the backup schedule with the `--period "0 1 * * *"` flag, as described below.
+
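+The `--period` flag uses standard crontab syntax (minute, hour, day of month, month, day of week), as documented in the original README:
+
+```conf
+# minute (0-59) | hour (0-23) | day of month (1-31) | month (1-12) | day of week (0-7, Sunday = 0 or 7)
+# "0 1 * * *"    -> every day at 01:00
+# "*/30 * * * *" -> at every 30th minute
+# "0 * * * *"    -> at minute 0 of every hour
+```
+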
+```yml
+services:
+ mysql-bkup:
+ # In production, it is advised to lock your image tag to a proper
+ # release version instead of using `latest`.
+ # Check https://github.com/jkaninda/mysql-bkup/releases
+ # for a list of available releases.
+ image: jkaninda/mysql-bkup
+ container_name: mysql-bkup
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup backup -d database --mode scheduled --period "0 1 * * *"
+ volumes:
+ - ./backup:/backup
+ environment:
+ - DB_PORT=3306
+ - DB_HOST=mysql
+ - DB_NAME=database
+ - DB_USERNAME=username
+ - DB_PASSWORD=password
+      # mysql-bkup container must be connected to the same network as your database
+ networks:
+ - web
+networks:
+ web:
+```
+
diff --git a/docs/how-tos/encrypt-backup.md b/docs/how-tos/encrypt-backup.md
new file mode 100644
index 0000000..1c3a4f4
--- /dev/null
+++ b/docs/how-tos/encrypt-backup.md
@@ -0,0 +1,54 @@
+---
+title: Encrypt backups using GPG
+layout: default
+parent: How Tos
+nav_order: 7
+---
+# Encrypt backup
+
+The image supports encrypting backups using GPG out of the box. If a `GPG_PASSPHRASE` environment variable is set, the backup archive will be encrypted using the given passphrase and saved as a `.sql.gpg` file instead of `.sql`, or as `.sql.gz.gpg` instead of `.sql.gz`.
+
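+For reference, the result is presumably equivalent to a symmetric GPG encryption of the dump; a sketch, assuming default gpg settings (the file names are illustrative):
+
+```shell
+gpg --batch --passphrase "my-passphrase" \
+--output database_20240730_044201.sql.gz.gpg \
+--symmetric database_20240730_044201.sql.gz
+```
+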
+{: .warning }
+To restore an encrypted backup, you need to provide the same GPG passphrase used during the backup process.
+
+To decrypt manually, you need `gnupg` installed:
+
+### Decrypt backup
+
+```shell
+gpg --batch --passphrase "my-passphrase" \
+--output database_20240730_044201.sql.gz \
+--decrypt database_20240730_044201.sql.gz.gpg
+```
+
+### Backup
+
+```yml
+services:
+ mysql-bkup:
+ # In production, it is advised to lock your image tag to a proper
+ # release version instead of using `latest`.
+ # Check https://github.com/jkaninda/mysql-bkup/releases
+ # for a list of available releases.
+ image: jkaninda/mysql-bkup
+ container_name: mysql-bkup
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup backup -d database
+ volumes:
+ - ./backup:/backup
+ environment:
+ - DB_PORT=3306
+ - DB_HOST=mysql
+ - DB_NAME=database
+ - DB_USERNAME=username
+ - DB_PASSWORD=password
+ ## Required to encrypt backup
+ - GPG_PASSPHRASE=my-secure-passphrase
+      # mysql-bkup container must be connected to the same network as your database
+ networks:
+ - web
+networks:
+ web:
+```
\ No newline at end of file
diff --git a/docs/how-tos/index.md b/docs/how-tos/index.md
new file mode 100644
index 0000000..e869ec7
--- /dev/null
+++ b/docs/how-tos/index.md
@@ -0,0 +1,8 @@
+---
+title: How Tos
+layout: default
+nav_order: 3
+has_children: true
+---
+
+## How Tos
\ No newline at end of file
diff --git a/docs/how-tos/restore-from-s3.md b/docs/how-tos/restore-from-s3.md
new file mode 100644
index 0000000..4c10f14
--- /dev/null
+++ b/docs/how-tos/restore-from-s3.md
@@ -0,0 +1,51 @@
+---
+title: Restore database from AWS S3
+layout: default
+parent: How Tos
+nav_order: 5
+---
+
+# Restore database from S3 storage
+
+To restore the database, you need to add the `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
+
+{: .note }
+It supports __.sql__ and __.sql.gz__ (compressed) files.
+
+### Restore
+
+```yml
+services:
+ mysql-bkup:
+ # In production, it is advised to lock your image tag to a proper
+ # release version instead of using `latest`.
+ # Check https://github.com/jkaninda/mysql-bkup/releases
+ # for a list of available releases.
+ image: jkaninda/mysql-bkup
+ container_name: mysql-bkup
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
+ volumes:
+ - ./backup:/backup
+ environment:
+ - DB_PORT=3306
+ - DB_HOST=mysql
+ - DB_NAME=database
+ - DB_USERNAME=username
+ - DB_PASSWORD=password
+ ## AWS configurations
+ - AWS_S3_ENDPOINT=https://s3.amazonaws.com
+ - AWS_S3_BUCKET_NAME=backup
+      - AWS_REGION=us-west-2
+      - AWS_ACCESS_KEY=xxxx
+      - AWS_SECRET_KEY=xxxxx
+      ## If you use an S3 alternative such as MinIO and your instance is not secured with TLS, set this to true
+      - AWS_DISABLE_SSL=false
+      # mysql-bkup container must be connected to the same network as your database
+ networks:
+ - web
+networks:
+ web:
+```
\ No newline at end of file
diff --git a/docs/how-tos/restore-from-ssh.md b/docs/how-tos/restore-from-ssh.md
new file mode 100644
index 0000000..f2b7a25
--- /dev/null
+++ b/docs/how-tos/restore-from-ssh.md
@@ -0,0 +1,50 @@
+---
+title: Restore database from SSH
+layout: default
+parent: How Tos
+nav_order: 6
+---
+# Restore database from SSH remote server
+
+To restore the database from your remote server, you need to add the `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
+
+{: .note }
+It supports __.sql__ and __.sql.gz__ (compressed) files.
+
+### Restore
+
+```yml
+services:
+ mysql-bkup:
+ # In production, it is advised to lock your image tag to a proper
+ # release version instead of using `latest`.
+ # Check https://github.com/jkaninda/mysql-bkup/releases
+ # for a list of available releases.
+ image: jkaninda/mysql-bkup
+ container_name: mysql-bkup
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
+ volumes:
+ - ./backup:/backup
+ environment:
+ - DB_PORT=3306
+      - DB_HOST=mysql
+ - DB_NAME=database
+ - DB_USERNAME=username
+ - DB_PASSWORD=password
+ ## SSH config
+      - SSH_HOST_NAME=hostname
+ - SSH_PORT=22
+ - SSH_USER=user
+ - SSH_REMOTE_PATH=/home/jkaninda/backups
+ - SSH_IDENTIFY_FILE=/tmp/id_ed25519
+      ## We advise you to use a private key instead of a password
+      #- SSH_PASSWORD=password
+      # mysql-bkup container must be connected to the same network as your database
+ networks:
+ - web
+networks:
+ web:
+```
\ No newline at end of file
diff --git a/docs/how-tos/restore.md b/docs/how-tos/restore.md
new file mode 100644
index 0000000..e0a8cb5
--- /dev/null
+++ b/docs/how-tos/restore.md
@@ -0,0 +1,43 @@
+---
+title: Restore database
+layout: default
+parent: How Tos
+nav_order: 4
+---
+
+# Restore database
+
+To restore the database, you need to add the `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
+
+{: .note }
+It supports __.sql__ and __.sql.gz__ (compressed) files.
+
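+A one-off restore can also be run directly with the Docker CLI; a sketch following the backup examples above (the file name is illustrative):
+
+```shell
+docker run --rm --network your_network_name \
+  -v $PWD/backup:/backup/ \
+  -e "DB_HOST=dbhost" \
+  -e "DB_USERNAME=username" \
+  -e "DB_PASSWORD=password" \
+  jkaninda/mysql-bkup mysql-bkup restore -d database_name -f store_20231219_022941.sql.gz
+```
+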
+### Restore
+
+```yml
+services:
+ mysql-bkup:
+ # In production, it is advised to lock your image tag to a proper
+ # release version instead of using `latest`.
+ # Check https://github.com/jkaninda/mysql-bkup/releases
+ # for a list of available releases.
+ image: jkaninda/mysql-bkup
+ container_name: mysql-bkup
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup restore -d database -f store_20231219_022941.sql.gz
+ volumes:
+ - ./backup:/backup
+ environment:
+ - DB_PORT=3306
+ - DB_HOST=mysql
+ - DB_NAME=database
+ - DB_USERNAME=username
+ - DB_PASSWORD=password
+      # mysql-bkup container must be connected to the same network as your database
+ networks:
+ - web
+networks:
+ web:
+```
\ No newline at end of file
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000..a2bab04
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,103 @@
+---
+title: Overview
+layout: home
+nav_order: 1
+---
+
+# About mysql-bkup
+{:.no_toc}
+mysql-bkup is a Docker container image that can be used to backup and restore MySQL databases. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
+It also supports __encrypting__ your backups using GPG.
+
+We are open to receiving stars, PRs, and issues!
+
+
+{: .fs-6 .fw-300 }
+
+---
+
+The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
+It handles __recurring__ backups of MySQL databases on Docker and can be deployed as a __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
+
+{: .note }
+Code and documentation for the `v1` version are on [this branch][v1-branch].
+
+[v1-branch]: https://github.com/jkaninda/mysql-bkup
+
+---
+
+## Quickstart
+
+### Simple backup using Docker CLI
+
+To run a one-time backup, bind your local volume to `/backup` in the container and run the `mysql-bkup backup` command:
+
+```shell
+ docker run --rm --network your_network_name \
+ -v $PWD/backup:/backup/ \
+ -e "DB_HOST=dbhost" \
+ -e "DB_USERNAME=username" \
+ -e "DB_PASSWORD=password" \
+ jkaninda/mysql-bkup mysql-bkup backup -d database_name
+```
+
+Alternatively, pass a `--env-file` in order to use a full config as described below.
+
+### Simple backup in a docker compose file
+
+```yaml
+services:
+ mysql-bkup:
+ # In production, it is advised to lock your image tag to a proper
+ # release version instead of using `latest`.
+ # Check https://github.com/jkaninda/mysql-bkup/releases
+ # for a list of available releases.
+ image: jkaninda/mysql-bkup
+ container_name: mysql-bkup
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup backup
+ volumes:
+ - ./backup:/backup
+ environment:
+ - DB_PORT=3306
+      - DB_HOST=mysql
+ - DB_NAME=foo
+ - DB_USERNAME=bar
+ - DB_PASSWORD=password
+      # mysql-bkup container must be connected to the same network as your database
+ networks:
+ - web
+networks:
+ web:
+```
+
+## Available image registries
+
+This Docker image is published to both Docker Hub and the GitHub container registry.
+Depending on your preferences and needs, you can reference either `jkaninda/mysql-bkup` or `ghcr.io/jkaninda/mysql-bkup`:
+
+```
+docker pull jkaninda/mysql-bkup:v1.0
+docker pull ghcr.io/jkaninda/mysql-bkup:v1.0
+```
+
+Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
+
+## Supported Engines
+
+This image is developed and tested against the Docker CE engine and Kubernetes exclusively.
+While it may work against different implementations, there are no guarantees about support for non-Docker engines.
+
+## References
+
+We decided to publish this image as a simpler and more lightweight alternative, for the following reasons:
+
+- The original image is based on `ubuntu` and requires additional tools, making it heavy.
+- This image is written in Go.
+- `arm64` and `arm/v7` architectures are supported.
+- Docker in Swarm mode is supported.
+- Kubernetes is supported.
diff --git a/docs/old-version/index.md b/docs/old-version/index.md
new file mode 100644
index 0000000..3135211
--- /dev/null
+++ b/docs/old-version/index.md
@@ -0,0 +1,358 @@
+---
+layout: page
+title: Old version
+permalink: /old-version/
+---
+
+This is the documentation of mysql-bkup for all old versions below `v1.0`.
+In the old versions, S3 storage was mounted using s3fs, so we decided to migrate to the official AWS SDK.
+
+## Storage:
+- local
+- s3
+- Object storage
+
+## Volumes:
+
+- /s3mnt => S3 mounting path
+- /backup => local storage mounting path
+
+### Usage
+
+| Options | Shorts | Usage |
+|-----------------------|--------|------------------------------------------------------------------------|
+| mysql-bkup | bkup | CLI utility |
+| backup | | Backup database operation |
+| restore | | Restore database operation |
+| history | | Show the history of backup |
+| --storage | -s | Storage. local or s3 (default: local) |
+| --file | -f | File name to restore |
+| --path | | S3 path without file name. eg: /custom_path |
+| --dbname | -d | Database name |
+| --port | -p | Database port (default: 3306) |
+| --mode | -m | Execution mode. default or scheduled (default: default) |
+| --disable-compression | | Disable database backup compression |
+| --prune | | Delete old backup, default disabled |
+| --keep-last | | Delete old backup created more than specified days ago, default 7 days |
+| --period | | Crontab period for scheduled mode only. (default: "0 1 * * *") |
+| --help | -h | Print this help message and exit |
+| --version | -V | Print version information and exit |
+
+
+## Environment variables
+
+| Name | Requirement | Description |
+|-------------|--------------------------------------------------|------------------------------------------------------|
+| DB_PORT | Optional, default 3306 | Database port number |
+| DB_HOST | Required | Database host |
+| DB_NAME | Optional if it was provided from the -d flag | Database name |
+| DB_USERNAME | Required | Database user name |
+| DB_PASSWORD | Required | Database password |
+| ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
+| SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
+| BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
+| S3_ENDPOINT | Optional, required for S3 storage | AWS S3 Endpoint |
+| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
+
+
+## Note:
+
+Creating a dedicated database user with read-only access for backup tasks is recommended!
+
+> Create a read-only user.
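+
+A minimal sketch of creating such a user (the `bkup` user, password, and `foo` database are placeholders; the grants cover what `mysqldump` typically needs):
+
+```sh
+mysql -h dbhost -u root -p -e "CREATE USER 'bkup'@'%' IDENTIFIED BY 'strong_password';
+GRANT SELECT, SHOW VIEW, LOCK TABLES, TRIGGER ON foo.* TO 'bkup'@'%';
+FLUSH PRIVILEGES;"
+```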
+
+
+## Backup database:
+
+Simple backup usage
+
+```sh
+bkup backup
+```
+
+### S3
+
+```sh
+mysql-bkup backup --storage s3
+```
+## Docker run:
+
+```sh
+docker run --rm --network your_network_name \
+--name mysql-bkup -v $PWD/backup:/backup/ \
+-e "DB_HOST=database_host_name" \
+-e "DB_USERNAME=username" \
+-e "DB_PASSWORD=password" jkaninda/mysql-bkup:v0.7 mysql-bkup backup -d database_name
+```
+
+## Docker compose file:
+```yaml
+version: '3'
+services:
+  mysql:
+    image: mysql:8.0
+    container_name: mysql
+    restart: unless-stopped
+    volumes:
+      - ./mysql:/var/lib/mysql
+    environment:
+      MYSQL_DATABASE: bkup
+      MYSQL_USER: bkup
+      MYSQL_PASSWORD: password
+      MYSQL_ROOT_PASSWORD: password
+ mysql-bkup:
+ image: jkaninda/mysql-bkup:v0.7
+ container_name: mysql-bkup
+ depends_on:
+      - mysql
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup backup -d bkup
+ volumes:
+ - ./backup:/backup
+ environment:
+ - DB_PORT=3306
+      - DB_HOST=mysql
+ - DB_NAME=bkup
+ - DB_USERNAME=bkup
+ - DB_PASSWORD=password
+```
+## Restore database:
+
+Simple database restore operation usage
+
+```sh
+mysql-bkup restore --file database_20231217_115621.sql --dbname database_name
+```
+
+```sh
+mysql-bkup restore -f database_20231217_115621.sql -d database_name
+```
+### S3
+
+```sh
+mysql-bkup restore --storage s3 --file database_20231217_115621.sql --dbname database_name
+```
+
+## Docker run:
+
+```sh
+docker run --rm --network your_network_name \
+--name mysql-bkup \
+-v $PWD/backup:/backup/ \
+-e "DB_HOST=database_host_name" \
+-e "DB_USERNAME=username" \
+-e "DB_PASSWORD=password" \
+jkaninda/mysql-bkup:v0.7 mysql-bkup restore -d database_name -f store_20231219_022941.sql.gz
+```
+
+## Docker compose file:
+
+```yaml
+version: '3'
+services:
+ mysql-bkup:
+ image: jkaninda/mysql-bkup:v0.7
+ container_name: mysql-bkup
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup restore --file database_20231217_115621.sql -d database_name
+ volumes:
+ - ./backup:/backup
+ environment:
+ #- FILE_NAME=database_20231217_040238.sql.gz # Optional if file name is set from command
+ - DB_PORT=3306
+      - DB_HOST=mysql
+ - DB_USERNAME=user_name
+ - DB_PASSWORD=password
+```
+## Run
+
+```sh
+docker-compose up -d
+```
+## Backup to S3
+
+```sh
+docker run --rm --privileged \
+--device /dev/fuse --name mysql-bkup \
+-e "DB_HOST=db_hostname" \
+-e "DB_USERNAME=username" \
+-e "DB_PASSWORD=password" \
+-e "ACCESS_KEY=your_access_key" \
+-e "SECRET_KEY=your_secret_key" \
+-e "BUCKETNAME=your_bucket_name" \
+-e "S3_ENDPOINT=https://s3.us-west-2.amazonaws.com" \
+jkaninda/mysql-bkup:v0.7 mysql-bkup backup -s s3 -d database_name
+```
+> To change the S3 backup path, add the `--path /my_customPath` flag. The default path is `/mysql-bkup`.
+
+Simple S3 backup usage
+
+```sh
+mysql-bkup backup --storage s3 --dbname mydatabase
+```
+```yaml
+ mysql-bkup:
+ image: jkaninda/mysql-bkup:v0.7
+ container_name: mysql-bkup
+ privileged: true
+ devices:
+ - "/dev/fuse"
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup restore --storage s3 -f database_20231217_115621.sql.gz --dbname database_name
+ environment:
+ - DB_PORT=3306
+      - DB_HOST=mysql
+ - DB_USERNAME=user_name
+ - DB_PASSWORD=password
+ - ACCESS_KEY=${ACCESS_KEY}
+ - SECRET_KEY=${SECRET_KEY}
+ - BUCKET_NAME=${BUCKET_NAME}
+ - S3_ENDPOINT=${S3_ENDPOINT}
+
+```
+## Run in Scheduled mode
+
+This tool can be run as a CronJob in Kubernetes for regular backups, which makes deployment easy since Kubernetes provides a native CronJob resource.
+For Docker, run the container in scheduled mode by adding the `--mode scheduled` flag and set the backup schedule with the `--period "0 1 * * *"` flag.
+
+## Syntax of crontab (field description)
+
+The syntax is:
+
+- 1: Minute (0-59)
+- 2: Hours (0-23)
+- 3: Day of the month (1-31)
+- 4: Month (1-12 [12 == December])
+- 5: Day of the week (0-7 [7 or 0 == Sunday])
+
+Easy to remember format:
+
+```conf
+* * * * * command to be executed
+```
+
+```conf
+- - - - -
+| | | | |
+| | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
+| | | ------- Month (1 - 12)
+| | --------- Day of month (1 - 31)
+| ----------- Hour (0 - 23)
+------------- Minute (0 - 59)
+```
+
+> At every 30th minute
+
+```conf
+*/30 * * * *
+```
+> At minute 0 of every hour
+```conf
+0 * * * *
+```
+
+> At 01:00 every day
+
+```conf
+0 1 * * *
+```
+
+## Example of scheduled mode
+
+> Docker run :
+
+```sh
+docker run --rm --name mysql-bkup \
+-v $BACKUP_DIR:/backup/ \
+-e "DB_HOST=$DB_HOST" \
+-e "DB_USERNAME=$DB_USERNAME" \
+-e "DB_PASSWORD=$DB_PASSWORD" jkaninda/mysql-bkup:v0.7 mysql-bkup backup --dbname $DB_NAME --mode scheduled --period "0 1 * * *"
+```
+
+> With Docker compose
+
+```yaml
+version: "3"
+services:
+ mysql-bkup:
+ image: jkaninda/mysql-bkup:v0.7
+ container_name: mysql-bkup
+ privileged: true
+ devices:
+ - "/dev/fuse"
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup backup --storage s3 --path /mys3_custom_path --dbname database_name --mode scheduled --period "*/30 * * * *"
+ environment:
+ - DB_PORT=3306
+      - DB_HOST=mysql
+ - DB_USERNAME=userName
+ - DB_PASSWORD=${DB_PASSWORD}
+ - ACCESS_KEY=${ACCESS_KEY}
+ - SECRET_KEY=${SECRET_KEY}
+ - BUCKET_NAME=${BUCKET_NAME}
+ - S3_ENDPOINT=${S3_ENDPOINT}
+```
+
+## Kubernetes CronJob
+
+On Kubernetes, you don't need scheduled mode; use a CronJob resource instead.
+
+Simple Kubernetes CronJob usage:
+
+```yaml
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: bkup-job
+spec:
+ schedule: "0 1 * * *"
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ containers:
+ - name: mysql-bkup
+ image: jkaninda/mysql-bkup:v0.7
+ securityContext:
+ privileged: true
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup backup -s s3 --path /custom_path
+ env:
+ - name: DB_PORT
+ value: "3306"
+ - name: DB_HOST
+ value: ""
+ - name: DB_NAME
+ value: ""
+ - name: DB_USERNAME
+ value: ""
+ # Please use secret!
+ - name: DB_PASSWORD
+ value: ""
+ - name: ACCESS_KEY
+ value: ""
+ - name: SECRET_KEY
+ value: ""
+ - name: BUCKET_NAME
+ value: ""
+ - name: S3_ENDPOINT
+ value: "https://s3.us-west-2.amazonaws.com"
+ restartPolicy: Never
+```
+
+## Authors
+
+**Jonas Kaninda**
+
diff --git a/docs/reference/index.md b/docs/reference/index.md
new file mode 100644
index 0000000..74c2f88
--- /dev/null
+++ b/docs/reference/index.md
@@ -0,0 +1,105 @@
+---
+title: Configuration Reference
+layout: default
+nav_order: 2
+---
+
+# Configuration reference
+
+Backup and restore targets, schedule, and retention are configured using environment variables or command-line flags.
+
+
+### CLI utility Usage
+
+| Options | Shorts | Usage |
+|-----------------------|--------|----------------------------------------------------------------------------------------|
+| mysql-bkup | bkup | CLI utility |
+| backup | | Backup database operation |
+| restore | | Restore database operation |
+| --storage             | -s     | Storage. local, s3 or ssh (default: local)                                               |
+| --file | -f | File name for restoration |
+| --path                |        | AWS S3 path without file name (e.g. `/custom_path`), or SSH remote path (e.g. `/home/foo/backup`) |
+| --dbname | -d | Database name |
+| --port | -p | Database port (default: 3306) |
+| --mode | -m | Execution mode. default or scheduled (default: default) |
+| --disable-compression | | Disable database backup compression |
+| --prune               |        | Delete old backups (default: disabled)                                                   |
+| --keep-last           |        | Keep backups created within the specified number of days; delete older ones (default: 7 days) |
+| --period | | Crontab period for scheduled mode only. (default: "0 1 * * *") |
+| --help | -h | Print this help message and exit |
+| --version | -V | Print version information and exit |
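+
+For example, a local backup that also prunes backups older than 7 days (values are illustrative):
+
+```sh
+mysql-bkup backup -d database_name --prune --keep-last 7
+```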
+
+## Environment variables
+
+| Name | Requirement | Description |
+|-------------------|--------------------------------------------------|------------------------------------------------------|
+| DB_PORT | Optional, default 3306 | Database port number |
+| DB_HOST | Required | Database host |
+| DB_NAME | Optional if it was provided from the -d flag | Database name |
+| DB_USERNAME | Required | Database user name |
+| DB_PASSWORD | Required | Database password |
+| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
+| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
+| AWS_S3_BUCKET_NAME | Optional, required for S3 storage                | AWS S3 Bucket Name                                   |
+| AWS_S3_ENDPOINT   | Optional, required for S3 storage                | AWS S3 Endpoint                                      |
+| AWS_REGION        | Optional, required for S3 storage                | AWS Region                                           |
+| AWS_DISABLE_SSL   | Optional, used for S3-compatible storage         | Disable SSL                                          |
+| FILE_NAME         | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
+| GPG_PASSPHRASE    | Optional, required to encrypt and restore backup | GPG passphrase                                       |
+| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
+| SSH_USER | Optional, required for SSH storage | ssh remote user |
+| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
+| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
+| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
+| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
+
+---
+## Run in Scheduled mode
+
+This image can be run as a CronJob in Kubernetes for regular backups, which makes deployment easy since Kubernetes provides a native CronJob resource.
+For Docker, run the container in scheduled mode by adding the `--mode scheduled` flag and set the backup schedule with the `--period "0 1 * * *"` flag.
+
+## Syntax of crontab (field description)
+
+The syntax is:
+
+- 1: Minute (0-59)
+- 2: Hours (0-23)
+- 3: Day of the month (1-31)
+- 4: Month (1-12 [12 == December])
+- 5: Day of the week (0-7 [7 or 0 == Sunday])
+
+Easy to remember format:
+
+```conf
+* * * * * command to be executed
+```
+
+```conf
+- - - - -
+| | | | |
+| | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
+| | | ------- Month (1 - 12)
+| | --------- Day of month (1 - 31)
+| ----------- Hour (0 - 23)
+------------- Minute (0 - 59)
+```
+
+> At every 30th minute
+
+```conf
+*/30 * * * *
+```
+> At minute 0 of every hour
+```conf
+0 * * * *
+```
+
+> At 01:00 every day
+
+```conf
+0 1 * * *
+```
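+
+For example, a scheduled local backup running every day at 01:00 (a sketch; host and credentials are placeholders):
+
+```sh
+docker run --rm --network your_network_name \
+ -v $PWD/backup:/backup/ \
+ -e "DB_HOST=mysql" \
+ -e "DB_USERNAME=username" \
+ -e "DB_PASSWORD=password" \
+ jkaninda/mysql-bkup mysql-bkup backup -d database_name --mode scheduled --period "0 1 * * *"
+```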
\ No newline at end of file
diff --git a/examples/docker-compose.s3.yaml b/examples/docker-compose.s3.yaml
index 29e0ca5..c4a3a88 100644
--- a/examples/docker-compose.s3.yaml
+++ b/examples/docker-compose.s3.yaml
@@ -1,21 +1,31 @@
-version: "3"
services:
mysql-bkup:
+ # In production, it is advised to lock your image tag to a proper
+ # release version instead of using `latest`.
+ # Check https://github.com/jkaninda/mysql-bkup/releases
+ # for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
- privileged: true
- devices:
- - "/dev/fuse"
command:
- /bin/sh
- -c
- - mysql-bkup backup --storage s3 --path /mys3_custom_path --dbname database_name
+      - mysql-bkup backup --storage s3 -d my-database
environment:
- DB_PORT=3306
- - DB_HOST=mysqlhost
- - DB_USERNAME=userName
- - DB_PASSWORD=${DB_PASSWORD}
- - ACCESS_KEY=${ACCESS_KEY}
- - SECRET_KEY=${SECRET_KEY}
- - BUCKET_NAME=${BUCKET_NAME}
- - S3_ENDPOINT=https://s3.us-west-2.amazonaws.com
\ No newline at end of file
+      - DB_HOST=mysql
+ - DB_NAME=database
+ - DB_USERNAME=username
+ - DB_PASSWORD=password
+ ## AWS configurations
+ - AWS_S3_ENDPOINT=https://s3.amazonaws.com
+ - AWS_S3_BUCKET_NAME=backup
+      - AWS_REGION=us-west-2
+ - AWS_ACCESS_KEY=xxxx
+ - AWS_SECRET_KEY=xxxxx
+      ## If you use an S3 alternative such as MinIO and your instance is not secured, change this to true
+      - AWS_DISABLE_SSL=false
+      # mysql-bkup container must be connected to the same network as your database
+ networks:
+ - web
+networks:
+ web:
\ No newline at end of file
diff --git a/examples/docker-compose.scheduled.local.yaml b/examples/docker-compose.scheduled.local.yaml
index d5bff1d..3e64c30 100644
--- a/examples/docker-compose.scheduled.local.yaml
+++ b/examples/docker-compose.scheduled.local.yaml
@@ -11,6 +11,6 @@ services:
- ./backup:/backup
environment:
- DB_PORT=3306
- - DB_HOST=mysqlhost
+      - DB_HOST=mysql
- DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD}
\ No newline at end of file
diff --git a/examples/docker-compose.scheduled.s3.yaml b/examples/docker-compose.scheduled.s3.yaml
index c7e4771..e9a1057 100644
--- a/examples/docker-compose.scheduled.s3.yaml
+++ b/examples/docker-compose.scheduled.s3.yaml
@@ -1,21 +1,31 @@
-version: "3"
services:
mysql-bkup:
+ # In production, it is advised to lock your image tag to a proper
+ # release version instead of using `latest`.
+ # Check https://github.com/jkaninda/mysql-bkup/releases
+ # for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
- privileged: true
- devices:
- - "/dev/fuse"
command:
- /bin/sh
- -c
- - mysql-bkup backup --storage s3 --path /mys3_custom_path --dbname database_name --mode scheduled --period "0 1 * * *"
+ - mysql-bkup backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
environment:
- DB_PORT=3306
- - DB_HOST=mysqlhost
- - DB_USERNAME=userName
- - DB_PASSWORD=${DB_PASSWORD}
- - ACCESS_KEY=${ACCESS_KEY}
- - SECRET_KEY=${SECRET_KEY}
- - BUCKET_NAME=${BUCKET_NAME}
- - S3_ENDPOINT=https://s3.us-west-2.amazonaws.com
\ No newline at end of file
+      - DB_HOST=mysql
+ - DB_NAME=database
+ - DB_USERNAME=username
+ - DB_PASSWORD=password
+ ## AWS configurations
+ - AWS_S3_ENDPOINT=https://s3.amazonaws.com
+ - AWS_S3_BUCKET_NAME=backup
+      - AWS_REGION=us-west-2
+ - AWS_ACCESS_KEY=xxxx
+ - AWS_SECRET_KEY=xxxxx
+      ## If you use an S3 alternative such as MinIO and your instance is not secured, change this to true
+      - AWS_DISABLE_SSL=false
+      # mysql-bkup container must be connected to the same network as your database
+ networks:
+ - web
+networks:
+ web:
\ No newline at end of file
diff --git a/examples/docker-compose.yaml b/examples/docker-compose.yaml
index a21cd50..c6f586d 100644
--- a/examples/docker-compose.yaml
+++ b/examples/docker-compose.yaml
@@ -11,6 +11,6 @@ services:
- ./backup:/backup
environment:
- DB_PORT=3306
- - DB_HOST=mysqlhost
+      - DB_HOST=mysql
- DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD}
\ No newline at end of file
diff --git a/examples/k8s-job.yaml b/examples/k8s-job.yaml
index 6e7c4a4..6d77e1b 100644
--- a/examples/k8s-job.yaml
+++ b/examples/k8s-job.yaml
@@ -1,7 +1,7 @@
-apiVersion: batch/v1
+apiVersion: batch/v1
kind: CronJob
metadata:
- name: db-bkup-job
+ name: bkup-job
spec:
schedule: "0 1 * * *"
jobTemplate:
@@ -9,32 +9,36 @@ spec:
template:
spec:
containers:
- - name: mysql-bkup
- image: jkaninda/mysql-bkup
- securityContext:
- privileged: true
- command:
- - /bin/sh
- - -c
- - mysql-bkup backup --storage s3 --path /custom_path
- env:
- - name: DB_PORT
- value: "3306"
- - name: DB_HOST
- value: ""
- - name: DB_NAME
- value: ""
- - name: DB_USERNAME
- value: ""
- # Please use secret!
- - name: DB_PASSWORD
- value: "password"
- - name: ACCESS_KEY
- value: ""
- - name: SECRET_KEY
- value: ""
- - name: BUCKETNAME
- value: ""
- - name: S3_ENDPOINT
- value: "https://s3.us-west-2.amazonaws.com"
- restartPolicy: Never
\ No newline at end of file
+ - name: mysql-bkup
+ image: jkaninda/mysql-bkup
+ command:
+ - /bin/sh
+ - -c
+ - mysql-bkup backup -s s3 --path /custom_path
+ env:
+ - name: DB_PORT
+ value: "3306"
+ - name: DB_HOST
+ value: ""
+ - name: DB_NAME
+ value: ""
+ - name: DB_USERNAME
+ value: ""
+ # Please use secret!
+ - name: DB_PASSWORD
+ value: ""
+ - name: AWS_S3_ENDPOINT
+ value: "https://s3.amazonaws.com"
+ - name: AWS_S3_BUCKET_NAME
+ value: "xxx"
+ - name: AWS_REGION
+ value: "us-west-2"
+ - name: AWS_ACCESS_KEY
+ value: "xxxx"
+ - name: AWS_SECRET_KEY
+ value: "xxxx"
+ - name: AWS_DISABLE_SSL
+ value: "false"
+ restartPolicy: OnFailure
\ No newline at end of file
diff --git a/go.mod b/go.mod
index e355994..c39c40e 100644
--- a/go.mod
+++ b/go.mod
@@ -1,13 +1,21 @@
module github.com/jkaninda/mysql-bkup
-go 1.21.0
+go 1.22.5
require github.com/spf13/pflag v1.0.5
require (
- github.com/hpcloud/tail v1.0.0 // indirect
+ github.com/aws/aws-sdk-go v1.55.3
+ github.com/bramvdbogaerde/go-scp v1.5.0
+ github.com/hpcloud/tail v1.0.0
+ github.com/spf13/cobra v1.8.0
+ golang.org/x/crypto v0.18.0
+ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
+)
+
+require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
- github.com/spf13/cobra v1.8.0 // indirect
+ github.com/jmespath/go-jmespath v0.4.0 // indirect
golang.org/x/sys v0.22.0 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
diff --git a/go.sum b/go.sum
index 426c610..d9de32f 100644
--- a/go.sum
+++ b/go.sum
@@ -1,13 +1,32 @@
+github.com/aws/aws-sdk-go v1.55.3 h1:0B5hOX+mIx7I5XPOrjrHlKSDQV/+ypFZpIHOx5LOk3E=
+github.com/aws/aws-sdk-go v1.55.3/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
+github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/bramvdbogaerde/go-scp v1.5.0 h1:a9BinAjTfQh273eh7vd3qUgmBC+bx+3TRDtkZWmIpzM=
+github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -15,4 +34,5 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/pkg/backup.go b/pkg/backup.go
index a2a5fff..46bf242 100644
--- a/pkg/backup.go
+++ b/pkg/backup.go
@@ -17,6 +17,7 @@ import (
)
func StartBackup(cmd *cobra.Command) {
+ _, _ = cmd.Flags().GetString("operation")
//Set env
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "dbname", "DB_NAME")
@@ -24,23 +25,41 @@ func StartBackup(cmd *cobra.Command) {
utils.GetEnv(cmd, "period", "SCHEDULE_PERIOD")
//Get flag value and set env
- s3Path = utils.GetEnv(cmd, "path", "S3_PATH")
+ s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
+ remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
file = utils.GetEnv(cmd, "file", "FILE_NAME")
- disableCompression, _ = cmd.Flags().GetBool("disable-compression")
- keepLast, _ := cmd.Flags().GetInt("keep-last")
+ backupRetention, _ := cmd.Flags().GetInt("keep-last")
prune, _ := cmd.Flags().GetBool("prune")
+ disableCompression, _ = cmd.Flags().GetBool("disable-compression")
executionMode, _ = cmd.Flags().GetString("mode")
+ dbName = os.Getenv("DB_NAME")
+	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
+	// Enable encryption when a GPG passphrase is provided
+	if gpgPassphrase != "" {
+		encryption = true
+	}
+
+ //Generate file name
+ backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
+ if disableCompression {
+ backupFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
+ }
if executionMode == "default" {
- if storage == "s3" {
- utils.Info("Backup database to s3 storage")
- s3Backup(disableCompression, s3Path, prune, keepLast)
- } else {
- utils.Info("Backup database to local storage")
- BackupDatabase(disableCompression, prune, keepLast)
-
+ switch storage {
+ case "s3":
+ s3Backup(backupFileName, s3Path, disableCompression, prune, backupRetention, encryption)
+ case "local":
+ localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
+ case "ssh", "remote":
+ sshBackup(backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
+ case "ftp":
+		utils.Fatal("Storage type %s is not supported yet", storage)
+ default:
+ localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
}
+
} else if executionMode == "scheduled" {
scheduledMode()
} else {
@@ -71,9 +90,9 @@ func scheduledMode() {
cmd := exec.Command("supervisord", "-c", supervisorConfig)
err := cmd.Start()
if err != nil {
- utils.Fatal("Failed to start supervisord: %v", err)
+ utils.Fatal(fmt.Sprintf("Failed to start supervisord: %v", err))
}
- utils.Info("Starting backup job...")
+ utils.Info("Backup job started")
defer func() {
if err := cmd.Process.Kill(); err != nil {
utils.Info("Failed to kill supervisord process: %v", err)
@@ -82,11 +101,11 @@ func scheduledMode() {
}
}()
if _, err := os.Stat(cronLogFile); os.IsNotExist(err) {
- utils.Fatal("Log file %s does not exist.", cronLogFile)
+ utils.Fatal(fmt.Sprintf("Log file %s does not exist.", cronLogFile))
}
t, err := tail.TailFile(cronLogFile, tail.Config{Follow: true})
if err != nil {
- utils.Fatalf("Failed to tail file: %v", err)
+ utils.Fatal("Failed to tail file: %v", err)
}
// Read and print new lines from the log file
@@ -96,136 +115,171 @@ func scheduledMode() {
}
// BackupDatabase backup database
-func BackupDatabase(disableCompression bool, prune bool, keepLast int) {
+func BackupDatabase(backupFileName string, disableCompression bool) {
dbHost = os.Getenv("DB_HOST")
- dbPassword := os.Getenv("DB_PASSWORD")
- dbUserName := os.Getenv("DB_USERNAME")
+ dbPassword = os.Getenv("DB_PASSWORD")
+ dbUserName = os.Getenv("DB_USERNAME")
dbName = os.Getenv("DB_NAME")
dbPort = os.Getenv("DB_PORT")
storagePath = os.Getenv("STORAGE_PATH")
- if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
- utils.Fatal("Please make sure all required environment variables for database are set")
- } else {
- utils.TestDatabaseConnection()
- // Backup Database database
- utils.Info("Backing up database...")
- //Generate file name
- bkFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
-
- // Verify is compression is disabled
- if disableCompression {
- //Generate file name
- bkFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
- // Execute mysqldump
- cmd := exec.Command("mysqldump",
- "-h", dbHost,
- "-P", dbPort,
- "-u", dbUserName,
- "--password="+dbPassword,
- dbName,
- )
- output, err := cmd.Output()
- if err != nil {
- log.Fatal(err)
- }
-
- // save output
- file, err := os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName))
- if err != nil {
- log.Fatal(err)
- }
- defer file.Close()
-
- _, err = file.Write(output)
- if err != nil {
- log.Fatal(err)
- }
- utils.Done("Database has been backed up")
-
- } else {
- // Execute mysqldump
- cmd := exec.Command("mysqldump", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName)
- stdout, err := cmd.StdoutPipe()
- if err != nil {
- log.Fatal(err)
- }
- gzipCmd := exec.Command("gzip")
- gzipCmd.Stdin = stdout
- gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName))
- gzipCmd.Start()
- if err != nil {
- log.Fatal(err)
- }
- if err := cmd.Run(); err != nil {
- log.Fatal(err)
- }
- if err := gzipCmd.Wait(); err != nil {
- log.Fatal(err)
- }
- utils.Done("Database has been backed up")
- }
-
- //Delete old backup
- if prune {
- deleteOldBackup(keepLast)
- }
- historyFile, err := os.OpenFile(fmt.Sprintf("%s/history.txt", storagePath), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
- if err != nil {
- log.Fatal(err)
- }
- defer historyFile.Close()
- if _, err := historyFile.WriteString(bkFileName + "\n"); err != nil {
- log.Fatal(err)
- }
+ // dbHVars Required environment variables for database
+ var dbHVars = []string{
+ "DB_HOST",
+ "DB_PASSWORD",
+ "DB_USERNAME",
+ "DB_NAME",
}
-
-}
-
-func s3Backup(disableCompression bool, s3Path string, prune bool, keepLast int) {
- // Backup Database to S3 storage
- MountS3Storage(s3Path)
- BackupDatabase(disableCompression, prune, keepLast)
-}
-
-func deleteOldBackup(keepLast int) {
- utils.Info("Deleting old backups...")
- storagePath = os.Getenv("STORAGE_PATH")
- // Define the directory path
- backupDir := storagePath + "/"
- // Get current time
- currentTime := time.Now()
- // Delete file
- deleteFile := func(filePath string) error {
- err := os.Remove(filePath)
- if err != nil {
- utils.Fatal("Error:", err)
- } else {
- utils.Done("File ", filePath, " deleted successfully")
- }
- return err
- }
-
- // Walk through the directory and delete files modified more than specified days ago
- err := filepath.Walk(backupDir, func(filePath string, fileInfo os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- // Check if it's a regular file and if it was modified more than specified days ago
- if fileInfo.Mode().IsRegular() {
- timeDiff := currentTime.Sub(fileInfo.ModTime())
- if timeDiff.Hours() > 24*float64(keepLast) {
- err := deleteFile(filePath)
- if err != nil {
- return err
- }
- }
- }
- return nil
- })
-
+ err := utils.CheckEnvVars(dbHVars)
if err != nil {
- utils.Fatal("Error:", err)
- return
+ utils.Error("Please make sure all required environment variables for database are set")
+ utils.Fatal("Error checking environment variables: %s", err)
+ }
+
+ utils.Info("Starting database backup...")
+ utils.TestDatabaseConnection()
+
+ // Backup Database database
+ utils.Info("Backing up database...")
+
+ if disableCompression {
+ // Execute mysqldump
+ cmd := exec.Command("mysqldump",
+ "-h", dbHost,
+ "-P", dbPort,
+ "-u", dbUserName,
+ "--password="+dbPassword,
+ dbName,
+ )
+ output, err := cmd.Output()
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // save output
+ file, err := os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer file.Close()
+
+ _, err = file.Write(output)
+ if err != nil {
+ log.Fatal(err)
+ }
+ utils.Done("Database has been backed up")
+
+ } else {
+ // Execute mysqldump
+ cmd := exec.Command("mysqldump", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName)
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ log.Fatal(err)
+ }
+		gzipCmd := exec.Command("gzip")
+		gzipCmd.Stdin = stdout
+		// Stream mysqldump output through gzip directly into the backup file
+		gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
+		if err != nil {
+			log.Fatal(err)
+		}
+		if err := gzipCmd.Start(); err != nil {
+			log.Fatal(err)
+		}
+		if err := cmd.Run(); err != nil {
+			log.Fatal(err)
+		}
+		if err := gzipCmd.Wait(); err != nil {
+			log.Fatal(err)
+		}
+ utils.Done("Database has been backed up")
+
+ }
+
+}
+func localBackup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+ utils.Info("Backup database to local storage")
+ BackupDatabase(backupFileName, disableCompression)
+ finalFileName := backupFileName
+ if encrypt {
+ encryptBackup(backupFileName)
+ finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension)
+ }
+ utils.Info("Backup name is %s", finalFileName)
+ moveToBackup(finalFileName, storagePath)
+ //Delete old backup
+ if prune {
+ deleteOldBackup(backupRetention)
}
}
+
+func s3Backup(backupFileName string, s3Path string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+ bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
+ utils.Info("Backup database to s3 storage")
+ //Backup database
+ BackupDatabase(backupFileName, disableCompression)
+ finalFileName := backupFileName
+ if encrypt {
+ encryptBackup(backupFileName)
+		finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension)
+ }
+ utils.Info("Uploading backup file to S3 storage...")
+ utils.Info("Backup name is %s", finalFileName)
+ err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
+ if err != nil {
+ utils.Fatal("Error uploading file to S3: %s ", err)
+
+ }
+
+ //Delete backup file from tmp folder
+	err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
+ if err != nil {
+ fmt.Println("Error deleting file: ", err)
+
+ }
+ // Delete old backup
+ if prune {
+ err := utils.DeleteOldBackup(bucket, s3Path, backupRetention)
+ if err != nil {
+ utils.Fatal("Error deleting old backup from S3: %s ", err)
+ }
+ }
+ utils.Done("Database has been backed up and uploaded to s3 ")
+}
+func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+ utils.Info("Backup database to Remote server")
+ //Backup database
+ BackupDatabase(backupFileName, disableCompression)
+ finalFileName := backupFileName
+ if encrypt {
+ encryptBackup(backupFileName)
+		finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension)
+ }
+ utils.Info("Uploading backup file to remote server...")
+ utils.Info("Backup name is %s", finalFileName)
+ err := CopyToRemote(finalFileName, remotePath)
+ if err != nil {
+ utils.Fatal("Error uploading file to the remote server: %s ", err)
+
+ }
+
+ //Delete backup file from tmp folder
+ err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
+ if err != nil {
+ fmt.Println("Error deleting file: ", err)
+
+ }
+ if prune {
+ //TODO: Delete old backup from remote server
+ utils.Info("Deleting old backup from a remote server is not implemented yet")
+
+ }
+
+ utils.Done("Database has been backed up and uploaded to remote server ")
+}
+
+func encryptBackup(backupFileName string) {
+ gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
+ err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
+ if err != nil {
+ utils.Fatal("Error during encrypting backup %s", err)
+ }
+
+}
diff --git a/pkg/config.go b/pkg/config.go
new file mode 100644
index 0000000..d0b5e01
--- /dev/null
+++ b/pkg/config.go
@@ -0,0 +1,4 @@
+package pkg
+
+type Config struct {
+}
diff --git a/pkg/encrypt.go b/pkg/encrypt.go
new file mode 100644
index 0000000..6b067d3
--- /dev/null
+++ b/pkg/encrypt.go
@@ -0,0 +1,48 @@
+package pkg
+
+import (
+ "fmt"
+ "github.com/jkaninda/mysql-bkup/utils"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+func Decrypt(inputFile string, passphrase string) error {
+ utils.Info("Decrypting backup file: " + inputFile + " ...")
+ cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+
+ err := cmd.Run()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error: %v\n", err)
+ return err
+ }
+
+ utils.Info("Backup file decrypted successful!")
+ return nil
+}
+
+func Encrypt(inputFile string, passphrase string) error {
+ utils.Info("Encrypting backup...")
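+	// Note: gpg --symmetric writes its encrypted output to <inputFile>.gpg by default,
+	// which is why callers append the gpg extension to the backup file name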
+ cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+
+ err := cmd.Run()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error: %v\n", err)
+ return err
+ }
+
+ utils.Info("Backup file encrypted successful!")
+ return nil
+}
+
+func RemoveLastExtension(filename string) string {
+ if idx := strings.LastIndex(filename, "."); idx != -1 {
+ return filename[:idx]
+ }
+ return filename
+}
diff --git a/pkg/helper.go b/pkg/helper.go
new file mode 100644
index 0000000..b016789
--- /dev/null
+++ b/pkg/helper.go
@@ -0,0 +1,74 @@
+package pkg
+
+import (
+ "fmt"
+ "github.com/jkaninda/mysql-bkup/utils"
+ "os"
+ "path/filepath"
+ "time"
+)
+
+func copyToTmp(sourcePath string, backupFileName string) {
+ //Copy backup from storage to /tmp
+ err := utils.CopyFile(filepath.Join(sourcePath, backupFileName), filepath.Join(tmpPath, backupFileName))
+ if err != nil {
+ utils.Fatal(fmt.Sprintf("Error copying file %s %s", backupFileName, err))
+
+ }
+}
+func moveToBackup(backupFileName string, destinationPath string) {
+ //Copy backup from tmp folder to storage destination
+ err := utils.CopyFile(filepath.Join(tmpPath, backupFileName), filepath.Join(destinationPath, backupFileName))
+ if err != nil {
+ utils.Fatal(fmt.Sprintf("Error copying file %s %s", backupFileName, err))
+
+ }
+ //Delete backup file from tmp folder
+ err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
+ if err != nil {
+ fmt.Println("Error deleting file:", err)
+
+ }
+ utils.Done("Database has been backed up and copied to %s", filepath.Join(destinationPath, backupFileName))
+}
+func deleteOldBackup(retentionDays int) {
+ utils.Info("Deleting old backups...")
+ storagePath = os.Getenv("STORAGE_PATH")
+ // Define the directory path
+ backupDir := storagePath + "/"
+ // Get current time
+ currentTime := time.Now()
+ // Delete file
+ deleteFile := func(filePath string) error {
+ err := os.Remove(filePath)
+ if err != nil {
+ utils.Fatal(fmt.Sprintf("Error: %s", err))
+ } else {
+ utils.Done("File %s has been deleted successfully", filePath)
+ }
+ return err
+ }
+
+ // Walk through the directory and delete files modified more than specified days ago
+ err := filepath.Walk(backupDir, func(filePath string, fileInfo os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ // Check if it's a regular file and if it was modified more than specified days ago
+ if fileInfo.Mode().IsRegular() {
+ timeDiff := currentTime.Sub(fileInfo.ModTime())
+ if timeDiff.Hours() > 24*float64(retentionDays) {
+ err := deleteFile(filePath)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+ })
+
+ if err != nil {
+ utils.Fatal(fmt.Sprintf("Error: %s", err))
+ return
+ }
+}
diff --git a/pkg/restore.go b/pkg/restore.go
index 92aaa3d..67c20df 100644
--- a/pkg/restore.go
+++ b/pkg/restore.go
@@ -17,70 +17,118 @@ func StartRestore(cmd *cobra.Command) {
utils.GetEnv(cmd, "port", "DB_PORT")
//Get flag value and set env
- s3Path = utils.GetEnv(cmd, "path", "S3_PATH")
+ s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
+ remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
file = utils.GetEnv(cmd, "file", "FILE_NAME")
executionMode, _ = cmd.Flags().GetString("mode")
-
- if storage == "s3" {
- utils.Info("Restore database from s3")
- s3Restore(file, s3Path)
- } else {
+ bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
+ switch storage {
+ case "s3":
+ restoreFromS3(file, bucket, s3Path)
+ case "local":
+ utils.Info("Restore database from local")
+ copyToTmp(storagePath, file)
+ RestoreDatabase(file)
+ case "ssh":
+ restoreFromRemote(file, remotePath)
+ case "ftp":
+ utils.Fatal("Restore from FTP is not yet supported")
+	default:
+		copyToTmp(storagePath, file)
utils.Info("Restore database from local")
RestoreDatabase(file)
-
}
}
+func restoreFromS3(file, bucket, s3Path string) {
+ utils.Info("Restore database from s3")
+ err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
+ if err != nil {
+		utils.Fatal(fmt.Sprintf("Error downloading file %s from S3: %v", file, err))
+ }
+ RestoreDatabase(file)
+}
+func restoreFromRemote(file, remotePath string) {
+ utils.Info("Restore database from remote server")
+ err := CopyFromRemote(file, remotePath)
+ if err != nil {
+		utils.Fatal(fmt.Sprintf("Error downloading file %s from remote server: %v", filepath.Join(remotePath, file), err))
+ }
+ RestoreDatabase(file)
+}
+
// RestoreDatabase restore database
func RestoreDatabase(file string) {
dbHost = os.Getenv("DB_HOST")
+ dbPassword = os.Getenv("DB_PASSWORD")
+ dbUserName = os.Getenv("DB_USERNAME")
dbName = os.Getenv("DB_NAME")
dbPort = os.Getenv("DB_PORT")
- storagePath = os.Getenv("STORAGE_PATH")
+ gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
if file == "" {
utils.Fatal("Error, file required")
}
+ // dbHVars Required environment variables for database
+ var dbHVars = []string{
+ "DB_HOST",
+ "DB_PASSWORD",
+ "DB_USERNAME",
+ "DB_NAME",
+ }
+ err := utils.CheckEnvVars(dbHVars)
+ if err != nil {
+ utils.Error("Please make sure all required environment variables for database are set")
+ utils.Fatal("Error checking environment variables: %s", err)
+ }
- if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" || file == "" {
- utils.Fatal("Please make sure all required environment variables are set")
- } else {
-
- if utils.FileExists(fmt.Sprintf("%s/%s", storagePath, file)) {
- utils.TestDatabaseConnection()
-
- extension := filepath.Ext(fmt.Sprintf("%s/%s", storagePath, file))
- // Restore from compressed file / .sql.gz
- if extension == ".gz" {
- str := "zcat " + fmt.Sprintf("%s/%s", storagePath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
- _, err := exec.Command("bash", "-c", str).Output()
- if err != nil {
- utils.Fatal("Error, in restoring the database")
- }
-
- utils.Done("Database has been restored")
-
- } else if extension == ".sql" {
- //Restore from sql file
- str := "cat " + fmt.Sprintf("%s/%s", storagePath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
- _, err := exec.Command("bash", "-c", str).Output()
- if err != nil {
- utils.Fatal("Error, in restoring the database", err)
- }
-
- utils.Done("Database has been restored")
- } else {
- utils.Fatal("Unknown file extension ", extension)
- }
+ extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
+ if extension == ".gpg" {
+ if gpgPassphrase == "" {
+ utils.Fatal("Error: GPG passphrase is required, your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE environment variable is required.")
} else {
- utils.Fatal("File not found in ", fmt.Sprintf("%s/%s", storagePath, file))
+ //Decrypt file
+ err := Decrypt(filepath.Join(tmpPath, file), gpgPassphrase)
+ if err != nil {
+				utils.Fatal("Error decrypting file %s: %v", file, err)
+ }
+ //Update file name
+ file = RemoveLastExtension(file)
}
}
-}
-func s3Restore(file, s3Path string) {
- // Restore database from S3
- MountS3Storage(s3Path)
- RestoreDatabase(file)
+
+ if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
+
+		// MYSQL_PWD lets the mysql client read the password from the environment
+		err := os.Setenv("MYSQL_PWD", dbPassword)
+ if err != nil {
+ return
+ }
+ utils.TestDatabaseConnection()
+
+ extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
+ // Restore from compressed file / .sql.gz
+ if extension == ".gz" {
+ str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
+ _, err := exec.Command("bash", "-c", str).Output()
+ if err != nil {
+				utils.Fatal(fmt.Sprintf("Error restoring the database: %s", err))
+ }
+ utils.Done("Database has been restored")
+
+ } else if extension == ".sql" {
+ //Restore from sql file
+ str := "cat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
+ _, err := exec.Command("bash", "-c", str).Output()
+ if err != nil {
+				utils.Fatal(fmt.Sprintf("Error restoring the database: %s", err))
+ }
+ utils.Done("Database has been restored")
+ } else {
+ utils.Fatal(fmt.Sprintf("Unknown file extension %s", extension))
+ }
+
+ } else {
+ utils.Fatal(fmt.Sprintf("File not found in %s", fmt.Sprintf("%s/%s", tmpPath, file)))
+ }
}
diff --git a/pkg/s3fs.go b/pkg/s3fs.go
deleted file mode 100644
index c1543e5..0000000
--- a/pkg/s3fs.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Package pkg /*
-/*
-Copyright © 2024 Jonas Kaninda
-*/
-package pkg
-
-import (
- "fmt"
- "github.com/jkaninda/mysql-bkup/utils"
- "os"
- "os/exec"
-)
-
-var (
- accessKey = ""
- secretKey = ""
- bucketName = ""
- s3Endpoint = ""
-)
-
-func S3Mount() {
- MountS3Storage(s3Path)
-}
-
-// MountS3Storage Mount s3 storage using s3fs
-func MountS3Storage(s3Path string) {
- accessKey = os.Getenv("ACCESS_KEY")
- secretKey = os.Getenv("SECRET_KEY")
- bucketName = os.Getenv("BUCKET_NAME")
- if bucketName == "" {
- bucketName = os.Getenv("BUCKETNAME")
- }
- s3Endpoint = os.Getenv("S3_ENDPOINT")
-
- if accessKey == "" || secretKey == "" || bucketName == "" {
- utils.Fatal("Please make sure all environment variables are set for S3")
- } else {
- storagePath := fmt.Sprintf("%s%s", s3MountPath, s3Path)
- err := os.Setenv("STORAGE_PATH", storagePath)
- if err != nil {
- return
- }
-
- //Write file
- err = utils.WriteToFile(s3fsPasswdFile, fmt.Sprintf("%s:%s", accessKey, secretKey))
- if err != nil {
- utils.Fatal("Error creating file")
- }
- //Change file permission
- utils.ChangePermission(s3fsPasswdFile, 0600)
-
- //Mount object storage
- utils.Info("Mounting Object storage in ", s3MountPath)
- if isEmpty, _ := utils.IsDirEmpty(s3MountPath); isEmpty {
- cmd := exec.Command("s3fs", bucketName, s3MountPath,
- "-o", "passwd_file="+s3fsPasswdFile,
- "-o", "use_cache=/tmp/s3cache",
- "-o", "allow_other",
- "-o", "url="+s3Endpoint,
- "-o", "use_path_request_style",
- )
-
- if err := cmd.Run(); err != nil {
- utils.Fatal("Error mounting Object storage:", err)
- }
-
- if err := os.MkdirAll(storagePath, os.ModePerm); err != nil {
- utils.Fatalf("Error creating directory %v %v", storagePath, err)
- }
-
- } else {
- utils.Info("Object storage already mounted in " + s3MountPath)
- if err := os.MkdirAll(storagePath, os.ModePerm); err != nil {
- utils.Fatal("Error creating directory "+storagePath, err)
- }
-
- }
-
- }
-}
diff --git a/pkg/scp.go b/pkg/scp.go
new file mode 100644
index 0000000..6d9e935
--- /dev/null
+++ b/pkg/scp.go
@@ -0,0 +1,117 @@
+package pkg
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/bramvdbogaerde/go-scp"
+ "github.com/bramvdbogaerde/go-scp/auth"
+ "github.com/jkaninda/mysql-bkup/utils"
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/exp/slog"
+ "os"
+ "path/filepath"
+)
+
+func CopyToRemote(fileName, remotePath string) error {
+ sshUser := os.Getenv("SSH_USER")
+ sshPassword := os.Getenv("SSH_PASSWORD")
+ sshHostName := os.Getenv("SSH_HOST_NAME")
+ sshPort := os.Getenv("SSH_PORT")
+ sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
+
+	// sshHVars Required environment variables for SSH remote server storage
+ var sshHVars = []string{
+ "SSH_USER",
+ "SSH_REMOTE_PATH",
+ "SSH_HOST_NAME",
+ "SSH_PORT",
+ }
+ err := utils.CheckEnvVars(sshHVars)
+ if err != nil {
+		slog.Error(fmt.Sprintf("Error checking environment variables: %s", err))
+ os.Exit(1)
+ }
+
+ clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
+ if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
+ clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
+
+ } else {
+		if sshPassword == "" {
+			return errors.New("SSH_PASSWORD environment variable is required when SSH_IDENTIFY_FILE is not set")
+		}
+		slog.Warn("Accessing the remote server with a password is not recommended")
+		clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
+
+ }
+ // Create a new SCP client
+ client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
+
+ // Connect to the remote server
+ err = client.Connect()
+ if err != nil {
+		return errors.New("couldn't establish a connection to the remote server")
+ }
+
+	// Open the local file to copy
+	file, err := os.Open(filepath.Join(tmpPath, fileName))
+	if err != nil {
+		return err
+	}
+
+ // Close client connection after the file has been copied
+ defer client.Close()
+ // Close the file after it has been copied
+ defer file.Close()
+ // the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
+ err = client.CopyFromFile(context.Background(), *file, filepath.Join(remotePath, fileName), "0655")
+ if err != nil {
+ fmt.Println("Error while copying file ")
+ return err
+ }
+ return nil
+}
+
+func CopyFromRemote(fileName, remotePath string) error {
+ sshUser := os.Getenv("SSH_USER")
+ sshPassword := os.Getenv("SSH_PASSWORD")
+ sshHostName := os.Getenv("SSH_HOST_NAME")
+ sshPort := os.Getenv("SSH_PORT")
+ sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
+
+ clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
+ if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
+ clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
+
+ } else {
+		if sshPassword == "" {
+			return errors.New("SSH_PASSWORD environment variable is required when SSH_IDENTIFY_FILE is not set")
+		}
+		slog.Warn("Accessing the remote server with a password is not recommended")
+		clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
+
+ }
+ // Create a new SCP client
+ client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
+
+ // Connect to the remote server
+ err := client.Connect()
+ if err != nil {
+		return errors.New("couldn't establish a connection to the remote server")
+ }
+ // Close client connection after the file has been copied
+ defer client.Close()
+	file, err := os.OpenFile(filepath.Join(tmpPath, fileName), os.O_RDWR|os.O_CREATE, 0777)
+	if err != nil {
+		fmt.Println("Couldn't open the output file")
+		return err
+	}
+ defer file.Close()
+
+ // the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
+ err = client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName))
+
+ if err != nil {
+ fmt.Println("Error while copying file ", err)
+ return err
+ }
+ return nil
+
+}
diff --git a/pkg/scripts.go b/pkg/scripts.go
index 1011da1..27bf05f 100644
--- a/pkg/scripts.go
+++ b/pkg/scripts.go
@@ -15,7 +15,7 @@ func CreateCrontabScript(disableCompression bool, storage string) {
//task := "/usr/local/bin/backup_cron.sh"
touchCmd := exec.Command("touch", backupCronFile)
if err := touchCmd.Run(); err != nil {
- utils.Fatalf("Error creating file %s: %v\n", backupCronFile, err)
+ utils.Fatal("Error creating file %s: %v\n", backupCronFile, err)
}
var disableC = ""
if disableCompression {
@@ -37,36 +37,36 @@ bkup backup --dbname %s --port %s %v
}
if err := utils.WriteToFile(backupCronFile, scriptContent); err != nil {
- utils.Fatalf("Error writing to %s: %v\n", backupCronFile, err)
+ utils.Fatal("Error writing to %s: %v\n", backupCronFile, err)
}
chmodCmd := exec.Command("chmod", "+x", "/usr/local/bin/backup_cron.sh")
if err := chmodCmd.Run(); err != nil {
- utils.Fatalf("Error changing permissions of %s: %v\n", backupCronFile, err)
+ utils.Fatal("Error changing permissions of %s: %v\n", backupCronFile, err)
}
lnCmd := exec.Command("ln", "-s", "/usr/local/bin/backup_cron.sh", "/usr/local/bin/backup_cron")
if err := lnCmd.Run(); err != nil {
- utils.Fatalf("Error creating symbolic link: %v\n", err)
+ utils.Fatal("Error creating symbolic link: %v\n", err)
}
touchLogCmd := exec.Command("touch", cronLogFile)
if err := touchLogCmd.Run(); err != nil {
- utils.Fatalf("Error creating file %s: %v\n", cronLogFile, err)
+ utils.Fatal("Error creating file %s: %v\n", cronLogFile, err)
}
cronJob := "/etc/cron.d/backup_cron"
touchCronCmd := exec.Command("touch", cronJob)
if err := touchCronCmd.Run(); err != nil {
- utils.Fatalf("Error creating file %s: %v\n", cronJob, err)
+ utils.Fatal("Error creating file %s: %v\n", cronJob, err)
}
cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
`, os.Getenv("SCHEDULE_PERIOD"), cronLogFile)
if err := utils.WriteToFile(cronJob, cronContent); err != nil {
- utils.Fatalf("Error writing to %s: %v\n", cronJob, err)
+ utils.Fatal("Error writing to %s: %v\n", cronJob, err)
}
utils.ChangePermission("/etc/cron.d/backup_cron", 0644)
@@ -74,5 +74,5 @@ bkup backup --dbname %s --port %s %v
if err := crontabCmd.Run(); err != nil {
utils.Fatal("Error updating crontab: ", err)
}
- utils.Info("Starting backup in scheduled mode")
+ utils.Info("Backup job created.")
}
diff --git a/pkg/var.go b/pkg/var.go
index 7c4e480..c1eab81 100644
--- a/pkg/var.go
+++ b/pkg/var.go
@@ -1,18 +1,21 @@
package pkg
-const s3MountPath string = "/s3mnt"
-const s3fsPasswdFile string = "/etc/passwd-s3fs"
const cronLogFile = "/var/log/mysql-bkup.log"
+const tmpPath = "/tmp/backup"
const backupCronFile = "/usr/local/bin/backup_cron.sh"
+const algorithm = "aes256"
+const gpgExtension = "gpg"
var (
storage = "local"
file = ""
- s3Path = "/mysql-bkup"
+ dbPassword = ""
+ dbUserName = ""
dbName = ""
dbHost = ""
dbPort = "3306"
executionMode = "default"
storagePath = "/backup"
disableCompression = false
+ encryption = false
)
diff --git a/utils/s3.go b/utils/s3.go
new file mode 100644
index 0000000..30d3b4e
--- /dev/null
+++ b/utils/s3.go
@@ -0,0 +1,171 @@
+package utils
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3manager"
+ "golang.org/x/exp/slog"
+	"io"
+	"log"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strconv"
+ "time"
+)
+
+// CreateSession creates a new AWS session
+func CreateSession() (*session.Session, error) {
+	// awsVars Required environment variables for AWS S3 storage
+	var awsVars = []string{
+		"AWS_S3_ENDPOINT",
+		"AWS_S3_BUCKET_NAME",
+		"AWS_ACCESS_KEY",
+		"AWS_SECRET_KEY",
+		"AWS_REGION",
+	}
+
+ endPoint := GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
+ accessKey := GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
+ secretKey := GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
+ _ = GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
+
+	region := os.Getenv("AWS_REGION")
+	// AWS_DISABLE_SSL is optional; default to false when unset
+	awsDisableSsl := false
+	if v := os.Getenv("AWS_DISABLE_SSL"); v != "" {
+		parsed, err := strconv.ParseBool(v)
+		if err != nil {
+			Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
+		}
+		awsDisableSsl = parsed
+	}
+
+	err := CheckEnvVars(awsVars)
+	if err != nil {
+		slog.Error(fmt.Sprintf("Error checking environment variables: %s", err))
+		os.Exit(1)
+	}
+ // S3 Config
+ s3Config := &aws.Config{
+ Credentials: credentials.NewStaticCredentials(accessKey, secretKey, ""),
+ Endpoint: aws.String(endPoint),
+ Region: aws.String(region),
+ DisableSSL: aws.Bool(awsDisableSsl),
+ S3ForcePathStyle: aws.Bool(true),
+ }
+ return session.NewSession(s3Config)
+
+}
+
+// UploadFileToS3 uploads a file to S3 with a given prefix
+func UploadFileToS3(filePath, key, bucket, prefix string) error {
+ sess, err := CreateSession()
+ if err != nil {
+ return err
+ }
+
+ svc := s3.New(sess)
+
+ file, err := os.Open(filepath.Join(filePath, key))
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ fileInfo, err := file.Stat()
+ if err != nil {
+ return err
+ }
+
+ objectKey := filepath.Join(prefix, key)
+
+	buffer := make([]byte, fileInfo.Size())
+	// io.ReadFull fails on a short read, unlike a bare file.Read
+	if _, err := io.ReadFull(file, buffer); err != nil {
+		return err
+	}
+ fileBytes := bytes.NewReader(buffer)
+ fileType := http.DetectContentType(buffer)
+
+ _, err = svc.PutObject(&s3.PutObjectInput{
+ Bucket: aws.String(bucket),
+ Key: aws.String(objectKey),
+ Body: fileBytes,
+ ContentLength: aws.Int64(fileInfo.Size()),
+ ContentType: aws.String(fileType),
+ })
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+func DownloadFile(destinationPath, key, bucket, prefix string) error {
+
+ sess, err := CreateSession()
+ if err != nil {
+ return err
+ }
+	Info("Downloading backup from S3 storage...")
+ file, err := os.Create(filepath.Join(destinationPath, key))
+ if err != nil {
+ fmt.Println("Failed to create file", err)
+ return err
+ }
+ defer file.Close()
+
+ objectKey := filepath.Join(prefix, key)
+
+ downloader := s3manager.NewDownloader(sess)
+ numBytes, err := downloader.Download(file,
+ &s3.GetObjectInput{
+ Bucket: aws.String(bucket),
+ Key: aws.String(objectKey),
+ })
+ if err != nil {
+ fmt.Println("Failed to download file", err)
+ return err
+ }
+	Info(fmt.Sprintf("Backup downloaded: %s (%d bytes)", file.Name(), numBytes))
+
+ return nil
+}
+func DeleteOldBackup(bucket, prefix string, retention int) error {
+ sess, err := CreateSession()
+ if err != nil {
+ return err
+ }
+
+ svc := s3.New(sess)
+
+	// Compute the cutoff time from the retention period (in days)
+	now := time.Now()
+	backupRetentionDays := now.AddDate(0, 0, -retention)
+
+ // List objects in the bucket
+ listObjectsInput := &s3.ListObjectsV2Input{
+ Bucket: aws.String(bucket),
+ Prefix: aws.String(prefix),
+ }
+ err = svc.ListObjectsV2Pages(listObjectsInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+ for _, object := range page.Contents {
+ if object.LastModified.Before(backupRetentionDays) {
+ // Object is older than retention days, delete it
+ _, err := svc.DeleteObject(&s3.DeleteObjectInput{
+ Bucket: aws.String(bucket),
+ Key: object.Key,
+ })
+ if err != nil {
+ log.Printf("Failed to delete object %s: %v", *object.Key, err)
+ } else {
+ fmt.Printf("Deleted object %s\n", *object.Key)
+ }
+ }
+ }
+ return !lastPage
+ })
+	if err != nil {
+		return fmt.Errorf("failed to list objects: %v", err)
+	}
+
+ fmt.Println("Finished deleting old files.")
+ return nil
+}
diff --git a/utils/utils.go b/utils/utils.go
index e17b801..16caf72 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -7,25 +7,51 @@ package utils
* @link https://github.com/jkaninda/mysql-bkup
**/
import (
+ "bytes"
"fmt"
"github.com/spf13/cobra"
+ "golang.org/x/exp/slog"
+ "io"
"io/fs"
"os"
"os/exec"
)
-func Info(v ...any) {
- fmt.Println("⒤ ", fmt.Sprint(v...))
+func Info(msg string, args ...any) {
+ if len(args) == 0 {
+ slog.Info(msg)
+ } else {
+ slog.Info(fmt.Sprintf(msg, args...))
+ }
}
-func Done(v ...any) {
- fmt.Println("✔ ", fmt.Sprint(v...))
+func Warn(msg string, args ...any) {
+ if len(args) == 0 {
+ slog.Warn(msg)
+ } else {
+ slog.Warn(fmt.Sprintf(msg, args...))
+ }
}
-func Fatal(v ...any) {
- fmt.Println("✘ ", fmt.Sprint(v...))
- os.Exit(1)
+// Error logs an error message
+func Error(msg string, args ...any) {
+ if len(args) == 0 {
+ slog.Error(msg)
+ } else {
+ slog.Error(fmt.Sprintf(msg, args...))
+ }
}
-func Fatalf(msg string, v ...any) {
- fmt.Printf("✘ "+msg, v...)
+// Done logs a success message at info level
+func Done(msg string, args ...any) {
+ if len(args) == 0 {
+ slog.Info(msg)
+ } else {
+ slog.Info(fmt.Sprintf(msg, args...))
+ }
+}
+// Fatal logs an error message and exits the program
+func Fatal(msg string, args ...any) {
+ if len(args) == 0 {
+ slog.Error(msg)
+ } else {
+ slog.Error(fmt.Sprintf(msg, args...))
+ }
os.Exit(1)
}
@@ -47,9 +73,45 @@ func WriteToFile(filePath, content string) error {
_, err = file.WriteString(content)
return err
}
+// DeleteFile removes the file at filePath
+func DeleteFile(filePath string) error {
+ err := os.Remove(filePath)
+ if err != nil {
+ return fmt.Errorf("failed to delete file: %v", err)
+ }
+ return nil
+}
+// CopyFile copies src to dst and syncs the destination to disk
+func CopyFile(src, dst string) error {
+ // Open the source file for reading
+ sourceFile, err := os.Open(src)
+ if err != nil {
+ return fmt.Errorf("failed to open source file: %v", err)
+ }
+ defer sourceFile.Close()
+
+ // Create the destination file
+ destinationFile, err := os.Create(dst)
+ if err != nil {
+ return fmt.Errorf("failed to create destination file: %v", err)
+ }
+ defer destinationFile.Close()
+
+ // Copy the content from source to destination
+ _, err = io.Copy(destinationFile, sourceFile)
+ if err != nil {
+ return fmt.Errorf("failed to copy file: %v", err)
+ }
+
+ // Flush the buffer to ensure all data is written
+ err = destinationFile.Sync()
+ if err != nil {
+ return fmt.Errorf("failed to sync destination file: %v", err)
+ }
+
+ return nil
+}
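+
+// A minimal usage sketch (hypothetical paths):
+//
+//	if err := CopyFile("/backup/db.sql.gz", "/storage/db.sql.gz"); err != nil {
+//		Fatal("Copy failed: %v", err)
+//	}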
func ChangePermission(filePath string, mod int) {
if err := os.Chmod(filePath, fs.FileMode(mod)); err != nil {
- Fatalf("Error changing permissions of %s: %v\n", filePath, err)
+ Fatal("Error changing permissions of %s: %v\n", filePath, err)
}
}
@@ -69,15 +131,31 @@ func IsDirEmpty(name string) (bool, error) {
// TestDatabaseConnection tests the database connection
func TestDatabaseConnection() {
- Info("Testing database connection...")
- // Test database connection
- cmd := exec.Command("mysql", "-h", os.Getenv("DB_HOST"), "-P", os.Getenv("DB_PORT"), "-u", os.Getenv("DB_USERNAME"), "--password="+os.Getenv("DB_PASSWORD"), os.Getenv("DB_NAME"), "-e", "quit")
- err := cmd.Run()
- if err != nil {
- Fatal("Error testing database connection:", err)
+ dbHost := os.Getenv("DB_HOST")
+ dbPassword := os.Getenv("DB_PASSWORD")
+ dbUserName := os.Getenv("DB_USERNAME")
+ dbName := os.Getenv("DB_NAME")
+ dbPort := os.Getenv("DB_PORT")
+ if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
+ Fatal("Please make sure all required database environment variables are set")
+ } else {
+ Info("Connecting to database ...")
+
+ cmd := exec.Command("mysql", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName, "-e", "quit")
+
+ // Capture the output
+ var out bytes.Buffer
+ cmd.Stdout = &out
+ cmd.Stderr = &out
+ err := cmd.Run()
+ if err != nil {
+ slog.Error(fmt.Sprintf("Error testing database connection: %v\nOutput: %s\n", err, out.String()))
+ os.Exit(1)
+
+ }
+ Info("Successfully connected to database")
}
-
}
func GetEnv(cmd *cobra.Command, flagName, envName string) string {
value, _ := cmd.Flags().GetString(flagName)
@@ -109,6 +187,37 @@ func SetEnv(key, value string) {
return
}
}
+// GetEnvVariable returns the value of envName, falling back to the
+// deprecated oldEnvName and mirroring its value into envName
+func GetEnvVariable(envName, oldEnvName string) string {
+ value := os.Getenv(envName)
+ if value == "" {
+ value = os.Getenv(oldEnvName)
+ if value != "" {
+ Warn("%s is deprecated, please use %s instead!", oldEnvName, envName)
+ if err := os.Setenv(envName, value); err != nil {
+ Error("Failed to set %s: %v", envName, err)
+ }
+ }
+ }
+ return value
+}
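+
+// A minimal usage sketch (the variable names below are hypothetical):
+//
+//	storagePath := GetEnvVariable("STORAGE_PATH", "S3_PATH")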
func ShowHistory() {
}
+
+// CheckEnvVars checks if all the specified environment variables are set
+func CheckEnvVars(vars []string) error {
+ missingVars := []string{}
+
+ for _, v := range vars {
+ if os.Getenv(v) == "" {
+ missingVars = append(missingVars, v)
+ }
+ }
+
+ if len(missingVars) > 0 {
+ return fmt.Errorf("missing environment variables: %v", missingVars)
+ }
+
+ return nil
+}
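+
+// A minimal usage sketch using the database variables referenced above:
+//
+//	required := []string{"DB_HOST", "DB_NAME", "DB_USERNAME", "DB_PASSWORD"}
+//	if err := CheckEnvVars(required); err != nil {
+//		Fatal("Missing configuration: %v", err)
+//	}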