Compare commits


1 Commit

Author SHA1 Message Date
0df14f37b4 Merge pull request #159 from jkaninda/refactor
chore: add convert bytes to a human-readable string with the appropri…
2024-12-12 13:29:22 +01:00
32 changed files with 813 additions and 1237 deletions

View File

@@ -1,7 +1,7 @@
name: Build
on:
push:
branches: ['nightly']
branches: ['develop']
env:
BUILDKIT_IMAGE: jkaninda/mysql-bkup
jobs:
@@ -28,7 +28,7 @@ jobs:
file: "./Dockerfile"
platforms: linux/amd64,linux/arm64,linux/arm/v7
build-args: |
appVersion=nightly
appVersion=develop-${{ github.sha }}
tags: |
"${{vars.BUILDKIT_IMAGE}}:nightly"
"${{vars.BUILDKIT_IMAGE}}:develop-${{ github.sha }}"

View File

@@ -188,12 +188,13 @@ Documentation references Docker Hub, but all examples will work using ghcr.io ju
## References
We created this image as a simpler and more lightweight alternative to existing solutions. Here's why:
We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
- **Lightweight:** Written in Go, the image is optimized for performance and minimal resource usage.
- **Multi-Architecture Support:** Supports `arm64` and `arm/v7` architectures.
- **Docker Swarm Support:** Fully compatible with Docker in Swarm mode.
- **Kubernetes Support:** Designed to work seamlessly with Kubernetes.
- The original image is based on `Alpine` and requires additional tools, making it heavy.
- This image is written in Go.
- `arm64` and `arm/v7` architectures are supported.
- Docker in Swarm mode is supported.
- Kubernetes is supported.
## License

View File

@@ -45,10 +45,9 @@ var BackupCmd = &cobra.Command{
func init() {
//Backup
BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp, azure")
BackupCmd.PersistentFlags().StringP("path", "P", "", "Storage path without file name. e.g: /custom_path or ssh remote path `/home/foo/backup`")
BackupCmd.PersistentFlags().StringP("cron-expression", "e", "", "Backup cron expression (e.g., `0 0 * * *` or `@daily`)")
BackupCmd.PersistentFlags().StringP("config", "c", "", "Configuration file for multi database backup. (e.g: `/backup/config.yaml`)")
BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp")
BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
BackupCmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression")
BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
}
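Taken together, these flags compose on the command line. A hypothetical invocation combining them might look like this (storage, path, and schedule values are placeholders):
```shell
# Back up to S3 into a custom folder on a daily schedule, without compression.
backup --storage s3 --path /my-custom-path --cron-expression "@daily" --disable-compression -d database
```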

View File

@@ -20,7 +20,7 @@ description: >- # this means to ignore newlines until "baseurl:"
It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
baseurl: "" # the subpath of your site, e.g. /blog
url: "" # the base hostname & protocol for your site, e.g. http://example.com
url: "jkaninda.github.io/mysql-bkup/" # the base hostname & protocol for your site, e.g. http://example.com
twitter_username: jonaskaninda
github_username: jkaninda

View File

@@ -4,43 +4,22 @@ layout: default
parent: How Tos
nav_order: 5
---
# Azure Blob storage
# Backup to Azure Blob Storage
{: .note }
As described in the local backup section, to change the storage of your backup and use Azure Blob as storage, you need to add `--storage azure` (`-s azure`).
You can also specify a folder where you want to save your data by adding the `--path my-custom-path` flag.
To store your backups on Azure Blob Storage, you can configure the backup process to use the `--storage azure` option.
This section explains how to set up and configure Azure Blob-based backups.
## Backup to Azure Blob storage
---
## Configuration Steps
1. **Specify the Storage Type**
Add the `--storage azure` flag to your backup command.
2. **Set the Blob Path**
Optionally, specify a custom folder within your Azure Blob container where backups will be stored using the `--path` flag.
Example: `--path my-custom-path`.
3. **Required Environment Variables**
The following environment variables are mandatory for Azure Blob-based backups:
- `AZURE_STORAGE_CONTAINER_NAME`: The name of the Azure Blob container where backups will be stored.
- `AZURE_STORAGE_ACCOUNT_NAME`: The name of your Azure Storage account.
- `AZURE_STORAGE_ACCOUNT_KEY`: The access key for your Azure Storage account.
---
## Example Configuration
Below is an example `docker-compose.yml` configuration for backing up to Azure Blob Storage:
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage azure -d database --path my-custom-path
@@ -50,23 +29,16 @@ services:
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## Azure Blob Configuration
## Azure Blob configurations
- AZURE_STORAGE_CONTAINER_NAME=backup-container
- AZURE_STORAGE_ACCOUNT_NAME=account-name
- AZURE_STORAGE_ACCOUNT_KEY=Ppby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
# Ensure the mysql-bkup container is connected to the same network as your database
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
---
## Key Notes
- **Custom Path**: Use the `--path` flag to specify a folder within your Azure Blob container for organizing backups.
- **Security**: Ensure your `AZURE_STORAGE_ACCOUNT_KEY` is kept secure and not exposed in public repositories.
- **Compatibility**: This configuration works with Azure Blob Storage and other compatible storage solutions.
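For a one-off run without Compose, the same settings can be passed directly to the Docker CLI. A minimal sketch, assuming the network name and all credentials are placeholders:
```shell
docker run --rm --network your_network_name \
 -e "DB_HOST=mysql" \
 -e "DB_NAME=database" \
 -e "DB_USERNAME=username" \
 -e "DB_PASSWORD=password" \
 -e "AZURE_STORAGE_CONTAINER_NAME=backup-container" \
 -e "AZURE_STORAGE_ACCOUNT_NAME=account-name" \
 -e "AZURE_STORAGE_ACCOUNT_KEY=xxxx" \
 jkaninda/mysql-bkup backup --storage azure -d database --path my-custom-path
```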

View File

@@ -4,72 +4,41 @@ layout: default
parent: How Tos
nav_order: 4
---
# Backup to FTP remote server
# Backup to FTP Remote Server
To store your backups on an FTP remote server, you can configure the backup process to use the `--storage ftp` option.
As described in the SSH backup section, to change the storage of your backup and use an FTP remote server as storage, you need to add `--storage ftp`.
You need to add the full remote path by adding the `--path /home/jkaninda/backups` flag or using the `REMOTE_PATH` environment variable.
This section explains how to set up and configure FTP-based backups.
{: .note }
These environment variables are required for FTP backup: `FTP_HOST`, `FTP_USER`, `FTP_PASSWORD`, `FTP_PORT`, and `REMOTE_PATH`.
---
## Configuration Steps
1. **Specify the Storage Type**
Add the `--storage ftp` flag to your backup command.
2. **Set the Remote Path**
Define the full remote path where backups will be stored using the `--path` flag or the `REMOTE_PATH` environment variable.
Example: `--path /home/jkaninda/backups`.
3. **Required Environment Variables**
The following environment variables are mandatory for FTP-based backups:
- `FTP_HOST`: The hostname or IP address of the FTP server.
- `FTP_PORT`: The FTP port (default is `21`).
- `FTP_USER`: The username for FTP authentication.
- `FTP_PASSWORD`: The password for FTP authentication.
- `REMOTE_PATH`: The directory on the FTP server where backups will be stored.
---
## Example Configuration
Below is an example `docker-compose.yml` configuration for backing up to an FTP remote server:
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage ftp -d database
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_HOST=postgres
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## FTP Configuration
## FTP config
- FTP_HOST="hostname"
- FTP_PORT=21
- FTP_USER=user
- FTP_PASSWORD=password
- REMOTE_PATH=/home/jkaninda/backups
# Ensure the mysql-bkup container is connected to the same network as your database
# pg-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
---
## Key Notes
- **Security**: FTP transmits data, including passwords, in plaintext. For better security, consider using SFTP (SSH File Transfer Protocol) or FTPS (FTP Secure) if supported by your server.
- **Remote Path**: Ensure the `REMOTE_PATH` directory exists on the FTP server and is writable by the specified `FTP_USER`.
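The same FTP backup can also be run as a one-off with the Docker CLI. A minimal sketch, assuming the network name and credentials are placeholders:
```shell
docker run --rm --network your_network_name \
 -e "DB_HOST=mysql" \
 -e "DB_NAME=database" \
 -e "DB_USERNAME=username" \
 -e "DB_PASSWORD=password" \
 -e "FTP_HOST=hostname" \
 -e "FTP_PORT=21" \
 -e "FTP_USER=user" \
 -e "FTP_PASSWORD=password" \
 -e "REMOTE_PATH=/home/jkaninda/backups" \
 jkaninda/mysql-bkup backup --storage ftp -d database
```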

View File

@@ -4,123 +4,85 @@ layout: default
parent: How Tos
nav_order: 2
---
# Backup to AWS S3
To store your backups on AWS S3, you can configure the backup process to use the `--storage s3` option. This section explains how to set up and configure S3-based backups.
{: .note }
As described in the local backup section, to change the storage of your backup and use S3 as storage, you need to add `--storage s3` (`-s s3`).
You can also specify a folder where you want to save your data by adding the `--path /my-custom-path` flag.
---
## Configuration Steps
## Backup to S3
1. **Specify the Storage Type**
Add the `--storage s3` flag to your backup command.
2. **Set the S3 Path**
Optionally, specify a custom folder within your S3 bucket where backups will be stored using the `--path` flag.
Example: `--path /my-custom-path`.
3. **Required Environment Variables**
The following environment variables are mandatory for S3-based backups:
- `AWS_S3_ENDPOINT`: The S3 endpoint URL (e.g., `https://s3.amazonaws.com`).
- `AWS_S3_BUCKET_NAME`: The name of the S3 bucket where backups will be stored.
- `AWS_REGION`: The AWS region where the bucket is located (e.g., `us-west-2`).
- `AWS_ACCESS_KEY`: Your AWS access key.
- `AWS_SECRET_KEY`: Your AWS secret key.
- `AWS_DISABLE_SSL`: Set to `"true"` if using an S3 alternative like Minio without SSL (default is `"false"`).
- `AWS_FORCE_PATH_STYLE`: Set to `"true"` if using an S3 alternative like Minio (default is `"false"`).
---
## Example Configuration
Below is an example `docker-compose.yml` configuration for backing up to AWS S3:
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage s3 -d database --path /my-custom-path
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS Configuration
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION=us-west-2
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
## Optional: Disable SSL for S3 alternatives like Minio
- AWS_DISABLE_SSL="false"
## Optional: Enable path-style access for S3 alternatives like Minio
- AWS_FORCE_PATH_STYLE=false
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
---
## Recurring Backups to S3
To schedule recurring backups to S3, use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable. This allows you to define a cron schedule for automated backups.
### Example: Recurring Backup Configuration
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage s3 -d database --cron-expression "0 1 * * *"
command: backup --storage s3 -d database --path /my-custom-path
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS Configuration
## AWS configurations
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION=us-west-2
- AWS_REGION="us-west-2"
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
## Optional: Define a cron schedule for recurring backups
#- BACKUP_CRON_EXPRESSION=0 1 * * *
## Optional: Delete old backups after a specified number of days
#- BACKUP_RETENTION_DAYS=7
## Optional: Disable SSL for S3 alternatives like Minio
## In case you are using an S3 alternative such as Minio and your Minio instance is not secured, change it to true
- AWS_DISABLE_SSL="false"
## Optional: Enable path-style access for S3 alternatives like Minio
- AWS_FORCE_PATH_STYLE=false
# Ensure the pg-bkup container is connected to the same network as your database
- AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
---
### Recurring backups to S3
## Key Notes
As explained above, you just need to add the AWS environment variables and specify the storage type `--storage s3`.
If you need recurring backups, you can use the `--cron-expression "0 1 * * *"` flag or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.
- **Cron Expression**: Use the `--cron-expression` flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example, `0 1 * * *` runs the backup daily at 1:00 AM.
- **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days.
- **S3 Alternatives**: If using an S3 alternative like Minio, set `AWS_DISABLE_SSL="true"` and `AWS_FORCE_PATH_STYLE="true"` as needed.
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage s3 -d my-database --cron-expression "0 1 * * *"
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS configurations
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION="us-west-2"
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
# - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
# Delete old backups created more than the specified number of days ago
#- BACKUP_RETENTION_DAYS=7
## In case you are using an S3 alternative such as Minio and your Minio instance is not secured, change it to true
- AWS_DISABLE_SSL="false"
- AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
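For a one-off S3 backup without Compose, an equivalent Docker CLI invocation might look like this (a sketch; bucket, region, and keys are placeholders):
```shell
docker run --rm --network your_network_name \
 -e "DB_HOST=mysql" \
 -e "DB_NAME=database" \
 -e "DB_USERNAME=username" \
 -e "DB_PASSWORD=password" \
 -e "AWS_S3_ENDPOINT=https://s3.amazonaws.com" \
 -e "AWS_S3_BUCKET_NAME=backup" \
 -e "AWS_REGION=us-west-2" \
 -e "AWS_ACCESS_KEY=xxxx" \
 -e "AWS_SECRET_KEY=xxxxx" \
 jkaninda/mysql-bkup backup --storage s3 -d database --path /my-custom-path
```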

View File

@@ -1,129 +1,91 @@
---
title: Backup to SSH or SFTP
title: Backup to SSH
layout: default
parent: How Tos
nav_order: 3
---
# Backup to SFTP or SSH Remote Server
# Backup to SSH remote server
To store your backups on an `SFTP` or `SSH` remote server instead of the default storage, you can configure the backup process to use the `--storage ssh` or `--storage remote` option.
This section explains how to set up and configure SSH-based backups.
---
As described in the S3 backup section, to change the storage of your backup and use an SSH remote server as storage, you need to add `--storage ssh` or `--storage remote`.
You need to add the full remote path by adding the `--path /home/jkaninda/backups` flag or using the `REMOTE_PATH` environment variable.
## Configuration Steps
{: .note }
These environment variables are required for SSH backup: `SSH_HOST`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, `SSH_PORT`, or `SSH_PASSWORD` if you don't use a private key to access your server.
Accessing the remote server with a password is not recommended; use a private key instead.
1. **Specify the Storage Type**
Add the `--storage ssh` or `--storage remote` flag to your backup command.
2. **Set the Remote Path**
Define the full remote path where backups will be stored using the `--path` flag or the `REMOTE_PATH` environment variable.
Example: `--path /home/jkaninda/backups`.
3. **Required Environment Variables**
The following environment variables are mandatory for SSH-based backups:
- `SSH_HOST`: The hostname or IP address of the remote server.
- `SSH_USER`: The username for SSH authentication.
- `REMOTE_PATH`: The directory on the remote server where backups will be stored.
- `SSH_IDENTIFY_FILE`: The path to the private key file for SSH authentication.
- `SSH_PORT`: The SSH port (default is `22`).
- `SSH_PASSWORD`: (Optional) Use this only if you are not using a private key for authentication.
{: .note }
**Security Recommendation**: Using a private key (`SSH_IDENTIFY_FILE`) is strongly recommended over password-based authentication (`SSH_PASSWORD`) for better security.
---
## Example Configuration
Below is an example `docker-compose.yml` configuration for backing up to an SSH remote server:
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage remote -d database
volumes:
- ./id_ed25519:/tmp/id_ed25519
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## SSH Configuration
- SSH_HOST="hostname"
- SSH_PORT=22
- SSH_USER=user
- REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
## Optional: Use password instead of private key (not recommended)
#- SSH_PASSWORD=password
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
---
## Recurring Backups to SSH Remote Server
To schedule recurring backups, you can use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable.
This allows you to define a cron schedule for automated backups.
### Example: Recurring Backup Configuration
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup -d database --storage ssh --cron-expression "@daily"
command: backup --storage remote -d database
volumes:
- ./id_ed25519:/tmp/id_ed25519
- ./id_ed25519:/tmp/id_ed25519"
environment:
- DB_PORT=3306
- DB_HOST=postgres
- DB_NAME=database
- DB_HOST=mysql
#- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## SSH Configuration
## SSH config
- SSH_HOST="hostname"
- SSH_PORT=22
- SSH_USER=user
- REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
## Optional: Delete old backups after a specified number of days
#- BACKUP_RETENTION_DAYS=7
## Optional: Use password instead of private key (not recommended)
## We advise you to use a private key instead of a password
#- SSH_PASSWORD=password
# Ensure the mysql-bkup container is connected to the same network as your database
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
---
## Key Notes
### Recurring backups to SSH remote server
- **Cron Expression**: Use the `--cron-expression` flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example, `0 1 * * *` runs the backup daily at 1:00 AM.
- **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days.
- **Security**: Always prefer private key authentication (`SSH_IDENTIFY_FILE`) over password-based authentication (`SSH_PASSWORD`) for enhanced security.
As explained above, you just need to add the required environment variables and specify the storage type `--storage ssh`.
You can use `--cron-expression "* * * * *"` or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.
---
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup -d database --storage ssh --cron-expression "0 1 * * *"
volumes:
- ./id_ed25519:/tmp/id_ed25519
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## SSH config
- SSH_HOST="hostname"
- SSH_PORT=22
- SSH_USER=user
- REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
# - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
# Delete old backups created more than the specified number of days ago
#- BACKUP_RETENTION_DAYS=7
## We advise you to use a private key instead of a password
#- SSH_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
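For a one-off SSH backup without Compose, the private key can be mounted into the container and referenced via `SSH_IDENTIFY_FILE`. A minimal sketch with placeholder host and credentials:
```shell
docker run --rm --network your_network_name \
 -v $PWD/id_ed25519:/tmp/id_ed25519 \
 -e "DB_HOST=mysql" \
 -e "DB_NAME=database" \
 -e "DB_USERNAME=username" \
 -e "DB_PASSWORD=password" \
 -e "SSH_HOST=hostname" \
 -e "SSH_PORT=22" \
 -e "SSH_USER=user" \
 -e "REMOTE_PATH=/home/jkaninda/backups" \
 -e "SSH_IDENTIFY_FILE=/tmp/id_ed25519" \
 jkaninda/mysql-bkup backup --storage ssh -d database
```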

View File

@@ -5,35 +5,26 @@ parent: How Tos
nav_order: 1
---
# Backup Database
# Backup database
To back up your database, use the `backup` command.
This section explains how to configure and run backups, including recurring backups, using Docker or Kubernetes.
---
## Default Configuration
- **Storage**: By default, backups are stored locally in the `/backup` directory.
- **Compression**: Backups are compressed using `gzip` by default. Use the `--disable-compression` flag to disable compression.
- **Security**: It is recommended to create a dedicated user with read-only access for backup tasks.
To back up the database, you need to add the `backup` command.
{: .note }
The backup process supports recurring backups on Docker or Docker Swarm. On Kubernetes, it can be deployed as a CronJob.
The default storage is local storage mounted to __/backup__. The backup is compressed by default using gzip. The flag __`disable-compression`__ can be used when you need to disable backup compression.
---
{: .warning }
Creating a user with read-only access for backup tasks is recommended!
## Example: Basic Backup Configuration
The backup process can be run in scheduled mode for recurring backups.
It handles __recurring__ backups of a MySQL database on Docker and can be deployed as a __CronJob on Kubernetes__ using local, AWS S3, or SSH-compatible storage.
Below is an example `docker-compose.yml` configuration for backing up a database:
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup -d database
@@ -45,47 +36,36 @@ services:
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
# Ensure the mysql-bkup container is connected to the same network as your database
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
---
### Backup using Docker CLI
## Backup Using Docker CLI
You can also run backups directly using the Docker CLI:
```bash
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d database_name
```
---
If you need recurring backups, you can use the `--cron-expression "0 1 * * *"` flag or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.
## Recurring Backups
To schedule recurring backups, use the `--cron-expression (-e)` flag or the `BACKUP_CRON_EXPRESSION` environment variable. This allows you to define a cron schedule for automated backups.
### Example: Recurring Backup Configuration
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup -d database --cron-expression @midnight
command: backup -d database --cron-expression "0 1 * * *"
volumes:
- ./backup:/backup
environment:
@@ -94,24 +74,13 @@ services:
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## Optional: Define a cron schedule for recurring backups
- BACKUP_CRON_EXPRESSION=@midnight
## Optional: Delete old backups after a specified number of days
- BACKUP_CRON_EXPRESSION=0 1 * * *
# Delete old backups created more than the specified number of days ago
#- BACKUP_RETENTION_DAYS=7
# Ensure the mysql-bkup container is connected to the same network as your database
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
---
## Key Notes
- **Cron Expression**: Use the `--cron-expression (-e)` flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example:
- `@midnight`: Runs the backup daily at midnight.
- `0 1 * * *`: Runs the backup daily at 1:00 AM.
- **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days.
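For instance, a recurring local backup at midnight can also be started from the Docker CLI (a sketch; network and credentials are placeholders):
```shell
docker run --rm --network your_network_name \
 -v $PWD/backup:/backup/ \
 -e "DB_HOST=mysql" \
 -e "DB_USERNAME=username" \
 -e "DB_PASSWORD=password" \
 jkaninda/mysql-bkup backup -d database --cron-expression "@midnight"
```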

View File

@@ -5,17 +5,12 @@ parent: How Tos
nav_order: 9
---
# Deploy on Kubernetes
## Deploy on Kubernetes
To deploy MySQL Backup on Kubernetes, you can use a `Job` for one-time backups or restores, and a `CronJob` for recurring backups.
To deploy MySQL Backup on Kubernetes, you can use a Job to back up or restore your database.
For recurring backups you can use a CronJob; you don't need to run it in scheduled mode, as described below.
Below are examples for different use cases.
---
## Backup Job to S3 Storage
This example demonstrates how to configure a Kubernetes `Job` to back up a MySQL database to an S3-compatible storage.
## Backup to S3 storage
```yaml
apiVersion: batch/v1
@@ -26,53 +21,50 @@ spec:
template:
spec:
containers:
- name: mysql-bkup
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup --storage s3
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Use Kubernetes Secrets for sensitive data like passwords
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
- name: AWS_FORCE_PATH_STYLE
value: "false"
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup --storage s3
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
- name: AWS_FORCE_PATH_STYLE
value: "false"
restartPolicy: Never
```
---
## Backup Job to SSH Remote Server
This example demonstrates how to configure a Kubernetes `Job` to back up a MySQL database to an SSH remote server.
## Backup Job to SSH remote server
```yaml
apiVersion: batch/v1
@@ -85,14 +77,15 @@ spec:
spec:
containers:
- name: mysql-bkup
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup --storage ssh --disable-compression
- /bin/sh
- -c
- backup --storage ssh
resources:
limits:
memory: "128Mi"
@@ -105,8 +98,8 @@ spec:
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "postgres"
# Use Kubernetes Secrets for sensitive data like passwords
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
@@ -119,18 +112,14 @@ spec:
value: "xxxx"
- name: SSH_REMOTE_PATH
value: "/home/toto/backup"
# Optional: Required if you want to encrypt your backup
# Optional, required if you want to encrypt your backup
- name: GPG_PASSPHRASE
value: "xxxx"
value: "secure-passphrase"
restartPolicy: Never
```
---
## Restore Job
This example demonstrates how to configure a Kubernetes `Job` to restore a MySQL database from a backup stored on an SSH remote server.
```yaml
apiVersion: batch/v1
kind: Job
@@ -142,51 +131,48 @@ spec:
spec:
containers:
- name: mysql-bkup
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- restore --storage ssh --file store_20231219_022941.sql.gz
- /bin/sh
- -c
- backup --storage ssh --file store_20231219_022941.sql.gz
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "postgres"
# Use Kubernetes Secrets for sensitive data like passwords
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "xxx"
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "xxx"
- name: SSH_PASSWORD
value: "xxxx"
- name: SSH_REMOTE_PATH
value: "/home/toto/backup"
# Optional: Required if your backup was encrypted
#- name: GPG_PASSPHRASE
# value: "xxxx"
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "xxx"
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "xxx"
- name: SSH_PASSWORD
value: "xxxx"
- name: SSH_REMOTE_PATH
value: "/home/xxxx/backup"
# Optional, required if your backup was encrypted
#- name: GPG_PASSPHRASE
# value: "xxxx"
restartPolicy: Never
```
---
## Recurring Backup with CronJob
This example demonstrates how to configure a Kubernetes `CronJob` for recurring backups to an SSH remote server.
## Recurring backup
```yaml
apiVersion: batch/v1
@@ -201,51 +187,51 @@ spec:
spec:
containers:
- name: mysql-bkup
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup --storage ssh --disable-compression
- bkup
- backup
- --storage
- ssh
- --disable-compression
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "test"
- name: DB_USERNAME
value: "postgres"
# Use Kubernetes Secrets for sensitive data like passwords
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "192.168.1.16"
- name: SSH_PORT
value: "2222"
- name: SSH_USER
value: "jkaninda"
- name: SSH_REMOTE_PATH
value: "/config/backup"
- name: SSH_PASSWORD
value: "password"
# Optional: Required if you want to encrypt your backup
#- name: GPG_PASSPHRASE
# value: "xxx"
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "username"
- name: DB_USERNAME
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "xxx"
- name: SSH_PORT
value: "xxx"
- name: SSH_USER
value: "jkaninda"
- name: SSH_REMOTE_PATH
value: "/home/jkaninda/backup"
- name: SSH_PASSWORD
value: "password"
# Optional, required if you want to encrypt your backup
#- name: GPG_PASSPHRASE
# value: "xxx"
restartPolicy: Never
```
---
## Kubernetes Rootless
## Kubernetes Rootless Deployment
This example demonstrates how to run the backup container in a rootless environment, suitable for platforms like OpenShift.
This image also supports the Kubernetes security context, so you can run it in a rootless environment.
It has been tested on OpenShift and works well.
```yaml
apiVersion: batch/v1
@@ -263,52 +249,53 @@ spec:
runAsGroup: 3000
fsGroup: 2000
containers:
- name: mysql-bkup
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup --storage ssh --disable-compression
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "test"
- name: DB_USERNAME
value: "postgres"
# Use Kubernetes Secrets for sensitive data like passwords
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "192.168.1.16"
- name: SSH_PORT
value: "2222"
- name: SSH_USER
value: "jkaninda"
- name: SSH_REMOTE_PATH
value: "/config/backup"
- name: SSH_PASSWORD
value: "password"
# Optional: Required if you want to encrypt your backup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
- ssh
- --disable-compression
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "xxx"
- name: DB_USERNAME
value: "xxx"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "xxx"
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "jkaninda"
- name: SSH_REMOTE_PATH
value: "/home/jkaninda/backup"
- name: SSH_PASSWORD
value: "password"
# Optional, required if you want to encrypt your backup
#- name: GPG_PASSPHRASE
# value: "xxx"
restartPolicy: OnFailure
```
---
## Migrate Database
This example demonstrates how to configure a Kubernetes `Job` to migrate a MySQL database from one server to another.
## Migrate database
```yaml
apiVersion: batch/v1
@@ -321,9 +308,10 @@ spec:
spec:
containers:
- name: mysql-bkup
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
@@ -334,11 +322,11 @@ spec:
memory: "128Mi"
cpu: "500m"
env:
## Source Database
## Source Database
- name: DB_HOST
value: "postgres"
value: "mysql"
- name: DB_PORT
value: "3306"
value: "3306"
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
@@ -347,7 +335,7 @@ spec:
value: "password"
## Target Database
- name: TARGET_DB_HOST
value: "target-postgres"
value: "target-mysql"
- name: TARGET_DB_PORT
value: "3306"
- name: TARGET_DB_NAME
@@ -357,13 +345,4 @@ spec:
- name: TARGET_DB_PASSWORD
value: "password"
restartPolicy: Never
```
---
## Key Notes
- **Security**: Always use Kubernetes Secrets for sensitive data like passwords and access keys.
- **Resource Limits**: Adjust resource limits (`memory` and `cpu`) based on your workload requirements.
- **Cron Schedule**: Use standard cron expressions for scheduling recurring backups.
- **Rootless Deployment**: The image supports running in rootless environments, making it suitable for platforms like OpenShift.
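For example, the database password can be created as a Kubernetes Secret once and then referenced from the Job via `valueFrom.secretKeyRef` instead of a literal `value`. A minimal sketch with placeholder names:
```shell
# Create a Secret holding the database password (name and value are placeholders).
kubectl create secret generic mysql-bkup-secrets \
  --from-literal=DB_PASSWORD='password'
```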

View File

@@ -1,38 +1,47 @@
---
title: Encrypt backups using GPG
title: Encrypt backups
layout: default
parent: How Tos
nav_order: 8
---
# Encrypt Backup
# Encrypt backup
The image supports encrypting backups using one of two methods: **GPG with a passphrase** or **GPG with a public key**. When a `GPG_PASSPHRASE` or `GPG_PUBLIC_KEY` environment variable is set, the backup archive will be encrypted and saved as a `.sql.gpg` or `.sql.gz.gpg` file.
The image supports encrypting backups using one of two available methods: GPG with a passphrase or GPG with a public key.
The image supports encrypting backups using GPG out of the box. If a `GPG_PASSPHRASE` or `GPG_PUBLIC_KEY` environment variable is set, the backup archive will be encrypted using the given key and saved as a `.sql.gpg` or `.sql.gz.gpg` file.
{: .warning }
To restore an encrypted backup, you must provide the same GPG passphrase or private key used during the backup process.
To restore an encrypted backup, you need to provide the same GPG passphrase used during the backup process.
---
- GPG home directory `/config/gnupg`
- Cipher algorithm `aes256`
## Key Features
{: .note }
A backup encrypted using the `GPG passphrase` method can be restored automatically; there is no need to decrypt it before restoration.
Suppose you used a GPG public key during the backup process. In that case, you need to decrypt your backup before restoration, because decryption using a `GPG private` key is not fully supported.
- **Cipher Algorithm**: `aes256`
- **Automatic Restoration**: Backups encrypted with a GPG passphrase can be restored automatically without manual decryption.
- **Manual Decryption**: Backups encrypted with a GPG public key require manual decryption before restoration.
To decrypt manually, you need to install `gnupg`:
---
```shell
gpg --batch --passphrase "my-passphrase" \
--output database_20240730_044201.sql.gz \
--decrypt database_20240730_044201.sql.gz.gpg
```
Using your private key
## Using GPG Passphrase
```shell
gpg --output database_20240730_044201.sql.gz --decrypt database_20240730_044201.sql.gz.gpg
```
## Using GPG passphrase
To encrypt backups using a GPG passphrase, set the `GPG_PASSPHRASE` environment variable. The backup will be encrypted and can be restored automatically.
### Example Configuration
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup -d database
@@ -46,34 +55,26 @@ services:
- DB_PASSWORD=password
## Required to encrypt backup
- GPG_PASSPHRASE=my-secure-passphrase
# Ensure the pg-bkup container is connected to the same network as your database
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
---
## Using GPG Public Key
To encrypt backups using a GPG public key, set the `GPG_PUBLIC_KEY` environment variable to the path of your public key file. Backups encrypted with a public key require manual decryption before restoration.
### Example Configuration
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup -d database
volumes:
- ./backup:/backup
- ./public_key.asc:/config/public_key.asc
environment:
- DB_PORT=3306
- DB_HOST=mysql
@@ -82,39 +83,9 @@ services:
- DB_PASSWORD=password
## Required to encrypt backup
- GPG_PUBLIC_KEY=/config/public_key.asc
# Ensure the pg-bkup container is connected to the same network as your database
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
---
## Manual Decryption
If you encrypted your backup using a GPG public key, you must manually decrypt it before restoration. Use the `gnupg` tool for decryption.
### Decrypt Using a Passphrase
```bash
gpg --batch --passphrase "my-passphrase" \
--output database_20240730_044201.sql.gz \
--decrypt database_20240730_044201.sql.gz.gpg
```
### Decrypt Using a Private Key
```bash
gpg --output database_20240730_044201.sql.gz \
--decrypt database_20240730_044201.sql.gz.gpg
```
---
## Key Notes
- **Automatic Restoration**: Backups encrypted with a GPG passphrase can be restored directly without manual decryption.
- **Manual Decryption**: Backups encrypted with a GPG public key require manual decryption using the corresponding private key.
- **Security**: Always keep your GPG passphrase and private key secure. Use Kubernetes Secrets or other secure methods to manage sensitive data.
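To produce the `public_key.asc` file mounted in the example above, you can export an existing GPG public key in ASCII-armored form (a sketch; the key identifier is a placeholder):
```shell
gpg --armor --export you@example.com > public_key.asc
```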

View File

@@ -5,102 +5,76 @@ parent: How Tos
nav_order: 10
---
# Migrate Database
# Migrate database
To migrate a MySQL database from a source to a target database, you can use the `migrate` command. This feature simplifies the process by combining the backup and restore operations into a single step.
To migrate the database, you need to add the `migrate` command.
{: .note }
The `migrate` command eliminates the need for separate backup and restore operations. It directly transfers data from the source database to the target database.
The MySQL backup image has another great feature: migrating your database from a source database to a target.
As you know, restoring a database from a source to a target database normally takes two operations: backing up the source database and then restoring that backup to the target database.
Instead of proceeding like that, you can use the integrated `migrate` feature, which migrates your database in a single operation.
{: .warning }
The `migrate` operation is **irreversible**. Always back up your target database before performing this action.
The `migrate` operation is irreversible; please back up your target database before this action.
---
## Configuration Steps
1. **Source Database**: Provide connection details for the source database.
2. **Target Database**: Provide connection details for the target database.
3. **Run the Migration**: Use the `migrate` command to initiate the migration.
---
## Example: Docker Compose Configuration
Below is an example `docker-compose.yml` configuration for migrating a database:
### Docker compose
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: migrate
volumes:
- ./backup:/backup
environment:
## Source Database
## Source database
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## Target Database
- TARGET_DB_HOST=target-postgres
## Target database
- TARGET_DB_HOST=target-mysql
- TARGET_DB_PORT=3306
- TARGET_DB_NAME=dbname
- TARGET_DB_USERNAME=username
- TARGET_DB_PASSWORD=password
# Ensure the mysql-bkup container is connected to the same network as your database
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
---
## Migrate Database Using Docker CLI
### Migrate database using Docker CLI
You can also run the migration directly using the Docker CLI. Below is an example:
### Environment Variables
Save your source and target database connection details in an environment file (e.g., `your-env`):
```bash
## Source Database
DB_HOST=postgres
## Source database
DB_HOST=mysql
DB_PORT=3306
DB_NAME=dbname
DB_USERNAME=username
DB_PASSWORD=password
## Target Database
TARGET_DB_HOST=target-postgres
## Target database
TARGET_DB_HOST=target-mysql
TARGET_DB_PORT=3306
TARGET_DB_NAME=dbname
TARGET_DB_USERNAME=username
TARGET_DB_PASSWORD=password
```
### Run the Migration
```bash
docker run --rm --network your_network_name \
--env-file your-env \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup migrate
```
---
## Key Notes
- **Irreversible Operation**: The `migrate` command directly transfers data from the source to the target database. Ensure you have a backup of the target database before proceeding.
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your source and target databases.
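After the migration completes, it is worth checking the target database, for example with the standard `mysql` client (a sketch; host, credentials, and database name are placeholders):
```shell
docker run --rm --network your_network_name mysql:8 \
  mysql -h target-mysql -u username -ppassword -e "SHOW TABLES;" dbname
```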

View File

@@ -1,96 +1,63 @@
---
title: Run multiple database backup schedules in the same container
title: Run multiple backup schedules in the same container
layout: default
parent: How Tos
nav_order: 11
---
Multiple backup schedules with different configurations can be set up by mounting a configuration file into `/config/config.yaml` or `/config/config.yml`, or by defining an environment variable `BACKUP_CONFIG_FILE=/backup/config.yaml`.
# Multiple Backup Schedules
You can configure multiple backup schedules with different configurations by using a configuration file.
This file can be mounted into the container at `/config/config.yaml`, `/config/config.yml`, or specified via the `BACKUP_CONFIG_FILE` environment variable.
---
## Configuration File
The configuration file allows you to define multiple databases and their respective backup settings.
Below is an example configuration file:
## Configuration file
```yaml
# Optional: Define a global cron expression for scheduled backups
# cronExpression: "@every 20m"
cronExpression: ""
#cronExpression: "@every 20m" # Optional for scheduled backups
cronExpression: ""
databases:
- host: mysql1
port: 3306
name: database1
user: database1
password: password
path: /s3-path/database1 # For SSH or FTP, define the full path (e.g., /home/toto/backup/)
path: /s3-path/database1 #For SSH or FTP you need to define the full path (/home/toto/backup/)
- host: mysql2
port: 3306
name: lldap
user: lldap
password: password
path: /s3-path/lldap # For SSH or FTP, define the full path (e.g., /home/toto/backup/)
path: /s3-path/lldap #For SSH or FTP you need to define the full path (/home/toto/backup/)
- host: mysql3
port: 3306
name: keycloak
user: keycloak
password: password
path: /s3-path/keycloak # For SSH or FTP, define the full path (e.g., /home/toto/backup/)
path: /s3-path/keycloak #For SSH or FTP you need to define the full path (/home/toto/backup/)
- host: mysql4
port: 3306
name: joplin
user: joplin
password: password
path: /s3-path/joplin # For SSH or FTP, define the full path (e.g., /home/toto/backup/)
path: /s3-path/joplin #For SSH or FTP you need to define the full path (/home/toto/backup/)
```
---
## Docker Compose Configuration
To use the configuration file in a Docker Compose setup, mount the file and specify its path using the `BACKUP_CONFIG_FILE` environment variable.
### Example: Docker Compose File
## Docker compose file
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup #--config /backup/config.yaml # config file
command: backup
volumes:
- ./backup:/backup # Mount the backup directory
- ./config.yaml:/backup/config.yaml # Mount the configuration file
- ./backup:/backup
environment:
## Specify the path to the configuration file
## Multi backup config file
- BACKUP_CONFIG_FILE=/backup/config.yaml
# Ensure the pg-bkup container is connected to the same network as your database
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
---
## Key Notes
- **Global Cron Expression**: You can define a global `cronExpression` in the configuration file to schedule backups for all databases. If omitted, backups will run immediately.
- **Database-Specific Paths**: For SSH or FTP storage, ensure the `path` field contains the full remote path (e.g., `/home/toto/backup/`).
- **Environment Variables**: Use the `BACKUP_CONFIG_FILE` environment variable to specify the path to the configuration file.
- **Security**: Avoid hardcoding sensitive information like passwords in the configuration file. Use environment variables or secrets management tools instead.
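The config file can also be supplied to a one-off Docker CLI run by mounting it and pointing `BACKUP_CONFIG_FILE` at it (a sketch; paths and network are placeholders):
```shell
docker run --rm --network your_network_name \
 -v $PWD/config.yaml:/backup/config.yaml \
 -v $PWD/backup:/backup/ \
 -e "BACKUP_CONFIG_FILE=/backup/config.yaml" \
 jkaninda/mysql-bkup backup
```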

View File

@@ -4,20 +4,10 @@ layout: default
parent: How Tos
nav_order: 12
---
Send Email or Telegram notifications on successful or failed backups.
# Receive Notifications
You can configure the system to send email or Telegram notifications when a backup succeeds or fails.
This section explains how to set up and customize notifications.
---
## Email Notifications
To send email notifications, provide SMTP credentials, a sender address, and recipient addresses. Notifications will be sent for both successful and failed backup runs.
### Example: Email Notification Configuration
### Email
To send out email notifications on failed or successful backup runs, provide SMTP credentials, a sender, and a recipient:
```yaml
services:
@@ -33,33 +23,25 @@ services:
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## SMTP Configuration
- MAIL_HOST=smtp.example.com
- MAIL_HOST=
- MAIL_PORT=587
- MAIL_USERNAME=your-email@example.com
- MAIL_PASSWORD=your-email-password
- MAIL_USERNAME=
- MAIL_PASSWORD=!
- MAIL_FROM=Backup Jobs <backup@example.com>
## Multiple recipients separated by a comma
- MAIL_TO=me@example.com,team@example.com,manager@example.com
- MAIL_SKIP_TLS=false
## Time format for notifications
## Time format for notification
- TIME_FORMAT=2006-01-02 at 15:04:05
## Backup reference (e.g., database/cluster name or server name)
## Backup reference, in case you want to identify every backup instance
- BACKUP_REFERENCE=database/Paris cluster
networks:
- web
networks:
web:
```
---
## Telegram Notifications
To send Telegram notifications, provide your bot token and chat ID. Notifications will be sent for both successful and failed backup runs.
### Example: Telegram Notification Configuration
### Telegram
```yaml
services:
@@ -75,49 +57,41 @@ services:
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## Telegram Configuration
- TG_TOKEN=[BOT ID]:[BOT TOKEN]
- TG_CHAT_ID=your-chat-id
## Time format for notifications
- TG_CHAT_ID=
## Time format for notification
- TIME_FORMAT=2006-01-02 at 15:04:05
## Backup reference (e.g., database/cluster name or server name)
## Backup reference, in case you want to identify every backup instance
- BACKUP_REFERENCE=database/Paris cluster
networks:
- web
networks:
web:
```
---
### Customize notifications
## Customize Notifications
The title and body of the notifications can be tailored to your needs using Go templates.
Template sources must be mounted inside the container in /config/templates:
You can customize the title and body of notifications using Go templates. Template files must be mounted inside the container at `/config/templates`. The following templates are supported:
- email.tmpl: Email notification template
- telegram.tmpl: Telegram notification template
- email-error.tmpl: Error notification template
- telegram-error.tmpl: Error notification template
- `email.tmpl`: Template for successful email notifications.
- `telegram.tmpl`: Template for successful Telegram notifications.
- `email-error.tmpl`: Template for failed email notifications.
- `telegram-error.tmpl`: Template for failed Telegram notifications.
### Data
### Template Data
Here is a list of all data passed to the template:
- `Database`: Database name
- `StartTime`: Backup process start time
- `EndTime`: Backup process end time
- `Storage`: Backup storage
- `BackupLocation`: Backup location
- `BackupSize`: Backup size
- `BackupReference`: Backup reference (e.g., database/cluster name or server name)
The following data is passed to the templates:
> email.template:
- `Database`: Database name.
- `StartTime`: Backup start time.
- `EndTime`: Backup end time.
- `Storage`: Backup storage type (e.g., local, S3, SSH).
- `BackupLocation`: Backup file location.
- `BackupSize`: Backup file size in bytes.
- `BackupReference`: Backup reference (e.g., database/cluster name or server name).
- `Error`: Error message (only for error templates).
---
### Example Templates
#### `email.tmpl` (Successful Backup)
```html
<h2>Hi,</h2>
@@ -130,29 +104,29 @@ The following data is passed to the templates:
<li>Backup Storage: {{.Storage}}</li>
<li>Backup Location: {{.BackupLocation}}</li>
<li>Backup Size: {{.BackupSize}} bytes</li>
<li>Backup Reference: {{.BackupReference}}</li>
<li>Backup Reference: {{.BackupReference}} </li>
</ul>
<p>Best regards,</p>
```
#### `telegram.tmpl` (Successful Backup)
> telegram.template
```html
✅ Database Backup Notification {{.Database}}
Database Backup Notification {{.Database}}
Hi,
Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.
Backup Details:
- Database Name: {{.Database}}
- Backup Start Time: {{.StartTime}}
- Backup End Time: {{.EndTime}}
- Backup EndTime: {{.EndTime}}
- Backup Storage: {{.Storage}}
- Backup Location: {{.BackupLocation}}
- Backup Size: {{.BackupSize}} bytes
- Backup Reference: {{.BackupReference}}
```
#### `email-error.tmpl` (Failed Backup)
> email-error.template
```html
<!DOCTYPE html>
@@ -166,15 +140,16 @@ Backup Details:
<p>An error occurred during database backup.</p>
<h3>Failure Details:</h3>
<ul>
<li>Error Message: {{.Error}}</li>
<li>Date: {{.EndTime}}</li>
<li>Backup Reference: {{.BackupReference}}</li>
<li>Error Message: {{.Error}}</li>
<li>Date: {{.EndTime}}</li>
<li>Backup Reference: {{.BackupReference}} </li>
</ul>
</body>
</html>
```
#### `telegram-error.tmpl` (Failed Backup)
> telegram-error.template
```html
🔴 Urgent: Database Backup Failure Notification
@@ -184,14 +159,4 @@ Failure Details:
Error Message: {{.Error}}
Date: {{.EndTime}}
Backup Reference: {{.BackupReference}}
```
---
## Key Notes
- **SMTP Configuration**: Ensure your SMTP server supports TLS unless `MAIL_SKIP_TLS` is set to `true`.
- **Telegram Configuration**: Obtain your bot token and chat ID from Telegram.
- **Custom Templates**: Mount custom templates to `/config/templates` to override default notifications.
- **Time Format**: Use the `TIME_FORMAT` environment variable to customize the timestamp format in notifications.
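Custom templates can be tried out with a one-off Docker CLI run by mounting a local templates directory to `/config/templates` (a sketch; the SMTP values and network are placeholders):
```shell
docker run --rm --network your_network_name \
 -v $PWD/templates:/config/templates \
 -e "DB_HOST=mysql" \
 -e "DB_USERNAME=username" \
 -e "DB_PASSWORD=password" \
 -e "MAIL_HOST=smtp.example.com" \
 -e "MAIL_PORT=587" \
 -e "MAIL_USERNAME=your-email@example.com" \
 -e "MAIL_PASSWORD=your-email-password" \
 -e "MAIL_FROM=backup@example.com" \
 -e "MAIL_TO=me@example.com" \
 jkaninda/mysql-bkup backup -d database
```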

View File

@@ -5,71 +5,45 @@ parent: How Tos
nav_order: 6
---
# Restore Database from S3 Storage
# Restore database from S3 storage
To restore a MySQL database from a backup stored in S3, use the `restore` command and specify the backup file with the `--file` flag. The system supports the following file formats:
To restore the database, you need to add the `restore` command and specify the file to restore with `--file store_20231219_022941.sql.gz`.
- `.sql` (uncompressed SQL dump)
- `.sql.gz` (gzip-compressed SQL dump)
- `.sql.gpg` (GPG-encrypted SQL dump)
- `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump)
{: .note }
It supports __.sql__, __.sql.gpg__, __.sql.gz__, and __.sql.gz.gpg__ files.
---
### Restore
## Configuration Steps
1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore.
2. **Set the Storage Type**: Add the `--storage s3` flag to indicate that the backup is stored in S3.
3. **Provide S3 Configuration**: Include the necessary AWS S3 credentials and configuration.
4. **Provide Database Credentials**: Ensure the correct database connection details are provided.
---
## Example: Restore from S3 Configuration
Below is an example `docker-compose.yml` configuration for restoring a database from S3 storage:
```yaml
```yml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
volumes:
- ./backup:/backup # Mount the directory for local operations (if needed)
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS S3 Configuration
## AWS configurations
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION=us-west-2
- AWS_REGION="us-west-2"
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
## Optional: Disable SSL for S3 alternatives like Minio
- AWS_DISABLE_SSL=false
## Optional: Enable path-style access for S3 alternatives like Minio
- AWS_FORCE_PATH_STYLE=false
# Ensure the mysql-bkup container is connected to the same network as your database
## In case you are using an S3 alternative such as Minio and your Minio instance is not secured, you can change it to true
- AWS_DISABLE_SSL="false"
- AWS_FORCE_PATH_STYLE="false"
# mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:
```
---
## Key Notes
- **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files.
- **S3 Path**: Use the `--path` flag to specify the folder within the S3 bucket where the backup file is located.
- **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption.
- **S3 Alternatives**: For S3-compatible storage like Minio, set `AWS_DISABLE_SSL` and `AWS_FORCE_PATH_STYLE` as needed.
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database.
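The same restore can also be run as a one-off Docker CLI command instead of Docker Compose (a sketch based on the configuration above; all values are placeholders):
```shell
# Sketch: one-off restore from S3 using the Docker CLI.
docker run --rm --network your_network_name \
  -e "DB_HOST=mysql" \
  -e "DB_PORT=3306" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  -e "AWS_S3_ENDPOINT=https://s3.amazonaws.com" \
  -e "AWS_S3_BUCKET_NAME=backup" \
  -e "AWS_REGION=us-west-2" \
  -e "AWS_ACCESS_KEY=xxxx" \
  -e "AWS_SECRET_KEY=xxxxx" \
  jkaninda/mysql-bkup restore --storage s3 -d my-database \
  -f store_20231219_022941.sql.gz --path /my-custom-path
```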

View File

@@ -4,71 +4,44 @@ layout: default
parent: How Tos
nav_order: 7
---
# Restore database from SSH remote server
# Restore Database from SSH Remote Server
To restore the database from your remote server, you need to add the `restore` command and specify the file to restore with `--file store_20231219_022941.sql.gz`.
To restore a MySQL database from a backup stored on an SSH remote server, use the `restore` command and specify the backup file with the `--file` flag. The system supports the following file formats:
{: .note }
It supports __.sql__, __.sql.gpg__, __.sql.gz__, and __.sql.gz.gpg__ files.
- `.sql` (uncompressed SQL dump)
- `.sql.gz` (gzip-compressed SQL dump)
- `.sql.gpg` (GPG-encrypted SQL dump)
- `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump)
### Restore
---
## Configuration Steps
1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore.
2. **Set the Storage Type**: Add the `--storage ssh` flag to indicate that the backup is stored on an SSH remote server.
3. **Provide SSH Configuration**: Include the necessary SSH credentials and configuration.
4. **Provide Database Credentials**: Ensure the correct database connection details are provided.
---
## Example: Restore from SSH Remote Server Configuration
Below is an example `docker-compose.yml` configuration for restoring a database from an SSH remote server:
```yaml
```yml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
volumes:
- ./backup:/backup # Mount the directory for local operations (if needed)
- ./id_ed25519:/tmp/id_ed25519 # Mount the SSH private key file
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_HOST=postgres
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## SSH Configuration
- SSH_HOST_NAME=hostname
## SSH config
- SSH_HOST_NAME="hostname"
- SSH_PORT=22
- SSH_USER=user
- SSH_REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
## Optional: Use password instead of private key (not recommended)
## We advise you to use a private key instead of a password
#- SSH_PASSWORD=password
# Ensure the mysql-bkup container is connected to the same network as your database
# mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:
```
---
## Key Notes
- **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files.
- **SSH Path**: Use the `--path` flag to specify the folder on the SSH remote server where the backup file is located.
- **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption.
- **SSH Authentication**: Use a private key (`SSH_IDENTIFY_FILE`) for SSH authentication instead of a password for better security.
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database.
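For reference, the same restore as a one-off Docker CLI command (a sketch based on the configuration above; all values are placeholders):
```shell
# Sketch: one-off restore from an SSH remote server using the Docker CLI.
docker run --rm --network your_network_name \
  -v $PWD/id_ed25519:/tmp/id_ed25519 \
  -e "DB_HOST=mysql" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  -e "SSH_HOST_NAME=hostname" \
  -e "SSH_PORT=22" \
  -e "SSH_USER=user" \
  -e "SSH_IDENTIFY_FILE=/tmp/id_ed25519" \
  jkaninda/mysql-bkup restore --storage ssh -d my-database \
  -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
```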

View File

@@ -5,60 +5,36 @@ parent: How Tos
nav_order: 5
---
# Restore database
# Restore Database
To restore the database, you need to add the `restore` command and specify the file to restore with `--file store_20231219_022941.sql.gz`.
To restore a MySQL database, use the `restore` command and specify the backup file to restore with the `--file` flag.
{: .note }
It supports __.sql__, __.sql.gpg__, __.sql.gz__, and __.sql.gz.gpg__ files.
The system supports the following file formats:
### Restore
- `.sql` (uncompressed SQL dump)
- `.sql.gz` (gzip-compressed SQL dump)
- `.sql.gpg` (GPG-encrypted SQL dump)
- `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump)
---
## Configuration Steps
1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore.
2. **Provide Database Credentials**: Ensure the correct database connection details are provided.
---
## Example: Restore Configuration
Below is an example `docker-compose.yml` configuration for restoring a database:
```yaml
```yml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: restore -d database -f store_20231219_022941.sql.gz
volumes:
- ./backup:/backup # Mount the directory containing the backup file
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=postgres
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
# Ensure the mysql-bkup container is connected to the same network as your database
# mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:
```
---
## Key Notes
- **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files.
- **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption.
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database.
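As a quick alternative to Docker Compose, the same restore can be run as a one-off Docker CLI command (a sketch; all values are placeholders, and the backup file is assumed to sit in `./backup` on the host):
```shell
# Sketch: one-off local restore using the Docker CLI.
docker run --rm --network your_network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=mysql" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup restore -d database -f store_20231219_022941.sql.gz
```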

View File

@@ -10,76 +10,175 @@ nav_order: 1
**MYSQL-BKUP** is a Docker container image designed to **backup, restore, and migrate MySQL databases**.
It supports a variety of storage options and ensures data security through GPG encryption.
---
## Features
## Key Features
- **Storage Options:**
- Local storage
- AWS S3 or any S3-compatible object storage
- FTP
- SSH-compatible storage
- Azure Blob storage
### Storage Options
- **Local storage**
- **AWS S3** or any S3-compatible object storage
- **FTP**
- **SFTP**
- **SSH-compatible storage**
- **Azure Blob storage**
- **Data Security:**
- Backups can be encrypted using **GPG** to ensure confidentiality.
### Data Security
- Backups can be encrypted using **GPG** to ensure data confidentiality.
- **Deployment Flexibility:**
- Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image.
- Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**.
- Supports recurring backups of MySQL databases when deployed:
- On Docker for automated backup schedules.
- As a **Job** or **CronJob** on Kubernetes.
### Deployment Flexibility
- Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image.
- Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**.
- Supports recurring backups of MySQL databases:
- On Docker for automated backup schedules.
- As a **Job** or **CronJob** on Kubernetes.
### Notifications
- Receive real-time updates on backup success or failure via:
- **Telegram**
- **Email**
---
- **Notifications:**
- Get real-time updates on backup success or failure via:
- **Telegram**
- **Email**
## Use Cases
- **Automated Recurring Backups:** Schedule regular backups for MySQL databases.
- **Cross-Environment Migration:** Easily migrate MySQL databases across different environments using supported storage options.
- **Cross-Environment Migration:** Easily migrate your MySQL databases across different environments using supported storage options.
- **Secure Backup Management:** Protect your data with GPG encryption.
---
## Get Involved
We welcome contributions! Feel free to give us a ⭐, submit PRs, or open issues on our [GitHub repository](https://github.com/jkaninda/mysql-bkup).
{: .fs-6 .fw-300 }
---
{: .note }
Code and documentation for the `v1` version are available on [this branch][v1-branch].
Code and documentation for the `v1` version are on [this branch][v1-branch].
[v1-branch]: https://github.com/jkaninda/mysql-bkup
---
## Available Image Registries
## Quickstart
The Docker image is published to both **Docker Hub** and the **GitHub Container Registry**. You can use either of the following:
### Simple backup using Docker CLI
```bash
To run a one-time backup, bind your local volume to `/backup` in the container and run the `backup` command:
```shell
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d database_name
```
Alternatively, pass an `--env-file` to use a full config as described below.
```shell
docker run --rm --network your_network_name \
--env-file your-env-file \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup backup -d database_name
```
### Simple backup in docker compose file
```yaml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=foo
- DB_USERNAME=bar
- DB_PASSWORD=password
- TZ=Europe/Paris
# mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:
```
### Docker recurring backup
```shell
docker run --rm --network network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=hostname" \
-e "DB_USERNAME=user" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m" #@midnight
```
See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
## Kubernetes
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup-job
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup -d dbname
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_HOST
value: "mysql"
- name: DB_USERNAME
value: "user"
- name: DB_PASSWORD
value: "password"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
hostPath:
path: /home/toto/backup # directory location on host
type: Directory # this field is optional
restartPolicy: Never
```
## Available image registries
This Docker image is published to both Docker Hub and the GitHub container registry.
Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`:
```
docker pull jkaninda/mysql-bkup
docker pull ghcr.io/jkaninda/mysql-bkup
```
While the documentation references Docker Hub, all examples work seamlessly with `ghcr.io`.
Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
---
## Supported Engines
This image is developed and tested against the Docker CE engine and Kubernetes exclusively.
While it may work against different implementations, there are no guarantees about support for non-Docker engines.
## References
We created this image as a simpler and more lightweight alternative to existing solutions. Here's why:
We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
- **Lightweight:** Written in Go, the image is optimized for performance and minimal resource usage.
- **Multi-Architecture Support:** Supports `arm64` and `arm/v7` architectures.
- **Docker Swarm Support:** Fully compatible with Docker in Swarm mode.
- **Kubernetes Support:** Designed to work seamlessly with Kubernetes.
- The original image is based on `alpine` and requires additional tools, making it heavy.
- This image is written in Go.
- `arm64` and `arm/v7` architectures are supported.
- Docker in Swarm mode is supported.
- Kubernetes is supported.

View File

@@ -1,138 +0,0 @@
---
title: Quickstart
layout: home
nav_order: 2
---
# Quickstart
This guide provides quick examples for running backups using Docker CLI, Docker Compose, and Kubernetes.
---
## Simple Backup Using Docker CLI
To run a one-time backup, bind your local volume to `/backup` in the container and execute the `backup` command:
```bash
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d database_name
```
### Using an Environment File
Alternatively, you can use an `--env-file` to pass a full configuration:
```bash
docker run --rm --network your_network_name \
--env-file your-env-file \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup backup -d database_name
```
---
## Simple Backup Using Docker Compose
Below is an example `docker-compose.yml` configuration for running a backup:
```yaml
services:
mysql-bkup:
# In production, lock the image tag to a specific release version.
# Check https://github.com/jkaninda/mysql-bkup/releases for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=foo
- DB_USERNAME=bar
- DB_PASSWORD=password
- TZ=Europe/Paris
# Ensure the mysql-bkup container is connected to the same network as your database.
networks:
- web
networks:
web:
```
---
## Recurring Backup with Docker
To schedule recurring backups, use the `--cron-expression` flag:
```bash
docker run --rm --network network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=hostname" \
-e "DB_USERNAME=user" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m"
```
For predefined schedules, refer to the [documentation](https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules).
---
## Backup Using Kubernetes
Below is an example Kubernetes `Job` configuration for running a backup:
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup-job
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, lock the image tag to a specific release version.
# Check https://github.com/jkaninda/mysql-bkup/releases for available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup -d dbname
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_HOST
value: "mysql"
- name: DB_USERNAME
value: "user"
- name: DB_PASSWORD
value: "password"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
hostPath:
path: /home/toto/backup # Directory location on the host
type: Directory # Optional field
restartPolicy: Never
```
---
## Key Notes
- **Volume Binding**: Ensure the `/backup` directory is mounted to persist backup files.
- **Environment Variables**: Use environment variables or an `--env-file` to pass database credentials and other configurations.
- **Cron Expressions**: Use standard cron expressions or predefined schedules for recurring backups.
- **Kubernetes Jobs**: Use Kubernetes `Job` or `CronJob` for running backups in a Kubernetes cluster.

View File

@@ -1,127 +1,139 @@
---
title: Configuration Reference
layout: default
nav_order: 3
nav_order: 2
---
# Configuration Reference
# Configuration reference
Backup, restore, and migration targets, schedules, and retention policies are configured using **environment variables** or **CLI flags**.
Backup, restore, and migrate targets, schedules, and retention are configured using environment variables or flags.
### CLI utility Usage
| Options | Shorts | Usage |
|-----------------------|--------|----------------------------------------------------------------------------------------|
| mysql-bkup | bkup | CLI utility |
| backup | | Backup database operation |
| restore | | Restore database operation |
| migrate | | Migrate database from one instance to another one |
| --storage | -s | Storage. local or s3 (default: local) |
| --file | -f | File name for restoration |
| --path                |        | AWS S3 path without file name, e.g., /custom_path, or SSH remote path `/home/foo/backup` |
| --dbname | -d | Database name |
| --port | -p | Database port (default: 3306) |
| --disable-compression | | Disable database backup compression |
| --cron-expression     |        | Backup cron expression, e.g., `* * * * *` or `@daily`                                   |
| --help | -h | Print this help message and exit |
| --version | -V | Print version information and exit |
## Environment variables
| Name | Requirement | Description |
|------------------------------|---------------------------------------------------------------|-----------------------------------------------------------------|
| DB_PORT | Optional, default 3306 | Database port number |
| DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name |
| DB_USERNAME | Required | Database user name |
| DB_PASSWORD | Required | Database password |
| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
| AWS_BUCKET_NAME              | Optional, required for S3 storage                              | AWS S3 Bucket Name                                              |
| AWS_REGION | Optional, required for S3 storage | AWS Region |
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
| AWS_FORCE_PATH_STYLE | Optional, required for S3 storage | Force path style |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
| GPG_PUBLIC_KEY | Optional, required to encrypt backup | GPG public key, used to encrypt backup (/config/public_key.asc) |
| BACKUP_CRON_EXPRESSION | Optional if it was provided from the `--cron-expression` flag | Backup cron expression for docker in scheduled mode |
| BACKUP_RETENTION_DAYS | Optional | Delete old backup created more than specified days ago |
| SSH_HOST | Optional, required for SSH storage | ssh remote hostname or ip |
| SSH_USER | Optional, required for SSH storage | ssh remote user |
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
| REMOTE_PATH | Optional, required for SSH or FTP storage | remote path (/home/toto/backup) |
| FTP_HOST | Optional, required for FTP storage | FTP host name |
| FTP_PORT | Optional, required for FTP storage | FTP server port number |
| FTP_USER | Optional, required for FTP storage | FTP user |
| FTP_PASSWORD | Optional, required for FTP storage | FTP user password |
| TARGET_DB_HOST | Optional, required for database migration | Target database host |
| TARGET_DB_PORT | Optional, required for database migration | Target database port |
| TARGET_DB_NAME | Optional, required for database migration | Target database name |
| TARGET_DB_USERNAME | Optional, required for database migration | Target database username |
| TARGET_DB_PASSWORD | Optional, required for database migration | Target database password |
| TG_TOKEN | Optional, required for Telegram notification | Telegram token (`BOT-ID:BOT-TOKEN`) |
| TG_CHAT_ID | Optional, required for Telegram notification | Telegram Chat ID |
| TZ | Optional | Time Zone |
| AZURE_STORAGE_CONTAINER_NAME | Optional, required for Azure Blob Storage storage | Azure storage container name |
| AZURE_STORAGE_ACCOUNT_NAME | Optional, required for Azure Blob Storage storage | Azure storage account name |
| AZURE_STORAGE_ACCOUNT_KEY | Optional, required for Azure Blob Storage storage | Azure storage account key |
---
## Run in Scheduled mode
## CLI Utility Usage
This image can be run as a CronJob in Kubernetes for regular backups, which makes deployment on Kubernetes easy since Kubernetes provides CronJob resources.
For Docker, you need to run it in scheduled mode by adding the `--cron-expression "* * * * *"` flag or by defining the `BACKUP_CRON_EXPRESSION=0 1 * * *` environment variable.
| Option | Short Flag | Description |
|-------------------------|------------|-------------------------------------------------------------------------------|
| `mysql-bkup`            | `bkup`     | CLI utility for managing MySQL backups.                                         |
| `backup` | | Perform a backup operation. |
| `restore` | | Perform a restore operation. |
| `migrate` | | Migrate a database from one instance to another. |
| `--storage` | `-s` | Storage type (`local`, `s3`, `ssh`, etc.). Default: `local`. |
| `--file` | `-f` | File name for restoration. |
| `--path` | | Path for storage (e.g., `/custom_path` for S3 or `/home/foo/backup` for SSH). |
| `--config`              | `-c`       | Configuration file for multi-database backup (e.g., `/backup/config.yaml`).    |
| `--dbname` | `-d` | Database name. |
| `--port` | `-p` | Database port. Default: `3306`. |
| `--disable-compression` | | Disable compression for database backups. |
| `--cron-expression` | `-e` | Cron expression for scheduled backups (e.g., `0 0 * * *` or `@daily`). |
| `--help` | `-h` | Display help message and exit. |
| `--version` | `-V` | Display version information and exit. |
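For instance, the `--config` flag can be combined with a mounted configuration file for a multi-database backup (a sketch; it assumes you already have a valid `config.yaml`, whose structure is not shown here):
```shell
# Sketch: multi-database backup driven by a mounted config file.
docker run --rm --network your_network_name \
  -v $PWD/config.yaml:/backup/config.yaml \
  -v $PWD/backup:/backup/ \
  jkaninda/mysql-bkup backup --config /backup/config.yaml
```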
## Syntax of crontab (field description)
---
The syntax is:
## Environment Variables
- 1: Minute (0-59)
- 2: Hours (0-23)
- 3: Day (1-31)
- 4: Month (1-12 [12 == December])
- 5: Day of the week (0-7 [7 or 0 == Sunday])
| Name | Requirement | Description |
|--------------------------------|--------------------------------------|----------------------------------------------------------------------------|
| `DB_PORT` | Optional (default: `3306`) | Database port number. |
| `DB_HOST` | Required | Database host. |
| `DB_NAME` | Optional (if provided via `-d` flag) | Database name. |
| `DB_USERNAME` | Required | Database username. |
| `DB_PASSWORD` | Required | Database password. |
| `AWS_ACCESS_KEY` | Required for S3 storage | AWS S3 Access Key. |
| `AWS_SECRET_KEY` | Required for S3 storage | AWS S3 Secret Key. |
| `AWS_BUCKET_NAME` | Required for S3 storage | AWS S3 Bucket Name. |
| `AWS_REGION` | Required for S3 storage | AWS Region. |
| `AWS_DISABLE_SSL` | Optional | Disable SSL for S3 storage. |
| `AWS_FORCE_PATH_STYLE` | Optional | Force path-style access for S3 storage. |
| `FILE_NAME` | Optional (if provided via `--file`) | File name for restoration (e.g., `.sql`, `.sql.gz`). |
| `GPG_PASSPHRASE` | Optional | GPG passphrase for encrypting/decrypting backups. |
| `GPG_PUBLIC_KEY` | Optional | GPG public key for encrypting backups (e.g., `/config/public_key.asc`). |
| `BACKUP_CRON_EXPRESSION` | Optional (flag `-e`) | Cron expression for scheduled backups. |
| `BACKUP_RETENTION_DAYS` | Optional | Delete backups older than the specified number of days. |
| `BACKUP_CONFIG_FILE`           | Optional (flag `-c`)                 | Configuration file for multi-database backup (e.g., `/backup/config.yaml`). |
| `SSH_HOST` | Required for SSH storage | SSH remote hostname or IP. |
| `SSH_USER` | Required for SSH storage | SSH remote username. |
| `SSH_PASSWORD` | Optional | SSH remote user's password. |
| `SSH_IDENTIFY_FILE` | Optional | SSH remote user's private key. |
| `SSH_PORT` | Optional (default: `22`) | SSH remote server port. |
| `REMOTE_PATH` | Required for SSH/FTP storage | Remote path (e.g., `/home/toto/backup`). |
| `FTP_HOST` | Required for FTP storage | FTP hostname. |
| `FTP_PORT` | Optional (default: `21`) | FTP server port. |
| `FTP_USER` | Required for FTP storage | FTP username. |
| `FTP_PASSWORD` | Required for FTP storage | FTP user password. |
| `TARGET_DB_HOST` | Required for migration | Target database host. |
| `TARGET_DB_PORT`               | Optional (default: `3306`)           | Target database port.                                                        |
| `TARGET_DB_NAME` | Required for migration | Target database name. |
| `TARGET_DB_USERNAME` | Required for migration | Target database username. |
| `TARGET_DB_PASSWORD` | Required for migration | Target database password. |
| `TARGET_DB_URL` | Optional | Target database URL in JDBC URI format. |
| `TG_TOKEN` | Required for Telegram notifications | Telegram token (`BOT-ID:BOT-TOKEN`). |
| `TG_CHAT_ID` | Required for Telegram notifications | Telegram Chat ID. |
| `TZ` | Optional | Time zone for scheduling. |
| `AZURE_STORAGE_CONTAINER_NAME` | Required for Azure Blob Storage | Azure storage container name. |
| `AZURE_STORAGE_ACCOUNT_NAME` | Required for Azure Blob Storage | Azure storage account name. |
| `AZURE_STORAGE_ACCOUNT_KEY` | Required for Azure Blob Storage | Azure storage account key. |
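As an illustration of the `TARGET_DB_*` variables above, a database migration could be invoked like this (a minimal sketch; all values are placeholders):
```shell
# Sketch: migrate a database from a source to a target MySQL instance.
docker run --rm --network your_network_name \
  -e "DB_HOST=source-mysql" \
  -e "DB_NAME=database" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  -e "TARGET_DB_HOST=target-mysql" \
  -e "TARGET_DB_PORT=3306" \
  -e "TARGET_DB_NAME=database" \
  -e "TARGET_DB_USERNAME=username" \
  -e "TARGET_DB_PASSWORD=password" \
  jkaninda/mysql-bkup migrate
```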
---
## Scheduled Backups
### Running in Scheduled Mode
- **Docker**: Use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable to schedule backups.
- **Kubernetes**: Use a `CronJob` resource for scheduled backups.
### Cron Syntax
The cron syntax consists of five fields:
Easy to remember format:
```conf
* * * * * command
* * * * * command to be executed
```
| Field | Description | Values |
|---------------|------------------------------|----------------|
| Minute | Minute of the hour | `0-59` |
| Hour | Hour of the day | `0-23` |
| Day of Month | Day of the month | `1-31` |
| Month | Month of the year | `1-12` |
| Day of Week | Day of the week (0 = Sunday) | `0-7` |
```conf
- - - - -
| | | | |
| | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
| | | ------- Month (1 - 12)
| | --------- Day of month (1 - 31)
| ----------- Hour (0 - 23)
------------- Minute (0 - 59)
```
#### Examples
- **Every 30 minutes**: `*/30 * * * *`
- **Every hour at minute 0**: `0 * * * *`
- **Every day at 1:00 AM**: `0 1 * * *`
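For example, the daily 1:00 AM schedule can be supplied through the environment variable instead of the flag (a sketch; all values are placeholders):
```shell
# Sketch: recurring backup every day at 1:00 AM via BACKUP_CRON_EXPRESSION.
docker run --rm --network your_network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=mysql" \
  -e "DB_USERNAME=user" \
  -e "DB_PASSWORD=password" \
  -e "BACKUP_CRON_EXPRESSION=0 1 * * *" \
  jkaninda/mysql-bkup backup -d dbName
```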
### Predefined Schedules
| Entry | Description | Equivalent To |
|----------------------------|--------------------------------------------|---------------|
| `@yearly` (or `@annually`) | Run once a year, midnight, Jan. 1st | `0 0 1 1 *` |
| `@monthly` | Run once a month, midnight, first of month | `0 0 1 * *` |
| `@weekly` | Run once a week, midnight between Sat/Sun | `0 0 * * 0` |
| `@daily` (or `@midnight`) | Run once a day, midnight | `0 0 * * *` |
| `@hourly` | Run once an hour, beginning of hour | `0 * * * *` |
### Intervals
You can also schedule backups at fixed intervals using the format:
> At every 30th minute
```conf
*/30 * * * *
```
> “At minute 0.” every hour
```conf
0 * * * *
```
> “At 01:00.” every day
```conf
0 1 * * *
```
## Predefined schedules
You may use one of several pre-defined schedules in place of a cron expression.
| Entry | Description | Equivalent To |
|------------------------|--------------------------------------------|---------------|
| @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 * |
| @monthly | Run once a month, midnight, first of month | 0 0 1 * * |
| @weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0 |
| @daily (or @midnight) | Run once a day, midnight | 0 0 * * * |
| @hourly | Run once an hour, beginning of hour | 0 * * * * |
### Intervals
You may also schedule backup tasks at fixed intervals, starting at the time the task is added or cron is run. This is supported by formatting the cron spec like this:
```conf
@every <duration>
```
where "duration" is a string accepted by `time.ParseDuration`.
- Example: `@every 1h30m10s` runs the backup every 1 hour, 30 minutes, and 10 seconds.
For example, "@every 1h30m10s" would indicate a schedule that activates after 1 hour, 30 minutes, 10 seconds, and then every interval after that.
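Applied to the CLI, an interval schedule looks like this (a sketch; all values are placeholders):
```shell
# Sketch: run a backup every 1 hour, 30 minutes, and 10 seconds.
docker run --rm --network network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=hostname" \
  -e "DB_USERNAME=user" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1h30m10s"
```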

go.mod
View File

@@ -26,7 +26,6 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jkaninda/go-utils v0.0.0-20250122060806-26119182077a // indirect
github.com/jlaffaye/ftp v0.2.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/pkg/errors v0.9.1 // indirect

go.sum
View File

@@ -43,10 +43,6 @@ github.com/jkaninda/encryptor v0.0.0-20241111100652-926393c9437e h1:jtFKZHt/PLGQ
github.com/jkaninda/encryptor v0.0.0-20241111100652-926393c9437e/go.mod h1:Y1EXpPWQ9PNd7y7E6ez3xgnzZc8fuDWXwX/1/dXNCE4=
github.com/jkaninda/go-storage v0.1.3 h1:lEpHVgFLKSvjsi/6tAek96Y07za3vxmsXF2/+jiCMZU=
github.com/jkaninda/go-storage v0.1.3/go.mod h1:zVRnLprBk/9AUz2+za6Y03MgoNYrqKLy3edVtjqMaps=
github.com/jkaninda/go-utils v0.0.0-20250122054739-d330fecee150 h1:AgcKk58P/Z+u4DBE2MTyZ6kCweA89YUpX7TuttHS3oQ=
github.com/jkaninda/go-utils v0.0.0-20250122054739-d330fecee150/go.mod h1:pf0/U6k4JbxlablM2G4eSTZdQ2LFshfAsCK5Q8qNfGo=
github.com/jkaninda/go-utils v0.0.0-20250122060806-26119182077a h1:ZTpKujQGhEF266RWkD2cXnCsafk3R2+sGtfbzCQSs1s=
github.com/jkaninda/go-utils v0.0.0-20250122060806-26119182077a/go.mod h1:pf0/U6k4JbxlablM2G4eSTZdQ2LFshfAsCK5Q8qNfGo=
github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=

View File

@@ -27,7 +27,6 @@ package pkg
import (
"fmt"
"github.com/jkaninda/go-storage/pkg/azure"
goutils "github.com/jkaninda/go-utils"
"github.com/jkaninda/mysql-bkup/utils"
"os"
@@ -37,6 +36,7 @@ import (
func azureBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to Azure Blob Storage")
startTime = time.Now().Format(utils.TimeFormat())
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
@@ -87,8 +87,6 @@ func azureBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
utils.Info("Uploading backup archive to Azure Blob storage ... done ")
duration := goutils.FormatDuration(time.Since(startTime), 0)
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
@@ -96,11 +94,12 @@ func azureBackup(db *dbConfig, config *BackupConfig) {
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
Duration: duration,
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
// Delete temp
deleteTemp()
utils.Info("Backup successfully completed in %s", duration)
utils.Info("Backup completed successfully")
}
func azureRestore(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from Azure Blob storage")

View File

@@ -29,7 +29,6 @@ import (
"fmt"
"github.com/jkaninda/encryptor"
"github.com/jkaninda/go-storage/pkg/local"
goutils "github.com/jkaninda/go-utils"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/robfig/cron/v3"
"github.com/spf13/cobra"
@@ -108,7 +107,6 @@ func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
// BackupTask backups database
func BackupTask(db *dbConfig, config *BackupConfig) {
utils.Info("Starting backup task...")
startTime = time.Now()
// Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
if config.disableCompression {
@@ -120,7 +118,7 @@ func BackupTask(db *dbConfig, config *BackupConfig) {
localBackup(db, config)
case "s3", "S3":
s3Backup(db, config)
case "ssh", "SSH", "remote", "sftp":
case "ssh", "SSH", "remote":
sshBackup(db, config)
case "ftp", "FTP":
ftpBackup(db, config)
@@ -131,7 +129,7 @@ func BackupTask(db *dbConfig, config *BackupConfig) {
}
}
func startMultiBackup(bkConfig *BackupConfig, configFile string) {
utils.Info("Starting Multi backup task...")
utils.Info("Starting backup task...")
conf, err := readConf(configFile)
if err != nil {
utils.Fatal("Error reading config file: %s", err)
@@ -258,6 +256,7 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
}
func localBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to local storage")
startTime = time.Now().Format(utils.TimeFormat())
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
@@ -280,8 +279,6 @@ func localBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup name is %s", finalFileName)
utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
utils.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName))
duration := goutils.FormatDuration(time.Since(startTime), 0)
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
@@ -289,7 +286,8 @@ func localBackup(db *dbConfig, config *BackupConfig) {
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(storagePath, finalFileName),
Duration: duration,
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
// Delete old backup
if config.prune {
@@ -301,7 +299,7 @@ func localBackup(db *dbConfig, config *BackupConfig) {
}
// Delete temp
deleteTemp()
utils.Info("Backup successfully completed in %s", duration)
utils.Info("Backup completed successfully")
}
func encryptBackup(config *BackupConfig) {

View File

@@ -214,7 +214,6 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig {
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
utils.GetEnv(cmd, "path", "REMOTE_PATH")
utils.GetEnv(cmd, "config", "BACKUP_CONFIG_FILE")
// Get flag value and set env
remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")

View File

@@ -155,8 +155,6 @@ func readConf(configFile string) (*Config, error) {
// checkConfigFile checks config files and returns one config file
func checkConfigFile(filePath string) (string, error) {
// Remove the quotes
filePath = strings.Trim(filePath, `"`)
// Define possible config file names
configFiles := []string{filepath.Join(workingDir, "config.yaml"), filepath.Join(workingDir, "config.yml"), filePath}

View File

@@ -28,7 +28,6 @@ import (
"fmt"
"github.com/jkaninda/go-storage/pkg/ftp"
"github.com/jkaninda/go-storage/pkg/ssh"
goutils "github.com/jkaninda/go-utils"
"github.com/jkaninda/mysql-bkup/utils"
"os"
@@ -38,6 +37,7 @@ import (
func sshBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to Remote server")
startTime = time.Now().Format(utils.TimeFormat())
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
@@ -91,8 +91,6 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
}
utils.Info("Uploading backup archive to remote storage ... done ")
duration := goutils.FormatDuration(time.Since(startTime), 0)
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
@@ -100,11 +98,12 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
Duration: duration,
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
// Delete temp
deleteTemp()
utils.Info("Backup successfully completed in %s", duration)
utils.Info("Backup completed successfully")
}
func remoteRestore(db *dbConfig, conf *RestoreConfig) {
@@ -154,6 +153,7 @@ func ftpRestore(db *dbConfig, conf *RestoreConfig) {
}
func ftpBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to the remote FTP server")
startTime = time.Now().Format(utils.TimeFormat())
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
@@ -203,7 +203,6 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup name is %s", finalFileName)
utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
utils.Info("Uploading backup archive to the remote FTP server ... done ")
duration := goutils.FormatDuration(time.Since(startTime), 0)
// Send notification
utils.NotifySuccess(&utils.NotificationData{
@@ -212,9 +211,10 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
Duration: duration,
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
// Delete temp
deleteTemp()
utils.Info("Backup successfully completed in %s", duration)
utils.Info("Backup completed successfully")
}

View File

@@ -27,7 +27,6 @@ package pkg
import (
"fmt"
"github.com/jkaninda/go-storage/pkg/s3"
goutils "github.com/jkaninda/go-utils"
"github.com/jkaninda/mysql-bkup/utils"
"os"
@@ -38,6 +37,7 @@ import (
func s3Backup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to s3 storage")
startTime = time.Now().Format(utils.TimeFormat())
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
@@ -91,7 +91,6 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
}
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
utils.Info("Uploading backup archive to remote storage S3 ... done ")
duration := goutils.FormatDuration(time.Since(startTime), 0)
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
@@ -99,11 +98,12 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
Duration: duration,
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
// Delete temp
deleteTemp()
utils.Info("Backup successfully completed in %s", duration)
utils.Info("Backup completed successfully")
}
func s3Restore(db *dbConfig, conf *RestoreConfig) {

View File

@@ -24,8 +24,6 @@ SOFTWARE.
package pkg
import "time"
const tmpPath = "/tmp/backup"
const gpgHome = "/config/gnupg"
const gpgExtension = "gpg"
@@ -41,7 +39,7 @@ var (
encryption = false
usingKey = false
backupSize int64 = 0
startTime = time.Now()
startTime string
)
// dbHVars Required environment variables for database

View File

@@ -52,7 +52,8 @@
<h3>Backup Details:</h3>
<ul>
<li><strong>Database Name:</strong> {{.Database}}</li>
<li><strong>Backup Duration:</strong> {{.Duration}}</li>
<li><strong>Backup Start Time:</strong> {{.StartTime}}</li>
<li><strong>Backup End Time:</strong> {{.EndTime}}</li>
<li><strong>Backup Storage:</strong> {{.Storage}}</li>
<li><strong>Backup Location:</strong> {{.BackupLocation}}</li>
<li><strong>Backup Size:</strong> {{.BackupSize}}</li>

View File

@@ -6,7 +6,8 @@ Please find the details below:
Backup Details:
- Database Name: {{.Database}}
- Backup Duration: {{.Duration}}
- Backup Start Time: {{.StartTime}}
- Backup EndTime: {{.EndTime}}
- Backup Storage: {{.Storage}}
- Backup Location: {{.BackupLocation}}
- Backup Size: {{.BackupSize}}

View File

@@ -39,7 +39,8 @@ type NotificationData struct {
File string
BackupSize string
Database string
Duration string
StartTime string
EndTime string
Storage string
BackupLocation string
BackupReference string