Compare commits

...

8 Commits

13 changed files with 174 additions and 72 deletions

View File

@@ -18,7 +18,7 @@ compile:
docker-build:
docker build -f docker/Dockerfile -t jkaninda/mysql-bkup:latest .
docker-run: #docker-build
docker-run: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --prune --keep-last 2
docker-restore: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} restore -f ${FILE_NAME}

View File

@@ -50,6 +50,12 @@ To run a one time backup, bind your local volume to `/backup` in the container a
Alternatively, pass an `--env-file` in order to use a full config as described below.
```shell
docker run --rm --network your_network_name \
--env-file your-env-file \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup backup -d database_name
```
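For reference, a minimal env file for the command above might look like this (values are placeholders; the variable names are the ones used in this repository's Makefile and Kubernetes examples):
```shell
# your-env-file (name taken from the command above; contents are placeholders)
DB_HOST=db_hostname
DB_PORT=3306
DB_NAME=db_name
DB_USERNAME=db_username
DB_PASSWORD=password
# GPG_PASSPHRASE=xxxx   # optional, enables backup encryption
```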
### Simple backup in a Docker Compose file
@@ -79,57 +85,45 @@ networks:
```
## Deploy on Kubernetes
For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as CronJob.
For Kubernetes, you don't need to run it in scheduled mode; you can deploy it as a Job or CronJob.
### Simple Kubernetes CronJob usage:
### Simple Kubernetes backup Job:
```yaml
apiVersion: batch/v1
kind: CronJob
kind: Job
metadata:
name: bkup-job
name: backup
spec:
schedule: "0 1 * * *"
jobTemplate:
template:
spec:
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup -s s3 --path /custom_path
env:
- name: DB_PORT
value: "5432"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: Never
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- bkup
- backup
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
restartPolicy: Never
```
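Assuming the manifest above is saved as `backup-job.yaml` (an illustrative file name), it can be applied and checked with standard kubectl commands:
```shell
kubectl apply -f backup-job.yaml
# follow the backup logs; the Job is named "backup" in the manifest above
kubectl logs -f job/backup
```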
## Available image registries

View File

@@ -31,7 +31,9 @@ ENV SSH_HOST_NAME=""
ENV SSH_IDENTIFY_FILE=""
ENV SSH_PORT="22"
ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="v1.2.1"
ENV VERSION="v1.2.2"
ENV BACKUP_CRON_EXPRESSION=""
ENV GNUPGHOME="/tmp/gnupg"
ARG WORKDIR="/app"
ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup"
@@ -40,7 +42,6 @@ ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
LABEL author="Jonas Kaninda"
RUN apt-get update -qq
#RUN apt-get install build-essential libcurl4-openssl-dev libxml2-dev mime-support -y
RUN apt install mysql-client supervisor cron gnupg -y
# Clear cache
@@ -48,14 +49,16 @@ RUN apt-get clean && rm -rf /var/lib/apt/lists/*
RUN mkdir $WORKDIR
RUN mkdir $BACKUPDIR
RUN mkdir -p $BACKUP_TMP_DIR
RUN mkdir -p $BACKUP_TMP_DIR && \
mkdir -p $GNUPGHOME
RUN chmod 777 $WORKDIR
RUN chmod 777 $BACKUPDIR
RUN chmod 777 $BACKUP_TMP_DIR
RUN touch $BACKUP_CRON && \
touch $BACKUP_CRON_SCRIPT && \
chmod 777 $BACKUP_CRON && \
chmod 777 $BACKUP_CRON_SCRIPT
chmod 777 $BACKUP_CRON_SCRIPT && \
chmod 777 $GNUPGHOME
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
RUN chmod +x /usr/local/bin/mysql-bkup
@@ -65,4 +68,18 @@ RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
WORKDIR $WORKDIR
# Create backup shell script
COPY <<EOF /usr/local/bin/backup
#!/bin/sh
# shellcheck disable=SC2068
/usr/local/bin/mysql-bkup backup $@
EOF
# Create restore shell script
COPY <<EOF /usr/local/bin/restore
#!/bin/sh
# shellcheck disable=SC2068
/usr/local/bin/mysql-bkup restore $@
EOF
RUN chmod +x /usr/local/bin/backup && \
chmod +x /usr/local/bin/restore
ENTRYPOINT ["/usr/local/bin/mysql-bkup"]

View File

@@ -7,7 +7,7 @@ nav_order: 3
# Backup to SSH remote server
As described for s3 backup section, to change the storage of you backup and use S3 as storage. You need to add `--storage ssh` or `--storage remote`.
As described in the S3 backup section, to change the storage of your backup and use an SSH remote server as storage, you need to add `--storage ssh` or `--storage remote`.
You also need to provide the full remote path, either by adding the `--path /home/jkaninda/backups` flag or by using the `SSH_REMOTE_PATH` environment variable.
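As a minimal sketch, combining the flags above with the docker run pattern used elsewhere in these docs (the SSH connection details such as `SSH_HOST_NAME`, `SSH_PORT`, and `SSH_IDENTIFY_FILE` are assumed to be supplied via the env file):
```shell
docker run --rm --network your_network_name \
  --env-file your-env-file \
  -v $PWD/backup:/backup/ \
  jkaninda/mysql-bkup backup --storage ssh --path /home/jkaninda/backups
```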
{: .note }

View File

@@ -10,7 +10,60 @@ nav_order: 8
To deploy MySQL Backup on Kubernetes, you can use a Job to back up or restore your database.
For recurring backups you can use a CronJob; you don't need to run it in scheduled mode, as described below.
## Backup Job
## Backup to S3 storage
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup
spec:
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- bkup
- backup
- --storage
- s3
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: Never
```
## Backup Job to SSH remote server
```yaml
apiVersion: batch/v1
@@ -177,7 +230,10 @@ spec:
```
## Kubernetes Rootless
This image also supports the Kubernetes security context, so you can run it in a rootless environment.
It has been tested on Openshift, it works well.
Deployment on OpenShift is supported; you need to remove the `securityContext` section from your YAML file.
```yaml
apiVersion: batch/v1

View File

@@ -11,7 +11,7 @@ The image supports encrypting backups using GPG out of the box. In case a `GPG_P
{: .warning }
To restore an encrypted backup, you need to provide the same GPG passphrase used during the backup process.
To decrypt manually, you need to install gnupg
To decrypt manually, you need to install `gnupg`
### Decrypt backup
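As a sketch of manual decryption with standard `gnupg` (file names and the passphrase are placeholders; depending on your GnuPG version you may also need `--pinentry-mode loopback` for a non-interactive passphrase):
```shell
gpg --batch --yes --passphrase "your-passphrase" \
  --output backup.sql.gz --decrypt backup.sql.gz.gpg
```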

View File

@@ -45,6 +45,13 @@ To run a one time backup, bind your local volume to `/backup` in the container a
Alternatively, pass an `--env-file` in order to use a full config as described below.
```shell
docker run --rm --network your_network_name \
--env-file your-env-file \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup backup -d database_name
```
### Simple backup in a Docker Compose file
```yaml

View File

@@ -1,4 +1,4 @@
piVersion: batch/v1
apiVersion: batch/v1
kind: CronJob
metadata:
name: bkup-job

View File

@@ -22,7 +22,7 @@ func StartBackup(cmd *cobra.Command) {
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "dbname", "DB_NAME")
utils.GetEnv(cmd, "port", "DB_PORT")
utils.GetEnv(cmd, "period", "SCHEDULE_PERIOD")
utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
//Get flag value and set env
remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
@@ -77,7 +77,7 @@ func scheduledMode(storage string) {
fmt.Println(" Starting MySQL Bkup... ")
fmt.Println("***********************************")
utils.Info("Running in Scheduled mode")
utils.Info("Execution period %s", os.Getenv("SCHEDULE_PERIOD"))
utils.Info("Execution period %s", os.Getenv("BACKUP_CRON_EXPRESSION"))
utils.Info("Storage type %s ", storage)
//Test database connexion
@@ -203,6 +203,8 @@ func localBackup(backupFileName string, disableCompression bool, prune bool, bac
if prune {
deleteOldBackup(backupRetention)
}
//Delete temp
deleteTemp()
}
func s3Backup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
@@ -216,7 +218,7 @@ func s3Backup(backupFileName string, disableCompression bool, prune bool, backup
encryptBackup(backupFileName)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
}
utils.Info("Uploading backup file to S3 storage...")
utils.Info("Uploading backup archive to remote storage S3 ... ")
utils.Info("Backup name is %s", finalFileName)
err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
if err != nil {
@@ -237,7 +239,9 @@ func s3Backup(backupFileName string, disableCompression bool, prune bool, backup
utils.Fatal("Error deleting old backup from S3: %s ", err)
}
}
utils.Done("Database has been backed up and uploaded to s3 ")
utils.Done("Uploading backup archive to remote storage S3 ... done ")
//Delete temp
deleteTemp()
}
func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
utils.Info("Backup database to Remote server")
@@ -248,7 +252,7 @@ func sshBackup(backupFileName, remotePath string, disableCompression bool, prune
encryptBackup(backupFileName)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
}
utils.Info("Uploading backup file to remote server...")
utils.Info("Uploading backup archive to remote storage ... ")
utils.Info("Backup name is %s", finalFileName)
err := CopyToRemote(finalFileName, remotePath)
if err != nil {
@@ -268,9 +272,10 @@ func sshBackup(backupFileName, remotePath string, disableCompression bool, prune
}
utils.Done("Database has been backed up and uploaded to remote server ")
utils.Done("Uploading backup archive to remote storage ... done ")
//Delete temp
deleteTemp()
}
func encryptBackup(backupFileName string) {
gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)

View File

@@ -71,4 +71,28 @@ func deleteOldBackup(retentionDays int) {
utils.Fatal(fmt.Sprintf("Error: %s", err))
return
}
utils.Done("Deleting old backups...done")
}
func deleteTemp() {
utils.Info("Deleting %s ...", tmpPath)
err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// Check if the current item is a file
if !info.IsDir() {
// Delete the file
err = os.Remove(path)
if err != nil {
return err
}
}
return nil
})
if err != nil {
utils.Error("Error deleting files: %v", err)
} else {
utils.Info("Deleting %s ... done", tmpPath)
}
}

View File

@@ -44,7 +44,7 @@ func restoreFromS3(file, bucket, s3Path string) {
utils.Info("Restore database from s3")
err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
if err != nil {
utils.Fatal(fmt.Sprintf("Error download file from s3 %s %s", file, err))
utils.Fatal("Error download file from s3 %s %v", file, err)
}
RestoreDatabase(file)
}
@@ -52,7 +52,7 @@ func restoreFromRemote(file, remotePath string) {
utils.Info("Restore database from remote server")
err := CopyFromRemote(file, remotePath)
if err != nil {
utils.Fatal(fmt.Sprintf("Error download file from remote server: ", filepath.Join(remotePath, file), err))
utils.Fatal("Error download file from remote server: %s %v ", filepath.Join(remotePath, file), err)
}
RestoreDatabase(file)
}
@@ -94,6 +94,7 @@ func RestoreDatabase(file string) {
if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
utils.TestDatabaseConnection()
utils.Info("Restoring database...")
extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
// Restore from compressed file / .sql.gz
@@ -103,7 +104,10 @@ func RestoreDatabase(file string) {
if err != nil {
utils.Fatal("Error, in restoring the database %v", err)
}
utils.Info("Restoring database... done")
utils.Done("Database has been restored")
//Delete temp
deleteTemp()
} else if extension == ".sql" {
//Restore from sql file
@@ -112,7 +116,10 @@ func RestoreDatabase(file string) {
if err != nil {
utils.Fatal(fmt.Sprintf("Error in restoring the database %s", err))
}
utils.Info("Restoring database... done")
utils.Done("Database has been restored")
//Delete temp
deleteTemp()
} else {
utils.Fatal(fmt.Sprintf("Unknown file extension %s", extension))
}

View File

@@ -54,7 +54,7 @@ set -e
}
cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
`, os.Getenv("SCHEDULE_PERIOD"), cronLogFile)
`, os.Getenv("BACKUP_CRON_EXPRESSION"), cronLogFile)
if err := utils.WriteToFile(cronJob, cronContent); err != nil {
utils.Fatal("Error writing to %s: %v\n", cronJob, err)

View File

@@ -1,8 +0,0 @@
#!/bin/sh
DB_USERNAME='db_username'
DB_PASSWORD='password'
DB_HOST='db_hostname'
DB_NAME='db_name'
BACKUP_DIR="$PWD/backup"
docker run --rm --name mysql-bkup -v $BACKUP_DIR:/backup/ -e "DB_HOST=$DB_HOST" -e "DB_USERNAME=$DB_USERNAME" -e "DB_PASSWORD=$DB_PASSWORD" jkaninda/mysql-bkup:latest backup -d $DB_NAME