Merge pull request #85 from jkaninda/ftp

feat: add ftp backup storage
2024-09-30 00:04:03 +02:00
committed by GitHub
32 changed files with 540 additions and 343 deletions

View File

@@ -1,5 +1,5 @@
# PostgreSQL Backup
PostgreSQL Backup is a Docker container image that can be used to backup, restore and migrate Postgres database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
PostgreSQL Backup is a Docker container image that can be used to backup, restore, and migrate Postgres databases. It supports local storage, AWS S3 (or any S3-compatible object storage), FTP, and SSH-compatible storage.
It also supports __encrypting__ your backups using GPG.
The [jkaninda/pg-bkup](https://hub.docker.com/r/jkaninda/pg-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
@@ -87,6 +87,18 @@ services:
networks:
web:
```
### Docker recurring backup
```shell
docker run --rm --network network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=hostname" \
-e "DB_USERNAME=user" \
-e "DB_PASSWORD=password" \
jkaninda/pg-bkup backup -d dbName --cron-expression "@every 1m"
```
See: https://jkaninda.github.io/pg-bkup/reference/#predefined-schedules
## Deploy on Kubernetes
For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as a Job or CronJob.
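As a minimal sketch (hypothetical, not from the docs; a real manifest would also set `DB_HOST`, `DB_USERNAME` and `DB_PASSWORD`), a CronJob can be created imperatively:
```shell
# Hypothetical sketch: schedule a daily backup via the image's `backup`
# entrypoint script. Database credentials must still be provided, e.g.
# kubectl set env cronjob/pg-bkup DB_HOST=... DB_USERNAME=... DB_PASSWORD=...
kubectl create cronjob pg-bkup \
  --image=jkaninda/pg-bkup \
  --schedule="0 1 * * *" \
  -- backup -d dbName
```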
@@ -155,7 +167,7 @@ While it may work against different implementations, there are no guarantees abo
We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
- The original image is based on `ubuntu` and requires additional tools, making it heavy.
- The original image is based on `Alpine` and requires additional tools, making it heavy.
- This image is written in Go.
- `arm64` and `arm/v7` architectures are supported.
- Docker in Swarm mode is supported.

View File

@@ -30,7 +30,8 @@ func init() {
BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled")
BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time")
BackupCmd.PersistentFlags().StringP("period", "", "", "Schedule period time | Deprecated")
BackupCmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression")
BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "Delete files created more than specified days ago, default 7 days")
BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")

View File

@@ -9,7 +9,7 @@ RUN go mod download
# Build
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/pg-bkup
FROM ubuntu:24.04
FROM alpine:3.20.3
ENV DB_HOST=""
ENV DB_NAME=""
ENV DB_USERNAME=""
@@ -25,53 +25,42 @@ ENV AWS_REGION="us-west-2"
ENV AWS_DISABLE_SSL="false"
ENV GPG_PASSPHRASE=""
ENV SSH_USER=""
ENV SSH_REMOTE_PATH=""
ENV SSH_PASSWORD=""
ENV SSH_HOST_NAME=""
ENV SSH_IDENTIFY_FILE=""
ENV SSH_PORT="22"
ENV SSH_PORT=22
ENV REMOTE_PATH=""
ENV FTP_HOST_NAME=""
ENV FTP_PORT=21
ENV FTP_USER=""
ENV FTP_PASSWORD=""
ENV TARGET_DB_HOST=""
ENV TARGET_DB_PORT=5432
ENV TARGET_DB_NAME=""
ENV TARGET_DB_USERNAME=""
ENV TARGET_DB_PASSWORD=""
ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="v1.2.5"
ENV VERSION="v1.2.8"
ENV BACKUP_CRON_EXPRESSION=""
ENV TG_TOKEN=""
ENV TG_CHAT_ID=""
ARG WORKDIR="/config"
ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup"
ARG BACKUP_CRON="/etc/cron.d/backup_cron"
ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
LABEL author="Jonas Kaninda"
RUN apt-get update -qq
RUN apt install postgresql-client supervisor cron gnupg -y
# Clear cache
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
RUN apk --update add postgresql-client gnupg
RUN mkdir $WORKDIR
RUN mkdir $BACKUPDIR
RUN mkdir -p $BACKUP_TMP_DIR
RUN chmod 777 $WORKDIR
RUN chmod 777 $BACKUPDIR
RUN chmod 777 $BACKUP_TMP_DIR
RUN touch $BACKUP_CRON && \
touch $BACKUP_CRON_SCRIPT && \
chmod 777 $WORKDIR && \
chmod 777 $BACKUP_CRON && \
chmod 777 $BACKUP_CRON_SCRIPT
RUN chmod 777 $WORKDIR
COPY --from=build /app/pg-bkup /usr/local/bin/pg-bkup
RUN chmod +x /usr/local/bin/pg-bkup
RUN ln -s /usr/local/bin/pg-bkup /usr/local/bin/bkup
ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
# Create the backup script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/pg-bkup backup "$@"' > /usr/local/bin/backup && \
chmod +x /usr/local/bin/backup

View File

@@ -1,13 +0,0 @@
[supervisord]
nodaemon=true
user=root
logfile=/var/log/supervisor/supervisord.log
pidfile=/var/run/supervisord.pid
[program:cron]
command = /bin/bash -c "declare -p | grep -Ev '^declare -[[:alpha:]]*r' > /run/supervisord.env && /usr/sbin/cron -f -L 15"
autostart=true
autorestart=true
user = root
stderr_logfile=/var/log/cron.err.log
stdout_logfile=/var/log/cron.out.log

BIN
docs/favicon.ico Normal file

Binary file not shown.


View File

@@ -0,0 +1,44 @@
---
title: Backup to FTP remote server
layout: default
parent: How Tos
nav_order: 4
---
# Backup to FTP remote server
As described in the S3 backup section, to use an FTP remote server as backup storage you need to add `--storage ftp`.
You also need to set the full remote path by adding the `--path /home/jkaninda/backups` flag or by using the `REMOTE_PATH` environment variable.
{: .note }
These environment variables are required for FTP backup: `FTP_HOST_NAME`, `FTP_USER`, `FTP_PASSWORD`, `FTP_PORT`, and `REMOTE_PATH`.
```yml
services:
pg-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/pg-bkup/releases
# for a list of available releases.
image: jkaninda/pg-bkup
container_name: pg-bkup
command: backup --storage ftp -d database
environment:
- DB_PORT=5432
- DB_HOST=postgres
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## FTP config
- FTP_HOST_NAME="hostname"
- FTP_PORT=21
- FTP_USER=user
- FTP_PASSWORD=password
- REMOTE_PATH=/home/jkaninda/backups
# pg-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:
```
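For a one-off run outside Compose, the same FTP configuration can be passed to `docker run` directly (a sketch assuming the network and credentials from the Compose file above):
```shell
docker run --rm --network network_name \
 -e "DB_HOST=postgres" \
 -e "DB_NAME=database" \
 -e "DB_USERNAME=username" \
 -e "DB_PASSWORD=password" \
 -e "FTP_HOST_NAME=hostname" \
 -e "FTP_PORT=21" \
 -e "FTP_USER=user" \
 -e "FTP_PASSWORD=password" \
 -e "REMOTE_PATH=/home/jkaninda/backups" \
 jkaninda/pg-bkup backup --storage ftp -d database
```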

View File

@@ -48,7 +48,7 @@ networks:
### Recurring backups to S3
As explained above, you just need to add the AWS environment variables and specify the storage type `--storage s3`.
In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
In case you need recurring backups, you can use the `--cron-expression "0 1 * * *"` flag or set `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.
```yml
services:
@@ -72,6 +72,7 @@ services:
- AWS_REGION="us-west-2"
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
# - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
## In case you are using an S3 alternative such as Minio and your Minio instance is not secured, you can change it to true
- AWS_DISABLE_SSL="false"
# pg-bkup container must be connected to the same network as your database

View File

@@ -8,10 +8,10 @@ nav_order: 3
As described in the S3 backup section, to use an SSH remote server as backup storage you need to add `--storage ssh` or `--storage remote`.
You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `SSH_REMOTE_PATH` environment variable.
You need to set the full remote path by adding the `--path /home/jkaninda/backups` flag or by using the `REMOTE_PATH` environment variable.
{: .note }
These environment variables are required for SSH backup `SSH_HOST_NAME`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, `SSH_PORT` or `SSH_PASSWORD` if you dont use a private key to access to your server.
These environment variables are required for SSH backup: `SSH_HOST_NAME`, `SSH_USER`, `REMOTE_PATH`, `SSH_IDENTIFY_FILE`, and `SSH_PORT`, or `SSH_PASSWORD` if you don't use a private key to access your server.
Accessing the remote server with a password is not recommended; use a private key instead.
```yml
@@ -36,7 +36,7 @@ services:
- SSH_HOST_NAME="hostname"
- SSH_PORT=22
- SSH_USER=user
- SSH_REMOTE_PATH=/home/jkaninda/backups
- REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
## We advise you to use a private key instead of a password
#- SSH_PASSWORD=password
@@ -52,7 +52,7 @@ networks:
### Recurring backups to SSH remote server
As explained above, you just need to add the required environment variables and specify the storage type `--storage ssh`.
You can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
You can use the `--cron-expression "0 1 * * *"` flag or set `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.
```yml
services:
@@ -63,10 +63,7 @@ services:
# for a list of available releases.
image: jkaninda/pg-bkup
container_name: pg-bkup
command:
- /bin/sh
- -c
- pg-bkup backup -d database --storage ssh --mode scheduled --period "0 1 * * *"
command: backup -d database --storage ssh --cron-expression "0 1 * * *"
volumes:
- ./id_ed25519:/tmp/id_ed25519"
environment:
@@ -79,7 +76,7 @@ services:
- SSH_HOST_NAME="hostname"
- SSH_PORT=22
- SSH_USER=user
- SSH_REMOTE_PATH=/home/jkaninda/backups
- REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
## We advise you to use a private key instead of a password
#- SSH_PASSWORD=password
@@ -133,7 +130,7 @@ spec:
value: "22"
- name: SSH_USER
value: "xxx"
- name: SSH_REMOTE_PATH
- name: REMOTE_PATH
value: "/home/jkaninda/backups"
- name: AWS_ACCESS_KEY
value: "xxxx"

View File

@@ -56,7 +56,7 @@ networks:
jkaninda/pg-bkup backup -d database_name
```
In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
In case you need recurring backups, you can use the `--cron-expression "0 1 * * *"` flag or set `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.
```yml
services:
@@ -67,7 +67,7 @@ services:
# for a list of available releases.
image: jkaninda/pg-bkup
container_name: pg-bkup
#command: backup -d database --mode scheduled --period "0 1 * * *"
command: backup -d database --cron-expression "0 1 * * *"
volumes:
- ./backup:/backup
environment:
@@ -76,6 +76,7 @@ services:
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
- BACKUP_CRON_EXPRESSION=0 1 * * *
# pg-bkup container must be connected to the same network as your database
networks:
- web

View File

@@ -2,7 +2,7 @@
title: Deploy on Kubernetes
layout: default
parent: How Tos
nav_order: 8
nav_order: 9
---
## Deploy on Kubernetes
@@ -193,7 +193,6 @@ spec:
command:
- /bin/sh
- -c
- bkup
- backup --storage ssh --disable-compression
resources:
limits:

View File

@@ -2,7 +2,7 @@
title: Encrypt backups using GPG
layout: default
parent: How Tos
nav_order: 7
nav_order: 8
---
# Encrypt backup
@@ -11,6 +11,9 @@ The image supports encrypting backups using GPG out of the box. In case a `GPG_P
{: .warning }
To restore an encrypted backup, you need to provide the same GPG passphrase used during the backup process.
- GPG home directory `/config/gnupg`
- Cipher algorithm `aes256`
To decrypt manually, you need to install `gnupg`
```shell
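# Hypothetical sketch only — the exact command is elided in this diff.
# Decrypts with the same passphrase used during backup (loopback pinentry
# for non-interactive use; the image uses the aes256 cipher).
gpg --batch --yes --pinentry-mode loopback --passphrase "my-passphrase" \
  --output backup.sql.gz --decrypt backup.sql.gz.gpg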

View File

@@ -2,7 +2,7 @@
title: Migrate database
layout: default
parent: How Tos
nav_order: 9
nav_order: 10
---
# Migrate database

View File

@@ -2,7 +2,7 @@
title: Restore database from AWS S3
layout: default
parent: How Tos
nav_order: 5
nav_order: 6
---
# Restore database from S3 storage

View File

@@ -2,7 +2,7 @@
title: Restore database from SSH
layout: default
parent: How Tos
nav_order: 6
nav_order: 7
---
# Restore database from SSH remote server

View File

@@ -2,7 +2,7 @@
title: Restore database
layout: default
parent: How Tos
nav_order: 4
nav_order: 5
---
# Restore database

View File

@@ -6,7 +6,7 @@ nav_order: 1
# About pg-bkup
{:.no_toc}
PostreSQL Backup is a Docker container image that can be used to backup, restore and migrate Postgres database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
PostgreSQL Backup is a Docker container image that can be used to backup, restore, and migrate Postgres databases. It supports local storage, AWS S3 (or any S3-compatible object storage), FTP, and SSH-compatible storage.
It also supports database __encryption__ using GPG.
@@ -82,6 +82,18 @@ services:
networks:
web:
```
### Docker recurring backup
```shell
docker run --rm --network network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=hostname" \
-e "DB_USERNAME=user" \
-e "DB_PASSWORD=password" \
jkaninda/pg-bkup backup -d dbName --cron-expression "@every 1m"
```
See: https://jkaninda.github.io/pg-bkup/reference/#predefined-schedules
## Kubernetes
```yaml
@@ -146,7 +158,7 @@ While it may work against different implementations, there are no guarantees abo
We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
- The original image is based on `ubuntu` and requires additional tools, making it heavy.
- The original image is based on `Alpine` and requires additional tools, making it heavy.
- This image is written in Go.
- `arm64` and `arm/v7` architectures are supported.
- Docker in Swarm mode is supported.

View File

@@ -25,51 +25,50 @@ Backup, restore and migrate targets, schedule and retention are configured using
| --path | | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` |
| --dbname | -d | Database name |
| --port | -p | Database port (default: 5432) |
| --mode | -m | Execution mode. default or scheduled (default: default) |
| --disable-compression | | Disable database backup compression |
| --prune | | Delete old backup, default disabled |
| --keep-last | | Delete old backup created more than specified days ago, default 7 days |
| --period | | Crontab period for scheduled mode only. (default: "0 1 * * *") |
| --cron-expression | | Backup cron expression, eg: (* * * * *) or @daily |
| --help | -h | Print this help message and exit |
| --version | -V | Print version information and exit |
## Environment variables
| Name | Requirement | Description |
|------------------------|----------------------------------------------------|------------------------------------------------------|
| DB_PORT | Optional, default 5432 | Database port number |
| DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name |
| DB_USERNAME | Required | Database user name |
| DB_PASSWORD | Required | Database password |
| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
| AWS_REGION | Optional, required for S3 storage | AWS Region |
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
| BACKUP_CRON_EXPRESSION | Optional if it was provided from the --period flag | Backup cron expression for docker in scheduled mode |
| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
| SSH_USER | Optional, required for SSH storage | ssh remote user |
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
| TARGET_DB_HOST | Optional, required for database migration | Target database host |
| TARGET_DB_PORT | Optional, required for database migration | Target database port |
| TARGET_DB_NAME | Optional, required for database migration | Target database name |
| TARGET_DB_USERNAME | Optional, required for database migration | Target database username |
| TARGET_DB_PASSWORD | Optional, required for database migration | Target database password |
| TG_TOKEN | Optional, required for Telegram notification | Telegram token |
| TG_CHAT_ID | Optional, required for Telegram notification | Telegram Chat ID |
| Name | Requirement | Description |
|------------------------|---------------------------------------------------------------|------------------------------------------------------|
| DB_PORT | Optional, default 5432 | Database port number |
| DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name |
| DB_USERNAME | Required | Database user name |
| DB_PASSWORD | Required | Database password |
| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
| AWS_REGION | Optional, required for S3 storage | AWS Region |
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
| BACKUP_CRON_EXPRESSION | Optional if it was provided from the `--cron-expression` flag | Backup cron expression for docker in scheduled mode |
| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
| SSH_USER | Optional, required for SSH storage | ssh remote user |
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
| TARGET_DB_HOST | Optional, required for database migration | Target database host |
| TARGET_DB_PORT | Optional, required for database migration | Target database port |
| TARGET_DB_NAME | Optional, required for database migration | Target database name |
| TARGET_DB_USERNAME | Optional, required for database migration | Target database username |
| TARGET_DB_PASSWORD | Optional, required for database migration | Target database password |
| TG_TOKEN | Optional, required for Telegram notification | Telegram token (`BOT-ID:BOT-TOKEN`) |
| TG_CHAT_ID | Optional, required for Telegram notification | Telegram Chat ID |
---
## Run in Scheduled mode
This image can be run as a CronJob in Kubernetes for regular backups, which makes deployment on Kubernetes easy since Kubernetes provides CronJob resources.
For Docker, you need to run it in scheduled mode by adding `--mode scheduled` flag and specify the periodical backup time by adding `--period "0 1 * * *"` flag.
For Docker, you run it in scheduled mode by adding the `--cron-expression "0 1 * * *"` flag or by defining the `BACKUP_CRON_EXPRESSION=0 1 * * *` environment variable.
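For example, reusing the Docker recurring-backup invocation from the README (a sketch assuming the same network and credentials):
```shell
docker run --rm --network network_name \
 -v $PWD/backup:/backup/ \
 -e "DB_HOST=hostname" \
 -e "DB_USERNAME=user" \
 -e "DB_PASSWORD=password" \
 jkaninda/pg-bkup backup -d dbName --cron-expression "0 1 * * *"
```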
## Syntax of crontab (field description)
@@ -111,4 +110,22 @@ Easy to remember format:
```conf
0 1 * * *
```
```
## Predefined schedules
You may use one of several pre-defined schedules in place of a cron expression.
| Entry | Description | Equivalent To |
|------------------------|--------------------------------------------|---------------|
| @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 * |
| @monthly | Run once a month, midnight, first of month | 0 0 1 * * |
| @weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0 |
| @daily (or @midnight) | Run once a day, midnight | 0 0 * * * |
| @hourly | Run once an hour, beginning of hour | 0 * * * * |
### Intervals
You may also schedule a backup task at fixed intervals, starting at the time it's added or cron is run. This is supported by formatting the cron spec like this:
@every <duration>
where "duration" is a string accepted by time.
For example, "@every 1h30m10s" would indicate a schedule that activates after 1 hour, 30 minutes, 10 seconds, and then every interval after that.

View File

@@ -3,11 +3,14 @@ services:
pg-bkup:
image: jkaninda/pg-bkup
container_name: pg-bkup
command: backup --dbname database_name --mode scheduled --period "0 1 * * *"
command: backup --dbname database_name #--cron-expression "@every 5m"
volumes:
- ./backup:/backup
environment:
- DB_PORT=5432
- DB_HOST=postgress
- DB_HOST=postgres
- DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD}
- DB_PASSWORD=${DB_PASSWORD}
# Check https://jkaninda.github.io/pg-bkup/reference/#predefined-schedules
- BACKUP_CRON_EXPRESSION=@daily # @every 5m | @weekly | @monthly | 0 1 * * *

View File

@@ -6,7 +6,7 @@ services:
# for a list of available releases.
image: jkaninda/pg-bkup
container_name: pg-bkup
command: backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
command: backup --storage s3 -d my-database
environment:
- DB_PORT=5432
- DB_HOST=postgres
@@ -21,6 +21,8 @@ services:
- AWS_SECRET_KEY=xxxxx
## In case you are using an S3 alternative such as Minio and your Minio instance is not secured, you can change it to true
- AWS_DISABLE_SSL="false"
# Check https://jkaninda.github.io/pg-bkup/reference/#predefined-schedules
- BACKUP_CRON_EXPRESSION=@daily # @every 5m | @weekly | @monthly | 0 1 * * *
# pg-bkup container must be connected to the same network as your database
networks:
- web

5
go.mod
View File

@@ -10,11 +10,14 @@ require (
require (
github.com/aws/aws-sdk-go v1.55.3 // indirect
github.com/bramvdbogaerde/go-scp v1.5.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hpcloud/tail v1.0.0 // indirect
github.com/jlaffaye/ftp v0.2.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
golang.org/x/crypto v0.18.0 // indirect
golang.org/x/sys v0.22.0 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
)

9
go.sum
View File

@@ -12,10 +12,17 @@ github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
@@ -29,6 +36,8 @@ github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEp
github.com/minio/minio-go/v7 v7.0.74 h1:fTo/XlPBTSpo3BAMshlwKL5RspXRv9us5UeHEGYCFe0=
github.com/minio/minio-go/v7 v7.0.74/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=

View File

@@ -8,8 +8,8 @@ package pkg
import (
"fmt"
"github.com/hpcloud/tail"
"github.com/jkaninda/pg-bkup/utils"
"github.com/robfig/cron/v3"
"github.com/spf13/cobra"
"log"
"os"
@@ -20,108 +20,76 @@ import (
func StartBackup(cmd *cobra.Command) {
intro()
//Set env
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
dbConf = initDbConfig(cmd)
//Initialize backup configs
config := initBackupConfig(cmd)
//Get flag value and set env
remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
file = utils.GetEnv(cmd, "file", "FILE_NAME")
backupRetention, _ := cmd.Flags().GetInt("keep-last")
prune, _ := cmd.Flags().GetBool("prune")
disableCompression, _ = cmd.Flags().GetBool("disable-compression")
executionMode, _ = cmd.Flags().GetString("mode")
gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
dbConf = getDbConfig(cmd)
//
if gpgPassphrase != "" {
encryption = true
}
//Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbConf.dbName, time.Now().Format("20060102_150405"))
if disableCompression {
backupFileName = fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
}
if executionMode == "default" {
switch storage {
case "s3":
s3Backup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
case "local":
localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
case "ssh", "remote":
sshBackup(dbConf, backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
case "ftp":
utils.Fatal("Not supported storage type: %s", storage)
default:
localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
}
} else if executionMode == "scheduled" {
scheduledMode(dbConf, storage)
if config.cronExpression == "" {
BackupTask(dbConf, config)
} else {
utils.Fatal("Error, unknown execution mode!")
if utils.IsValidCronExpression(config.cronExpression) {
scheduledMode(dbConf, config)
} else {
utils.Fatal("Cron expression is not valid: %s", config.cronExpression)
}
}
}
// Run in scheduled mode
func scheduledMode(db *dbConfig, storage string) {
fmt.Println()
fmt.Println("**********************************")
fmt.Println(" Starting PostgreSQL Bkup... ")
fmt.Println("***********************************")
func scheduledMode(db *dbConfig, config *BackupConfig) {
utils.Info("Running in Scheduled mode")
utils.Info("Execution period %s ", os.Getenv("BACKUP_CRON_EXPRESSION"))
utils.Info("Storage type %s ", storage)
utils.Info("Backup cron expression: %s", config.cronExpression)
utils.Info("Storage type %s ", config.storage)
//Test database connexion
testDatabaseConnection(db)
//Test backup
utils.Info("Testing backup configurations...")
BackupTask(db, config)
utils.Info("Testing backup configurations...done")
utils.Info("Creating backup job...")
CreateCrontabScript(disableCompression, storage)
// Create a new cron instance
c := cron.New()
supervisorConfig := "/etc/supervisor/supervisord.conf"
// Start Supervisor
cmd := exec.Command("supervisord", "-c", supervisorConfig)
err := cmd.Start()
_, err := c.AddFunc(config.cronExpression, func() {
BackupTask(db, config)
})
if err != nil {
utils.Fatal("Failed to start supervisord: %v", err)
return
}
// Start the cron scheduler
c.Start()
utils.Info("Creating backup job...done")
utils.Info("Backup job started")
defer func() {
if err := cmd.Process.Kill(); err != nil {
utils.Info("Failed to kill supervisord process: %v", err)
} else {
utils.Info("Supervisor stopped.")
}
}()
if _, err := os.Stat(cronLogFile); os.IsNotExist(err) {
utils.Fatal(fmt.Sprintf("Log file %s does not exist.", cronLogFile))
defer c.Stop()
select {}
}
func BackupTask(db *dbConfig, config *BackupConfig) {
utils.Info("Starting backup task...")
//Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
if config.disableCompression {
backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20060102_150405"))
}
t, err := tail.TailFile(cronLogFile, tail.Config{Follow: true})
if err != nil {
utils.Fatal("Failed to tail file: %v", err)
config.backupFileName = backupFileName
switch config.storage {
case "local":
localBackup(db, config)
case "s3":
s3Backup(db, config)
case "ssh", "remote":
sshBackup(db, config)
case "ftp":
ftpBackup(db, config)
//utils.Fatal("Not supported storage type: %s", config.storage)
default:
localBackup(db, config)
}
// Read and print new lines from the log file
for line := range t.Lines {
fmt.Println(line.Text)
}
}
func intro() {
utils.Info("Starting PostgreSQL Backup...")
utils.Info("Copyright © 2024 Jonas Kaninda ")
utils.Info("Copyright (c) 2024 Jonas Kaninda ")
}
// BackupDatabase backup database
@@ -195,55 +163,55 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
utils.Info("Database has been backed up")
}
func localBackup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
func localBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to local storage")
BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension)
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config.backupFileName, config.passphrase)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, gpgExtension)
}
utils.Info("Backup name is %s", finalFileName)
moveToBackup(finalFileName, storagePath)
//Send notification
utils.NotifySuccess(finalFileName)
//Delete old backup
if prune {
deleteOldBackup(backupRetention)
if config.prune {
deleteOldBackup(config.backupRetention)
}
//Delete temp
deleteTemp()
}
func s3Backup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
func s3Backup(db *dbConfig, config *BackupConfig) {
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
utils.Info("Backup database to s3 storage")
//Backup database
BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config.backupFileName, config.passphrase)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to remote storage S3 ... ")
utils.Info("Backup name is %s", finalFileName)
err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
err := UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
if err != nil {
utils.Fatal("Error uploading backup archive to S3: %s ", err)
}
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
if err != nil {
fmt.Println("Error deleting file: ", err)
}
// Delete old backup
if prune {
err := utils.DeleteOldBackup(bucket, s3Path, backupRetention)
if config.prune {
err := DeleteOldBackup(bucket, s3Path, config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from S3: %s ", err)
}
@@ -254,18 +222,18 @@ func s3Backup(db *dbConfig, backupFileName string, disableCompression bool, prun
//Delete temp
deleteTemp()
}
func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
func sshBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to Remote server")
//Backup database
BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config.backupFileName, config.passphrase)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to remote storage ... ")
utils.Info("Backup name is %s", finalFileName)
err := CopyToRemote(finalFileName, remotePath)
err := CopyToRemote(finalFileName, config.remotePath)
if err != nil {
utils.Fatal("Error uploading file to the remote server: %s ", err)
@@ -277,7 +245,7 @@ func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompressi
utils.Error("Error deleting file: %v", err)
}
if prune {
if config.prune {
//TODO: Delete old backup from remote server
utils.Info("Deleting old backup from a remote server is not implemented yet")
@@ -289,10 +257,44 @@ func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompressi
//Delete temp
deleteTemp()
}
func ftpBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to the remote FTP server")
//Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config.backupFileName, config.passphrase)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to the remote FTP server ... ")
utils.Info("Backup name is %s", finalFileName)
err := CopyToFTP(finalFileName, config.remotePath)
if err != nil {
utils.Fatal("Error uploading file to the remote FTP server: %s ", err)
func encryptBackup(backupFileName string) {
gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
}
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error deleting file: %v", err)
}
if config.prune {
//TODO: Delete old backup from remote server
utils.Info("Deleting old backup from a remote server is not implemented yet")
}
utils.Done("Uploading backup archive to the remote FTP server ... done ")
//Send notification
utils.NotifySuccess(finalFileName)
//Delete temp
deleteTemp()
}
func encryptBackup(backupFileName, gpqPassphrase string) {
err := Encrypt(filepath.Join(tmpPath, backupFileName), gpqPassphrase)
if err != nil {
utils.Fatal("Error during encrypting backup %v", err)
}

View File

@@ -33,8 +33,42 @@ type TgConfig struct {
Token string
ChatId string
}
type BackupConfig struct {
backupFileName string
backupRetention int
disableCompression bool
prune bool
encryption bool
remotePath string
passphrase string
storage string
cronExpression string
}
type FTPConfig struct {
host string
user string
password string
port string
remotePath string
}
func getDbConfig(cmd *cobra.Command) *dbConfig {
func initFtpConfig() *FTPConfig {
//Initialize backup configs
fConfig := FTPConfig{}
fConfig.host = os.Getenv("FTP_HOST_NAME")
fConfig.user = os.Getenv("FTP_USER")
fConfig.password = os.Getenv("FTP_PASSWORD")
fConfig.port = os.Getenv("FTP_PORT")
fConfig.remotePath = os.Getenv("REMOTE_PATH")
err := utils.CheckEnvVars(ftpVars)
if err != nil {
utils.Error("Please make sure all required environment variables for FTP are set")
utils.Fatal("Error checking environment variables: %s", err)
}
return &fConfig
}
func initDbConfig(cmd *cobra.Command) *dbConfig {
//Set env
utils.GetEnv(cmd, "dbname", "DB_NAME")
dConf := dbConfig{}
@@ -51,7 +85,72 @@ func getDbConfig(cmd *cobra.Command) *dbConfig {
}
return &dConf
}
func getTargetDbConfig() *targetDbConfig {
func initBackupConfig(cmd *cobra.Command) *BackupConfig {
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
utils.GetEnv(cmd, "path", "REMOTE_PATH")
//Get flag value and set env
remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
backupRetention, _ := cmd.Flags().GetInt("keep-last")
prune, _ := cmd.Flags().GetBool("prune")
disableCompression, _ = cmd.Flags().GetBool("disable-compression")
_, _ = cmd.Flags().GetString("mode")
passphrase := os.Getenv("GPG_PASSPHRASE")
_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
cronExpression := os.Getenv("BACKUP_CRON_EXPRESSION")
if passphrase != "" {
encryption = true
}
//Initialize backup configs
config := BackupConfig{}
config.backupRetention = backupRetention
config.disableCompression = disableCompression
config.prune = prune
config.storage = storage
config.encryption = encryption
config.remotePath = remotePath
config.passphrase = passphrase
config.cronExpression = cronExpression
return &config
}
type RestoreConfig struct {
s3Path string
remotePath string
storage string
file string
bucket string
gpqPassphrase string
}
func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "path", "REMOTE_PATH")
//Get flag value and set env
s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
file = utils.GetEnv(cmd, "file", "FILE_NAME")
_, _ = cmd.Flags().GetString("mode")
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
//Initialize restore configs
rConfig := RestoreConfig{}
rConfig.s3Path = s3Path
rConfig.remotePath = remotePath
rConfig.storage = storage
rConfig.bucket = bucket
rConfig.file = file
rConfig.gpqPassphrase = gpqPassphrase
return &rConfig
}
func initTargetDbConfig() *targetDbConfig {
tdbConfig := targetDbConfig{}
tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
tdbConfig.targetDbPort = os.Getenv("TARGET_DB_PORT")

View File

@@ -16,7 +16,7 @@ import (
func Decrypt(inputFile string, passphrase string) error {
utils.Info("Decrypting backup file: %s...", inputFile)
//Create gpg home dir
err := utils.MakeDir(gpgHome)
err := utils.MakeDirAll(gpgHome)
if err != nil {
return err
}
@@ -37,7 +37,7 @@ func Decrypt(inputFile string, passphrase string) error {
func Encrypt(inputFile string, passphrase string) error {
utils.Info("Encrypting backup...")
//Create gpg home dir
err := utils.MakeDir(gpgHome)
err := utils.MakeDirAll(gpgHome)
if err != nil {
return err
}

81
pkg/ftp.go Normal file
View File

@@ -0,0 +1,81 @@
package pkg
import (
"fmt"
"github.com/jlaffaye/ftp"
"io"
"os"
"path/filepath"
"time"
)
// initFtpClient initializes and authenticates an FTP client
func initFtpClient() (*ftp.ServerConn, error) {
ftpConfig := initFtpConfig()
ftpClient, err := ftp.Dial(fmt.Sprintf("%s:%s", ftpConfig.host, ftpConfig.port), ftp.DialWithTimeout(5*time.Second))
if err != nil {
return nil, fmt.Errorf("failed to connect to FTP: %w", err)
}
err = ftpClient.Login(ftpConfig.user, ftpConfig.password)
if err != nil {
return nil, fmt.Errorf("failed to log in to FTP: %w", err)
}
return ftpClient, nil
}
// CopyToFTP uploads a file to the remote FTP server
func CopyToFTP(fileName, remotePath string) (err error) {
ftpConfig := initFtpConfig()
ftpClient, err := initFtpClient()
if err != nil {
return err
}
defer ftpClient.Quit()
filePath := filepath.Join(tmpPath, fileName)
file, err := os.Open(filePath)
if err != nil {
return fmt.Errorf("failed to open file %s: %w", fileName, err)
}
defer file.Close()
remoteFilePath := filepath.Join(ftpConfig.remotePath, fileName)
err = ftpClient.Stor(remoteFilePath, file)
if err != nil {
return fmt.Errorf("failed to upload file %s: %w", fileName, err)
}
return nil
}
// CopyFromFTP downloads a file from the remote FTP server
func CopyFromFTP(fileName, remotePath string) (err error) {
ftpClient, err := initFtpClient()
if err != nil {
return err
}
defer ftpClient.Quit()
remoteFilePath := filepath.Join(remotePath, fileName)
r, err := ftpClient.Retr(remoteFilePath)
if err != nil {
return fmt.Errorf("failed to retrieve file %s: %w", fileName, err)
}
defer r.Close()
localFilePath := filepath.Join(tmpPath, fileName)
outFile, err := os.Create(localFilePath)
if err != nil {
return fmt.Errorf("failed to create local file %s: %w", fileName, err)
}
defer outFile.Close()
_, err = io.Copy(outFile, r)
if err != nil {
return fmt.Errorf("failed to copy data to local file %s: %w", fileName, err)
}
return nil
}

View File

@@ -17,8 +17,8 @@ func StartMigration(cmd *cobra.Command) {
intro()
utils.Info("Starting database migration...")
//Get DB config
dbConf = getDbConfig(cmd)
targetDbConf = getTargetDbConfig()
dbConf = initDbConfig(cmd)
targetDbConf = initTargetDbConfig()
//Defining the target database variables
newDbConfig := dbConfig{}

View File

@@ -17,40 +17,30 @@ import (
func StartRestore(cmd *cobra.Command) {
intro()
//Set env
utils.SetEnv("STORAGE_PATH", storagePath)
dbConf = initDbConfig(cmd)
restoreConf := initRestoreConfig(cmd)
//Get flag value and set env
s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
file = utils.GetEnv(cmd, "file", "FILE_NAME")
executionMode, _ = cmd.Flags().GetString("mode")
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
dbConf = getDbConfig(cmd)
switch storage {
switch restoreConf.storage {
case "s3":
restoreFromS3(dbConf, file, bucket, s3Path)
restoreFromS3(dbConf, restoreConf.file, restoreConf.bucket, restoreConf.s3Path)
case "local":
utils.Info("Restore database from local")
copyToTmp(storagePath, file)
RestoreDatabase(dbConf, file)
copyToTmp(storagePath, restoreConf.file)
RestoreDatabase(dbConf, restoreConf.file)
case "ssh":
restoreFromRemote(dbConf, file, remotePath)
restoreFromRemote(dbConf, restoreConf.file, restoreConf.remotePath)
case "ftp":
utils.Fatal("Restore from FTP is not yet supported")
restoreFromFTP(dbConf, restoreConf.file, restoreConf.remotePath)
default:
utils.Info("Restore database from local")
copyToTmp(storagePath, file)
RestoreDatabase(dbConf, file)
copyToTmp(storagePath, restoreConf.file)
RestoreDatabase(dbConf, restoreConf.file)
}
}
func restoreFromS3(db *dbConfig, file, bucket, s3Path string) {
utils.Info("Restore database from s3")
err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
err := DownloadFile(tmpPath, file, bucket, s3Path)
if err != nil {
utils.Fatal("Error download file from s3 %s %v ", file, err)
}
@@ -64,6 +54,14 @@ func restoreFromRemote(db *dbConfig, file, remotePath string) {
}
RestoreDatabase(db, file)
}
func restoreFromFTP(db *dbConfig, file, remotePath string) {
utils.Info("Restore database from FTP server")
err := CopyFromFTP(file, remotePath)
if err != nil {
utils.Fatal("Error download file from FTP server: %s %v", filepath.Join(remotePath, file), err)
}
RestoreDatabase(db, file)
}
// RestoreDatabase restore database
func RestoreDatabase(db *dbConfig, file string) {
@@ -103,11 +101,11 @@ func RestoreDatabase(db *dbConfig, file string) {
testDatabaseConnection(db)
utils.Info("Restoring database...")
extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
extension := filepath.Ext(file)
// Restore from compressed file / .sql.gz
if extension == ".gz" {
str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | psql -h " + db.dbHost + " -p " + db.dbPort + " -U " + db.dbUserName + " -v -d " + db.dbName
_, err := exec.Command("bash", "-c", str).Output()
str := "zcat " + filepath.Join(tmpPath, file) + " | psql -h " + db.dbHost + " -p " + db.dbPort + " -U " + db.dbUserName + " -v -d " + db.dbName
_, err := exec.Command("sh", "-c", str).Output()
if err != nil {
utils.Fatal("Error, in restoring the database %v", err)
}
@@ -118,8 +116,8 @@ func RestoreDatabase(db *dbConfig, file string) {
} else if extension == ".sql" {
//Restore from sql file
str := "cat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | psql -h " + db.dbHost + " -p " + db.dbPort + " -U " + db.dbUserName + " -v -d " + db.dbName
_, err := exec.Command("bash", "-c", str).Output()
str := "cat " + filepath.Join(tmpPath, file) + " | psql -h " + db.dbHost + " -p " + db.dbPort + " -U " + db.dbUserName + " -v -d " + db.dbName
_, err := exec.Command("sh", "-c", str).Output()
if err != nil {
utils.Fatal("Error in restoring the database %v", err)
}

View File

@@ -1,10 +1,10 @@
// Package utils /
// Package pkg
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package utils
package pkg
import (
"bytes"
@@ -13,6 +13,7 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/jkaninda/pg-bkup/utils"
"net/http"
"os"
"path/filepath"
@@ -32,20 +33,20 @@ func CreateSession() (*session.Session, error) {
"AWS_REGION",
}
endPoint := GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
accessKey := GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
secretKey := GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
_ = GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
endPoint := utils.GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
accessKey := utils.GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
secretKey := utils.GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
_ = utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
region := os.Getenv("AWS_REGION")
awsDisableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
if err != nil {
Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
utils.Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
}
err = CheckEnvVars(awsVars)
err = utils.CheckEnvVars(awsVars)
if err != nil {
Fatal("Error checking environment variables\n: %s", err)
utils.Fatal("Error checking environment variables\n: %s", err)
}
// Configure to use MinIO Server
s3Config := &aws.Config{
@@ -105,10 +106,10 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
if err != nil {
return err
}
Info("Download backup from S3 storage...")
utils.Info("Download backup from S3 storage...")
file, err := os.Create(filepath.Join(destinationPath, key))
if err != nil {
Error("Failed to create file", err)
utils.Error("Failed to create file", err)
return err
}
defer file.Close()
@@ -122,10 +123,10 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
Key: aws.String(objectKey),
})
if err != nil {
Error("Failed to download file", err)
utils.Error("Failed to download file %s", key)
return err
}
Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)
utils.Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)
return nil
}
@@ -155,18 +156,18 @@ func DeleteOldBackup(bucket, prefix string, retention int) error {
Key: object.Key,
})
if err != nil {
Info("Failed to delete object %s: %v", *object.Key, err)
utils.Info("Failed to delete object %s: %v", *object.Key, err)
} else {
Info("Deleted object %s\n", *object.Key)
utils.Info("Deleted object %s\n", *object.Key)
}
}
}
return !lastPage
})
if err != nil {
Error("Failed to list objects: %v", err)
utils.Error("Failed to list objects: %v", err)
}
Info("Finished deleting old files.")
utils.Info("Finished deleting old files.")
return nil
}

View File

@@ -1,73 +0,0 @@
package pkg
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
import (
"fmt"
"github.com/jkaninda/pg-bkup/utils"
"os"
"os/exec"
)
func CreateCrontabScript(disableCompression bool, storage string) {
//task := "/usr/local/bin/backup_cron.sh"
touchCmd := exec.Command("touch", backupCronFile)
if err := touchCmd.Run(); err != nil {
utils.Fatal("Error creating file %s: %v\n", backupCronFile, err)
}
var disableC = ""
if disableCompression {
disableC = "--disable-compression"
}
var scriptContent string
scriptContent = fmt.Sprintf(`#!/usr/bin/env bash
set -e
/usr/local/bin/pg-bkup backup --dbname %s --storage %s %v
`, os.Getenv("DB_NAME"), storage, disableC)
if err := utils.WriteToFile(backupCronFile, scriptContent); err != nil {
utils.Fatal("Error writing to %s: %v\n", backupCronFile, err)
}
chmodCmd := exec.Command("chmod", "+x", "/usr/local/bin/backup_cron.sh")
if err := chmodCmd.Run(); err != nil {
utils.Fatal("Error changing permissions of %s: %v\n", backupCronFile, err)
}
lnCmd := exec.Command("ln", "-s", "/usr/local/bin/backup_cron.sh", "/usr/local/bin/backup_cron")
if err := lnCmd.Run(); err != nil {
utils.Fatal("Error creating symbolic link: %v\n", err)
}
touchLogCmd := exec.Command("touch", cronLogFile)
if err := touchLogCmd.Run(); err != nil {
utils.Fatal("Error creating file %s: %v\n", cronLogFile, err)
}
cronJob := "/etc/cron.d/backup_cron"
touchCronCmd := exec.Command("touch", cronJob)
if err := touchCronCmd.Run(); err != nil {
utils.Fatal("Error creating file %s: %v\n", cronJob, err)
}
cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
`, os.Getenv("BACKUP_CRON_EXPRESSION"), cronLogFile)
if err := utils.WriteToFile(cronJob, cronContent); err != nil {
utils.Fatal("Error writing to %s: %v\n", cronJob, err)
}
utils.ChangePermission("/etc/cron.d/backup_cron", 0644)
crontabCmd := exec.Command("crontab", "/etc/cron.d/backup_cron")
if err := crontabCmd.Run(); err != nil {
utils.Fatal("Error updating crontab: ", err)
}
utils.Info("Backup job created.")
}

View File

@@ -6,17 +6,15 @@
**/
package pkg
const cronLogFile = "/var/log/pg-bkup.log"
const tmpPath = "/tmp/backup"
const backupCronFile = "/usr/local/bin/backup_cron.sh"
const gpgHome = "gnupg"
const gpgHome = "/config/gnupg"
const algorithm = "aes256"
const gpgExtension = "gpg"
var (
storage = "local"
file = ""
executionMode = "default"
storage = "local"
file = ""
storagePath = "/backup"
disableCompression = false
encryption = false
@@ -43,9 +41,15 @@ var targetDbConf *targetDbConfig
// sshVars Required environment variables for SSH remote server storage
var sshVars = []string{
"SSH_USER",
"SSH_REMOTE_PATH",
"SSH_HOST_NAME",
"SSH_PORT",
"REMOTE_PATH",
}
var ftpVars = []string{
"FTP_HOST_NAME",
"FTP_USER",
"FTP_PASSWORD",
"FTP_PORT",
}
// AwsVars Required environment variables for AWS S3 storage

View File

@@ -7,10 +7,10 @@
package utils
const RestoreExample = "pg-bkup restore --dbname database --file db_20231219_022941.sql.gz\n" +
"bkup restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
"restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
const BackupExample = "pg-bkup backup --dbname database --disable-compression\n" +
"pg-bkup backup --dbname database --storage s3 --path /custom-path --disable-compression"
"backup --dbname database --storage s3 --path /custom-path --disable-compression"
const MainExample = "pg-bkup backup --dbname database --disable-compression\n" +
"pg-bkup backup --dbname database --storage s3 --path /custom-path\n" +
"pg-bkup restore --dbname database --file db_20231219_022941.sql.gz"
"backup --dbname database --storage s3 --path /custom-path\n" +
"restore --dbname database --file db_20231219_022941.sql.gz"

View File

@@ -10,6 +10,7 @@ import (
"bytes"
"encoding/json"
"fmt"
"github.com/robfig/cron/v3"
"github.com/spf13/cobra"
"io"
"io/fs"
@@ -222,7 +223,7 @@ func NotifySuccess(fileName string) {
//Telegram notification
err := CheckEnvVars(vars)
if err == nil {
message := "PostgreSQL Backup \n" +
message := "[✅ PostgreSQL Backup ]\n" +
"Database has been backed up \n" +
"Backup name is " + fileName
sendMessage(message)
@@ -237,7 +238,7 @@ func NotifyError(error string) {
//Telegram notification
err := CheckEnvVars(vars)
if err == nil {
message := "PostgreSQL Backup \n" +
message := "[🔴 PostgreSQL Backup ]\n" +
"An error occurred during database backup \n" +
"Error: " + error
sendMessage(message)
@@ -248,3 +249,7 @@ func getTgUrl() string {
return fmt.Sprintf("https://api.telegram.org/bot%s", os.Getenv("TG_TOKEN"))
}
func IsValidCronExpression(cronExpr string) bool {
_, err := cron.ParseStandard(cronExpr)
return err == nil
}