Initial commit

This commit is contained in:
2023-12-22 07:13:49 +01:00
commit da68aae5b1
9 changed files with 572 additions and 0 deletions

BIN
.DS_Store vendored Normal file

Binary file not shown.

39
.github/workflows/build.yml vendored Normal file
View File

@@ -0,0 +1,39 @@
# GitHub Actions workflow: build the multi-arch image and push to Docker Hub.
# NOTE(review): the workflow_dispatch input `docker_tag` is declared but never
# used by the build step — the pushed tags are hard-coded below; confirm intent.
name: Build
on:
  push:
    branches: [ "main" ]
  workflow_dispatch:
    inputs:
      docker_tag:
        description: 'Docker tag'
        required: true
        default: 'latest'
        type: string
env:
  BUILDKIT_IMAGE: jkaninda/pg-bkup
jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push
        uses: docker/build-push-action@v3
        with:
          push: true
          file: "./src/docker/Dockerfile"
          platforms: linux/amd64,linux/arm64
          tags: |
            "${{env.BUILDKIT_IMAGE}}:latest"
            "${{env.BUILDKIT_IMAGE}}:v0.1"

5
.gitignore vendored Normal file
View File

@@ -0,0 +1,5 @@
/.history
backup
data
compose.yaml
.env

232
README.md Normal file
View File

@@ -0,0 +1,232 @@
# Postgres Backup
Postgres Backup tool: back up a PostgreSQL database to local disk or to S3-compatible object storage
- Docker
- Kubernetes
[![Build](https://github.com/jkaninda/pg-bkup/actions/workflows/build.yml/badge.svg)](https://github.com/jkaninda/pg-bkup/actions/workflows/build.yml)
![Docker Image Size (latest by date)](https://img.shields.io/docker/image-size/jkaninda/pg-bkup?style=flat-square)
![Docker Pulls](https://img.shields.io/docker/pulls/jkaninda/pg-bkup?style=flat-square)
- [Docker Hub](https://hub.docker.com/r/jkaninda/pg-bkup)
- [Github](https://github.com/jkaninda/pg-bkup)
## Storage:
- local
- s3
- Object storage
## Usage
| Options | Shorts | Usage |
|---------------|--------|------------------------------------|
| pg_bkup | bkup | CLI utility |
| --operation | -o | Set operation. backup or restore (default: backup) |
| --storage | -s | Set storage. local or s3 (default: local) |
| --file | -f | Set file name for restoration |
| --path | | Set s3 path without file name. eg: /custom_path |
| --dbname | -d | Set database name |
| --port        | -p     | Set database port (default: 5432)  |
| --timeout | -t | Set timeout (default: 60s) |
| --help | -h | Print this help message and exit |
| --version | -V | Print version information and exit |
## Backup database :
Simple backup usage
```sh
bkup --operation backup
```
```sh
bkup -o backup
```
### S3
```sh
bkup --operation backup --storage s3
```
## Docker run:
```sh
docker run --rm --network your_network_name --name mysql-bkup -v $PWD/backup:/backup/ -e "DB_HOST=database_host_name" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" jkaninda/pg-bkup bkup -o backup -d database_name
```
## Docker compose file:
```yaml
version: '3'
services:
postgres:
image: postgres:14.5
container_name: postgres
pull_policy: if_not_present
restart: unless-stopped
volumes:
- ./postgres:/var/lib/postgresql/data
environment:
POSTGRES_DB: bkup
POSTGRES_PASSWORD: password
POSTGRES_USER: bkup
mysql-bkup:
image: jkaninda/mysql-bkup:latest
container_name: mysql-bkup
command:
- /bin/sh
- -c
- bkup --operation backup -db mariadb
volumes:
- ./backup:/backup
environment:
- DB_PORT=5432
- DB_HOST=postgres
- DB_NAME=mariadb
- DB_USERNAME=mariadb
- DB_PASSWORD=password
```
## Restore database :
Simple database restore operation usage
```sh
bkup --operation restore --file database_20231217_115621.sql --dbname database_name
```
```sh
bkup -o restore -f database_20231217_115621.sql -d database_name
```
### S3
```sh
bkup --operation restore --storage s3 --file database_20231217_115621.sql --dbname database_name
```
## Docker run:
```sh
docker run --rm --network your_network_name --name pg-bkup -v $PWD/backup:/backup/ -e "DB_HOST=database_host_name" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" jkaninda/mysql-bkup:latest bkup -o backup -d database_name -f napata_20231219_022941.sql.gz
```
## Docker compose file:
```yaml
version: '3'
services:
pg-bkup:
image: jkaninda/pg-bkup:latest
container_name: pg-bkup
command:
- /bin/sh
- -c
- bkup --operation restore --file database_20231217_115621.sql -d database_name
volumes:
- ./backup:/backup
environment:
#- FILE_NAME=mariadb_20231217_040238.sql # Optional if file name is set from command
- DB_PORT=5432
- DB_HOST=postgres
- DB_USERNAME=user_name
- DB_PASSWORD=password
```
## Run
```sh
docker-compose up -d
```
## Backup to S3
```sh
docker run --rm --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=db_hostname" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" -e "ACCESS_KEY=your_access_key" -e "SECRET_KEY=your_secret_key" -e "BUCKETNAME=your_bucket_name" -e "S3_ENDPOINT=https://eu2.contabostorage.com" jkaninda/mysql-bkup:latest bkup -o backup -s s3 -d database_name
```
> To change the S3 backup path, add this flag: `--path /myPath`. The default path is `/mysql-bkup`.
Simple S3 backup usage
```sh
bkup --operation backup --storage s3 --dbname mydatabase
```
```yaml
mysql-bkup:
image: jkaninda/mysql-bkup:latest
container_name: mysql-bkup
tty: true
privileged: true
devices:
- "/dev/fuse"
command:
- /bin/sh
- -c
- pg_bkup --operation restore --source s3 -f database_20231217_115621.sql.gz --dbname database_name
environment:
- DB_PORT=3306
- DB_HOST=postgress
- DB_USERNAME=user_name
- DB_PASSWORD=password
- ACCESS_KEY=${ACCESS_KEY}
- SECRET_KEY=${SECRET_KEY}
- BUCKETNAME=${BUCKETNAME}
- S3_ENDPOINT=${S3_ENDPOINT}
```
## Run "docker run" from crontab
Make an automated backup (every night at 1:00 AM).
> backup_script.sh
```sh
#!/bin/sh
DB_USERNAME='db_username'
DB_PASSWORD='password'
DB_HOST='db_hostname'
DB_NAME='db_name'
BACKUP_DIR='/some/path/backup/'
docker run --rm --name mysql-bkup -v $BACKUP_DIR:/backup/ -e "DB_HOST=$DB_HOST" -e "DB_USERNAME=$DB_USERNAME" -e "DB_PASSWORD=$DB_PASSWORD" jkaninda/pg-bkup bkup -o backup -d $DB_NAME
```
```sh
chmod +x backup_script.sh
```
Your crontab looks like this:
```conf
0 1 * * * /path/to/backup_script.sh
```
## Kubernetes CronJob
Simple Kubernetes CronJob usage:
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: pg-bkup-job
spec:
schedule: "0 0 * * *"
jobTemplate:
spec:
template:
spec:
backoffLimit: 4
containers:
- name: pg-bkup
image: jkaninda/pg-bkup
command:
- /bin/sh
- -c
- bkup --operation backup
env:
- name: DB_PORT
value: "5432"
- name: DB_HOST
value: "postgress-svc"
- name: DB_NAME
value: "database_name"
- name: DB_USERNAME
value: "db_name"
# Please use secret instead!
- name: DB_PASSWORD
value: "password"
restartPolicy: Never
```

8
backup_script.sh Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/sh
# Nightly backup wrapper for cron: run the pg-bkup container against a
# remote database and drop the dump into $BACKUP_DIR (see README).
DB_USERNAME='db_username'
DB_PASSWORD='password'
DB_HOST='db_hostname'
DB_NAME='db_name'
BACKUP_DIR="$PWD/backup"
# Quote all expansions so paths or names containing spaces survive word-splitting.
docker run --rm --name pg-bkup -v "$BACKUP_DIR":/backup/ -e "DB_HOST=$DB_HOST" -e "DB_USERNAME=$DB_USERNAME" -e "DB_PASSWORD=$DB_PASSWORD" jkaninda/pg-bkup bkup -o backup -d "$DB_NAME"

11
build.sh Executable file
View File

@@ -0,0 +1,11 @@
#!/usr/bin/env bash
# Build the pg-bkup image (tag = first argument, default "latest") and
# start the local compose stack.
# Fixed: without set -e the compose stack was started even when the image
# build failed; the if/else is replaced by a default parameter expansion.
set -e
tag=${1:-latest}
docker build -f src/docker/Dockerfile -t "jkaninda/pg-bkup:$tag" .
docker-compose up -d

31
k8s-job.yaml Normal file
View File

@@ -0,0 +1,31 @@
# Nightly CronJob running a pg-bkup backup inside the cluster.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: pg-bkup-job
spec:
  schedule: "0 0 * * *"
  jobTemplate:
    spec:
      # Fixed: backoffLimit is a Job-spec field; it was previously nested
      # under the Pod template spec, where the API server rejects it.
      backoffLimit: 4
      template:
        spec:
          containers:
            - name: pg-bkup
              image: jkaninda/pg-bkup
              command:
                - /bin/sh
                - -c
                - bkup --operation backup
              env:
                - name: DB_PORT
                  value: "5432"
                - name: DB_HOST
                  # NOTE(review): looks like a typo for "postgres-svc" — confirm the Service name.
                  value: "porstgress-svc"
                - name: DB_NAME
                  value: "database_name"
                - name: DB_USERNAME
                  value: "user_name"
                # Please use secret!
                - name: DB_PASSWORD
                  value: "password"
          restartPolicy: Never

36
src/docker/Dockerfile Normal file
View File

@@ -0,0 +1,36 @@
FROM ubuntu:24.04

# Database connection settings (overridden at runtime).
ENV DB_HOST=""
ENV DB_NAME=""
ENV DB_USERNAME=""
ENV DB_PASSWORD=""
ENV DB_PORT="5432"
# Storage settings: local disk or S3 (mounted via s3fs).
ENV DESTINATION=local
ENV STORAGE=local
ENV SOURCE=local
ENV BUCKETNAME=""
ENV ACCESS_KEY=""
ENV SECRET_KEY=""
ENV S3_ENDPOINT=https://s3.amazonaws.com
ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="0.1"

# Install client tools and s3fs in a single layer so that removing the apt
# cache actually shrinks the image (cleaning in a separate RUN leaves the
# cache baked into the earlier layers). Also use apt-get consistently.
RUN apt-get update -qq \
    && apt-get install -y build-essential libcurl4-openssl-dev libxml2-dev mime-support \
       s3fs postgresql-client postgresql-client-common libpq-dev \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Mount point and cache directory for s3fs.
RUN mkdir /s3mnt /tmp/s3cache \
    && chmod 777 /s3mnt /tmp/s3cache

# Install the backup script and expose it as both `pg_bkup` and `bkup`.
COPY src/pg_bkup.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/pg_bkup.sh \
    && ln -s /usr/local/bin/pg_bkup.sh /usr/local/bin/pg_bkup \
    && ln -s /usr/local/bin/pg_bkup.sh /usr/local/bin/bkup

RUN mkdir /backup
WORKDIR /backup

210
src/pg_bkup.sh Executable file
View File

@@ -0,0 +1,210 @@
#!/usr/bin/env bash
# pg_bkup — backup/restore a PostgreSQL database to local disk or S3.
set -e
TIME=$(date +%Y%m%d_%H%M%S)          # timestamp embedded in backup file names
arg0=$(basename "$0" .sh)
blnk=$(echo "$arg0" | sed 's/./ /g') # blank padding used to align usage lines
# Defaults; each can be overridden by CLI flags or container env vars.
# Fixed: removed unused MY_SQL_DUMP=/usr/bin/mysqldump — a leftover from the
# MySQL variant of this tool, never referenced in this script.
export OPERATION=backup
export STORAGE=local
export STORAGE_PATH=/backup
export S3_PATH=/mysql-bkup           # NOTE(review): legacy default from the MySQL tool; README says /mysql_bkup — confirm
export TIMEOUT=60
export PGPASSWORD=""
export FILE_COMPRESION=true          # [sic] misspelled but exported; kept as-is in case children read it
export CONNECTION=""
usage_info()
{
    # Print the short usage synopsis (continuation-style lines).
    printf '%s\n' \
        "Usage: \\" \
        " $blnk Backup: pg_bkup -o backup -s s3 \\" \
        " $blnk Restore: pg_bkup -o restore -s s3 -f my_db.sql \\" \
        " $blnk [-o|--operation] [{-f|--file} ] [{-s|--storage} ] [{-h|--help} ] \\"
}
version_info()
{
    # Report the tool version (VERSION is provided by the image environment)
    # and terminate successfully.
    printf 'Version: %s\n' "$VERSION"
    exit 0
}
usage()
{
    # Print the usage synopsis to stderr and exit with a failure status.
    # Fixed: `exec 1>2` created a file literally named "2" in the CWD —
    # the intent (per the original comment) was the fd duplication 1>&2.
    # Fixed: exiting 0 on a usage error hid failures from Docker/K8s.
    exec 1>&2 # Send standard output to standard error
    usage_info
    exit 1
}
error()
{
    # Print an error message (prefixed with the program name) to stderr
    # and terminate. Fixed: previously exited 0, so callers — and the
    # container orchestrator — saw fatal argument errors as success.
    echo "$arg0: $*" >&2
    exit 1
}
help()
{
    # Print the option reference and exit successfully.
    # Fixed: the advertised defaults were wrong — DB_PORT defaults to 5432
    # (Postgres, set in the Dockerfile; 3306 is the MySQL port) and TIMEOUT
    # defaults to 60 (set at the top of this script), not 120s.
    echo
    echo "  -o |--operation -- Set operation (default: backup)"
    echo "  -s |--storage -- Set storage (default: local)"
    echo "  -f |--file -- Set file name "
    echo "     |--path -- Set s3 path, without file name"
    echo "  -d |--dbname -- Set database name "
    echo "  -p |--port -- Set database port (default: 5432)"
    echo "  -t |--timeout -- Set timeout (default: 60s)"
    echo "  -h |--help -- Print this help message and exit"
    echo "  -V |--version -- Print version information and exit"
    exit 0
}
flags()
{
    # Parse command-line flags into the exported globals consumed by
    # backup()/restore(). Unknown flags fall through to usage().
    while test $# -gt 0
    do
        case "$1" in
        (-o|--operation)
            shift
            [ $# = 0 ] && error "No operation specified - restore or backup"
            export OPERATION="$1"
            shift;;
        (-d|--dbname)
            shift
            [ $# = 0 ] && error "No database name specified"
            export DB_NAME="$1"
            shift;;
        (-s|--storage)
            shift
            [ $# = 0 ] && error "No storage specified - local or s3 | default local"
            export STORAGE="$1"
            shift;;
        (-f|--file)
            shift
            [ $# = 0 ] && error "No file specified - file to restore"
            export FILE_NAME="$1"
            shift;;
        (--path)
            shift
            [ $# = 0 ] && error "No s3 path specified - s3 path without file name"
            export S3_PATH="$1"
            shift;;
        (-db|--database)
            # Alias of -d/--dbname, kept for backward compatibility.
            shift
            [ $# = 0 ] && error "No database name specified"
            export DB_NAME="$1"
            shift;;
        (-p|--port)
            shift
            # Fixed: the message wrongly said "No database name specified".
            [ $# = 0 ] && error "No port specified"
            export DB_PORT="$1"
            shift;;
        (-t|--timeout)
            shift
            [ $# = 0 ] && error "No timeout specified"
            export TIMEOUT="$1"
            shift;;
        (-h|--help)
            help;;
        (-V|--version)
            version_info;;
        (--)
            help;;
        (*) usage;;
        esac
    done
}
create_pgpass(){
    # Write the libpq password file (~/.pgpass) so psql/pg_dump can
    # authenticate without prompting. Format: host:port:database:user:password.
    # Fixed: original interpolated ${DB_DATABASE}, which is never set anywhere
    # in this script (the database name lives in DB_NAME), leaving the
    # database field permanently empty.
    export CONNECTION=${DB_HOST}:${DB_PORT}:${DB_NAME}:${DB_USERNAME}:${DB_PASSWORD}
    echo "$CONNECTION" > ~/.pgpass
    chmod 600 ~/.pgpass
}
backup()
{
    # Dump DB_NAME with pg_dump and gzip it into STORAGE_PATH.
    # Fixed: every guard was written [ -z "${VAR}"] with no space before the
    # closing bracket — `[` then reports a syntax error and returns false,
    # so the else branch ran pg_dump even when required settings were missing.
    if [ -z "${DB_HOST}" ] || [ -z "${DB_NAME}" ] || [ -z "${DB_USERNAME}" ] || [ -z "${DB_PASSWORD}" ]; then
        echo "Please make sure all required options are set "
    else
        export PGPASSWORD=${DB_PASSWORD}
        ## Test database connection
        ## Backup database
        pg_dump -h "${DB_HOST}" -p "${DB_PORT}" -U "${DB_USERNAME}" -d "${DB_NAME}" -v | gzip > "${STORAGE_PATH}/${DB_NAME}_${TIME}.sql.gz"
        echo "Database has been saved"
    fi
    exit 0
}
restore()
{
    # Restore DB_NAME from ${STORAGE_PATH}/${FILE_NAME}. gzip'd dumps are
    # detected with `gzip -t` and streamed through zcat; plain SQL files are
    # fed to psql directly. All expansions are now quoted so file names or
    # hosts containing spaces/globs do not break word-splitting.
    if [ -z "${DB_HOST}" ] || [ -z "${DB_NAME}" ] || [ -z "${DB_USERNAME}" ] || [ -z "${DB_PASSWORD}" ]; then
        echo "Please make sure all required options are set "
    else
        ## Restore database
        export PGPASSWORD=${DB_PASSWORD}
        if [ -f "${STORAGE_PATH}/${FILE_NAME}" ]; then
            # NOTE(review): psql's -v expects NAME=VALUE; here the bare `-v -d`
            # combination is kept from the original — verify it behaves as intended.
            if gzip -t "${STORAGE_PATH}/${FILE_NAME}"; then
                zcat "${STORAGE_PATH}/${FILE_NAME}" | psql -h "${DB_HOST}" -p "${DB_PORT}" -U "${DB_USERNAME}" -v -d "${DB_NAME}"
            else
                psql -h "${DB_HOST}" -p "${DB_PORT}" -U "${DB_USERNAME}" -v -d "${DB_NAME}" < "${STORAGE_PATH}/${FILE_NAME}"
            fi
            echo "Database has been restored"
        else
            echo "Error, file not found in ${STORAGE_PATH}/${FILE_NAME}"
        fi
    fi
    exit
}
s3_backup()
{
    # Backup to S3: mount the bucket first (mount_s3 repoints STORAGE_PATH
    # to /s3mnt$S3_PATH), then run the regular backup into it.
    mount_s3
    backup
}
s3_restore()
{
    # Restore from S3: mount the bucket first (mount_s3 repoints
    # STORAGE_PATH to /s3mnt$S3_PATH), then run the regular restore from it.
    mount_s3
    restore
}
mount_s3()
{
    # Mount BUCKETNAME at /s3mnt via s3fs and point STORAGE_PATH there.
    # Fixed: the guard read [ -z "${ACCESS_KEY}"] (missing space before the
    # bracket), so missing credentials were never detected; the diagnostic
    # echoed the typo "$nACCESS_KEY" and printed literal "\n" (echo without -e).
    if [ -z "${ACCESS_KEY}" ] || [ -z "${SECRET_KEY}" ]; then
        echo "Please make sure all environment variables are set "
        printf 'BUCKETNAME=%s\nACCESS_KEY=%s\nSECRET_KEY=%s\n' "$BUCKETNAME" "$ACCESS_KEY" "$SECRET_KEY"
    else
        # s3fs credential file must be mode 600.
        echo "$ACCESS_KEY:$SECRET_KEY" | tee /etc/passwd-s3fs
        chmod 600 /etc/passwd-s3fs
        echo "Mounting Object storage in /s3mnt .... "
        # Only mount if /s3mnt is empty (i.e. not already mounted).
        if [ -z "$(ls -A /s3mnt)" ]; then
            s3fs "$BUCKETNAME" /s3mnt -o passwd_file=/etc/passwd-s3fs -o use_cache=/tmp/s3cache -o allow_other -o url="$S3_ENDPOINT" -o use_path_request_style
            if [ ! -d "/s3mnt$S3_PATH" ]; then
                mkdir -p "/s3mnt$S3_PATH"
            fi
        else
            echo "Object storage already mounted in /s3mnt"
        fi
        export STORAGE_PATH=/s3mnt$S3_PATH
    fi
}
flags "$@"
# Dispatch on OPERATION x STORAGE. Anything other than "backup" is treated
# as a restore; anything other than "s3" is treated as local storage.
# Fixed: $OPERATION/$STORAGE are now quoted inside [ ] — unquoted, an empty
# value (e.g. `-o ""`) made the test a syntax error rather than a branch.
if [ "$OPERATION" != 'backup' ]
then
    if [ "$STORAGE" != 's3' ]
    then
        echo "Restore from local"
        restore
    else
        echo "Restore from s3"
        s3_restore
    fi
else
    if [ "$STORAGE" != 's3' ]
    then
        echo "Backup to local destination"
        backup
    else
        echo "Backup to s3 storage"
        s3_backup
    fi
fi