chore: migrate project to Go

2024-01-18 16:08:11 +01:00
parent 5f048cdf83
commit 23a0ad5695
13 changed files with 574 additions and 345 deletions

.github/workflows/build.yml

@@ -32,8 +32,8 @@ jobs:
uses: docker/build-push-action@v3
with:
push: true
file: "./src/Dockerfile"
file: "./docker/Dockerfile"
platforms: linux/amd64,linux/arm64
tags: |
"${{env.BUILDKIT_IMAGE}}:v0.2"
"${{env.BUILDKIT_IMAGE}}:v0.3"
"${{env.BUILDKIT_IMAGE}}:latest"

.gitignore vendored (3 changed lines)

@@ -4,4 +4,5 @@ data
compose.yaml
.env
test.md
-.DS_Store
+.DS_Store
+pg-bkup

README.md

@@ -2,6 +2,7 @@
Postgres Backup tool, backup database to S3 or Object Storage
[![Build](https://github.com/jkaninda/pg-bkup/actions/workflows/build.yml/badge.svg)](https://github.com/jkaninda/pg-bkup/actions/workflows/build.yml)
+[![Go Report](https://goreportcard.com/badge/github.com/jkaninda/pg-bkup)](https://goreportcard.com/report/github.com/jkaninda/pg-bkup)
![Docker Image Size (latest by date)](https://img.shields.io/docker/image-size/jkaninda/pg-bkup?style=flat-square)
![Docker Pulls](https://img.shields.io/docker/pulls/jkaninda/pg-bkup?style=flat-square)
@@ -27,23 +28,32 @@ Postgres Backup tool, backup database to S3 or Object Storage
- /s3mnt => S3 mounting path
- /backup => local storage mounting path
-## Usage
+### Usage
| Options | Shorts | Usage |
|---------------|--------|------------------------------------|
-| pg_bkup | bkup | CLI utility |
+| pg-bkup | bkup | CLI utility |
| --operation | -o | Set operation. backup or restore (default: backup) |
| --storage | -s | Set storage. local or s3 (default: local) |
| --file | -f | Set file name for restoration |
| --path | | Set s3 path without file name, e.g. /custom_path |
| --dbname | -d | Set database name |
-| --port | -p | Set database port (default: 3306) |
+| --port | -p | Set database port (default: 5432) |
| --mode | -m | Set execution mode. default or scheduled (default: default) |
| --disable-compression | | Disable database backup compression |
| --period | | Set crontab period for scheduled mode only. (default: "0 1 * * *") |
| --timeout | -t | Set timeout (default: 60s) |
| --help | -h | Print this help message and exit |
| --version | -V | Print version information and exit |
## Note:
Creating a dedicated user with read-only access for backup tasks is recommended; see the sketch below.
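A minimal sketch of creating such a user (`backup_user`, the password, and `mydb` are placeholder values; adjust to your setup):

```bash
psql -h localhost -U postgres -d mydb <<'SQL'
CREATE USER backup_user WITH PASSWORD 'change_me';
GRANT CONNECT ON DATABASE mydb TO backup_user;
GRANT USAGE ON SCHEMA public TO backup_user;
-- pg_dump only needs read access to the objects it exports
GRANT SELECT ON ALL TABLES IN SCHEMA public TO backup_user;
GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO backup_user;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO backup_user;
SQL
```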
## Backup database
Simple backup usage
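A hedged sketch of a one-off backup run, assuming the image is started with Docker and configured through the environment variables the Dockerfile defines (host, database name, and credentials below are placeholders):

```bash
docker run --rm \
  -v "$PWD/backup:/backup" \
  -e DB_HOST=postgres.example.com \
  -e DB_PORT=5432 \
  -e DB_NAME=mydb \
  -e DB_USERNAME=backup_user \
  -e DB_PASSWORD=change_me \
  jkaninda/pg-bkup bkup --operation backup --dbname mydb
```

A restore would swap in `--operation restore --file db_20231217_051339.sql.gz`, matching the usage text in main.go.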

Build script

@@ -6,6 +6,9 @@ if [ $# -eq 0 ]
tag=$1
fi
-docker build -f src/Dockerfile -t jkaninda/pg-bkup:$tag .
+#go build
+CGO_ENABLED=0 GOOS=linux go build
 #docker compose up -d
+docker build -f docker/Dockerfile -t jkaninda/pg-bkup:$tag .
+docker compose up -d --force-recreate

docker/Dockerfile (new file, 48 lines)

@@ -0,0 +1,48 @@
FROM golang:1.21.0 AS build
WORKDIR /app
# Copy the source code.
COPY . .
# Install Go dependencies
RUN go mod download
# Build
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/pg-bkup
FROM ubuntu:24.04
ENV DB_HOST=""
ENV DB_NAME=""
ENV DB_USERNAME=""
ENV DB_PASSWORD=""
ENV DB_PORT="5432"
ENV STORAGE=local
ENV BUCKETNAME=""
ENV ACCESS_KEY=""
ENV SECRET_KEY=""
ENV S3_ENDPOINT=https://s3.amazonaws.com
ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="v0.3"
LABEL authors="Jonas Kaninda"
RUN apt-get update -qq
RUN apt install s3fs postgresql-client postgresql-client-common libpq-dev supervisor cron -y
# Clear cache
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
RUN mkdir /s3mnt
RUN mkdir /tmp/s3cache
RUN chmod 777 /s3mnt
RUN chmod 777 /tmp/s3cache
COPY --from=build /app/pg-bkup /usr/local/bin/pg-bkup
RUN chmod +x /usr/local/bin/pg-bkup
RUN ln -s /usr/local/bin/pg-bkup /usr/local/bin/bkup
ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
RUN mkdir /backup
WORKDIR /backup
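As a usage sketch, scheduled S3 backups could be started like this (the S3 values are placeholders; `bkup` comes from the symlink above, and since s3fs needs FUSE the container likely requires the extra privileges shown):

```bash
docker run -d \
  -e DB_HOST=postgres.example.com \
  -e DB_NAME=mydb \
  -e DB_USERNAME=backup_user \
  -e DB_PASSWORD=change_me \
  -e ACCESS_KEY=xxxx -e SECRET_KEY=xxxx -e BUCKETNAME=my-bucket \
  --device /dev/fuse --cap-add SYS_ADMIN \
  jkaninda/pg-bkup bkup -o backup -s s3 -m scheduled --period "0 1 * * *"
```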

go.mod (5 changed lines)

@@ -1,3 +1,6 @@
 module github.com/jkaninda/pg-bkup
-go 1.20
+go 1.21.0
 
+require (
+	github.com/spf13/pflag v1.0.5
+)

go.sum (new file, 2 lines)

@@ -0,0 +1,2 @@
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=

main.go (new file, 426 lines)

@@ -0,0 +1,426 @@
package main
/*****
* PostgreSQL Backup & Restore
* @author Jonas Kaninda
* @license MIT License <https://opensource.org/licenses/MIT>
* @link https://github.com/jkaninda/pg-bkup
**/
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"time"
"github.com/jkaninda/pg-bkup/utils"
flag "github.com/spf13/pflag"
)
var appVersion string = os.Getenv("VERSION")
const s3MountPath string = "/s3mnt"
var (
operation string = "backup"
storage string = "local"
file string = ""
s3Path string = "/pg-bkup"
dbName string = ""
dbHost string = ""
dbPort string = "5432"
dbPassword string = ""
dbUserName string = ""
executionMode string = "default"
storagePath string = "/backup"
accessKey string = ""
secretKey string = ""
bucketName string = ""
s3Endpoint string = ""
s3fsPasswdFile string = "/etc/passwd-s3fs"
disableCompression bool = false
startBackup bool = true
outputContent string = ""
timeout int = 30
period string = "0 1 * * *"
)
func init() {
var (
operationFlag = flag.StringP("operation", "o", "backup", "Set operation")
storageFlag = flag.StringP("storage", "s", "local", "Set storage. local or s3")
fileFlag = flag.StringP("file", "f", "", "Set file name")
pathFlag = flag.StringP("path", "P", "/pg-bkup", "Set s3 path, without file name")
dbnameFlag = flag.StringP("dbname", "d", "", "Set database name")
modeFlag = flag.StringP("mode", "m", "default", "Set execution mode. default or scheduled")
periodFlag = flag.StringP("period", "", "0 1 * * *", "Set schedule period time")
timeoutFlag = flag.IntP("timeout", "t", 30, "Set timeout")
disableCompressionFlag = flag.BoolP("disable-compression", "", false, "Disable backup compression")
portFlag = flag.IntP("port", "p", 5432, "Set database port")
helpFlag = flag.BoolP("help", "h", false, "Print this help message")
versionFlag = flag.BoolP("version", "V", false, "Print version information")
)
flag.Parse()
operation = *operationFlag
storage = *storageFlag
file = *fileFlag
s3Path = *pathFlag
dbName = *dbnameFlag
executionMode = *modeFlag
dbPort = fmt.Sprint(*portFlag)
timeout = *timeoutFlag
period = *periodFlag
disableCompression = *disableCompressionFlag
flag.Usage = func() {
fmt.Print("Usage: bkup -o backup -s s3 -d databasename --path /my_path ...\n")
fmt.Print(" bkup -o backup -d databasename --disable-compression ...\n")
fmt.Print(" Restore: bkup -o restore -d databasename -f db_20231217_051339.sql.gz ...\n\n")
flag.PrintDefaults()
}
if *helpFlag {
startBackup = false
flag.Usage()
os.Exit(0)
}
if *versionFlag {
startBackup = false
version()
os.Exit(0)
}
if *dbnameFlag != "" {
os.Setenv("DB_NAME", dbName)
}
if *pathFlag != "" {
s3Path = *pathFlag
os.Setenv("S3_PATH", fmt.Sprint(*pathFlag))
}
if *fileFlag != "" {
file = *fileFlag
os.Setenv("FILE_NAME", fmt.Sprint(*fileFlag))
}
if *portFlag != 5432 {
os.Setenv("DB_PORT", fmt.Sprint(*portFlag))
}
if *periodFlag != "" {
os.Setenv("SCHEDULE_PERIOD", fmt.Sprint(*periodFlag))
}
if *storageFlag != "" {
os.Setenv("STORAGE", fmt.Sprint(*storageFlag))
}
dbHost = os.Getenv("DB_HOST")
dbPassword = os.Getenv("DB_PASSWORD")
dbUserName = os.Getenv("DB_USERNAME")
dbName = os.Getenv("DB_NAME")
dbPort = os.Getenv("DB_PORT")
period = os.Getenv("SCHEDULE_PERIOD")
storage = os.Getenv("STORAGE")
accessKey = os.Getenv("ACCESS_KEY")
secretKey = os.Getenv("SECRET_KEY")
bucketName = os.Getenv("BUCKETNAME")
s3Endpoint = os.Getenv("S3_ENDPOINT")
}
func version() {
fmt.Printf("Version: %s\n", appVersion)
}
func main() {
os.Setenv("STORAGE_PATH", storagePath)
if startBackup {
start()
}
}
func start() {
if executionMode == "default" {
if operation != "backup" {
if storage != "s3" {
utils.Info("Restore from local")
restore()
} else {
utils.Info("Restore from s3")
s3Restore()
}
} else {
if storage != "s3" {
utils.Info("Backup to local storage")
backup()
} else {
utils.Info("Backup to s3 storage")
s3Backup()
}
}
} else if executionMode == "scheduled" {
scheduledMode()
} else {
utils.Fatal("Error, unknown execution mode!")
}
}
func backup() {
if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
utils.Fatal("Please make sure all required environment variables for database are set")
} else {
testDatabaseConnection()
// Backup database
utils.Info("Backing up database...")
bkFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
os.Setenv("PGPASSWORD", dbPassword)
if disableCompression {
bkFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
cmd := exec.Command("pg_dump",
"-h", dbHost,
"-p", dbPort,
"-U", dbUserName,
"-d", dbName,
)
output, err := cmd.Output()
if err != nil {
log.Fatal(err)
}
file, err := os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName))
if err != nil {
log.Fatal(err)
}
defer file.Close()
_, err = file.Write(output)
if err != nil {
log.Fatal(err)
}
utils.Info("Database has been backed up")
} else {
cmd := exec.Command("pg_dump",
"-h", dbHost,
"-p", dbPort,
"-U", dbUserName,
"-d", dbName,
)
stdout, err := cmd.StdoutPipe()
if err != nil {
log.Fatal(err)
}
gzipCmd := exec.Command("gzip")
gzipCmd.Stdin = stdout
// Create the output file first and fail fast if it cannot be created
outFile, err := os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName))
if err != nil {
log.Fatal(err)
}
defer outFile.Close()
gzipCmd.Stdout = outFile
if err := gzipCmd.Start(); err != nil {
log.Fatal(err)
}
if err := cmd.Run(); err != nil {
log.Fatal(err)
}
if err := gzipCmd.Wait(); err != nil {
log.Fatal(err)
}
utils.Info("Database has been backed up")
}
historyFile, err := os.OpenFile(fmt.Sprintf("%s/history.txt", storagePath), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.Fatal(err)
}
defer historyFile.Close()
if _, err := historyFile.WriteString(bkFileName + "\n"); err != nil {
log.Fatal(err)
}
}
}
func restore() {
if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" || file == "" {
utils.Fatal("Please make sure all required environment variables are set")
} else {
if utils.FileExists(fmt.Sprintf("%s/%s", storagePath, file)) {
testDatabaseConnection()
os.Setenv("PGPASSWORD", dbPassword)
extension := filepath.Ext(fmt.Sprintf("%s/%s", storagePath, file))
// GZ compressed file
if extension == ".gz" {
str := "zcat " + fmt.Sprintf("%s/%s", storagePath, file) + " | psql -h " + os.Getenv("DB_HOST") + " -p " + os.Getenv("DB_PORT") + " -U " + os.Getenv("DB_USERNAME") + " -v -d " + os.Getenv("DB_NAME")
output, err := exec.Command("bash", "-c", str).Output()
if err != nil {
utils.Fatal("Error, in restoring the database")
}
outputContent = string(output)
utils.Info("Database has been restored")
} else if extension == ".sql" {
//SQL file
str := "cat " + fmt.Sprintf("%s/%s", storagePath, file) + " | psql -h " + os.Getenv("DB_HOST") + " -p " + os.Getenv("DB_PORT") + " -U " + os.Getenv("DB_USERNAME") + " -v -d " + os.Getenv("DB_NAME")
output, err := exec.Command("bash", "-c", str).Output()
if err != nil {
utils.Fatalf("Error in restoring the database", err)
}
outputContent = string(output)
utils.Info("Database has been restored")
} else {
utils.Fatal("Unknown file extension ", extension)
}
} else {
utils.Fatal("File not found in ", fmt.Sprintf("%s/%s", storagePath, file))
}
}
}
func s3Backup() {
// Implement S3 backup logic
s3Mount()
backup()
}
// Run in scheduled mode
func scheduledMode() {
// Verify operation
if operation == "backup" {
fmt.Println()
fmt.Println("**********************************")
fmt.Println(" Starting PostgreSQL Bkup... ")
fmt.Println("***********************************")
utils.Info("Running in Scheduled mode")
utils.Info("Log file in /var/log/pg-bkup.log")
utils.Info("Execution period ", os.Getenv("SCHEDULE_PERIOD"))
testDatabaseConnection()
utils.Info("Creating backup job...")
createCrontabScript()
supervisordCmd := exec.Command("supervisord", "-c", "/etc/supervisor/supervisord.conf")
if err := supervisordCmd.Run(); err != nil {
utils.Fatalf("Error starting supervisord: %v\n", err)
}
} else {
utils.Fatal("Scheduled mode supports only backup operation")
}
}
// Mount s3 using s3fs
func s3Mount() {
if accessKey == "" || secretKey == "" || bucketName == "" {
utils.Fatal("Please make sure all environment variables are set")
} else {
storagePath = fmt.Sprintf("%s%s", s3MountPath, s3Path)
os.Setenv("STORAGE_PATH", storagePath)
//Write file
err := utils.WriteToFile(s3fsPasswdFile, fmt.Sprintf("%s:%s", accessKey, secretKey))
if err != nil {
utils.Fatal("Error creating file")
}
//Change file permission
utils.ChangePermission(s3fsPasswdFile, 0600)
utils.Info("Mounting Object storage in", s3MountPath)
if isEmpty, _ := utils.IsDirEmpty(s3MountPath); isEmpty {
cmd := exec.Command("s3fs", bucketName, s3MountPath,
"-o", "passwd_file="+s3fsPasswdFile,
"-o", "use_cache=/tmp/s3cache",
"-o", "allow_other",
"-o", "url="+s3Endpoint,
"-o", "use_path_request_style",
)
if err := cmd.Run(); err != nil {
utils.Fatalf("Error mounting Object storage:", err)
}
if err := os.MkdirAll(storagePath, os.ModePerm); err != nil {
utils.Fatalf("Error creating directory %v %v", storagePath, err)
}
} else {
utils.Info("Object storage already mounted in " + s3MountPath)
if err := os.MkdirAll(storagePath, os.ModePerm); err != nil {
utils.Fatal("Error creating directory "+storagePath, err)
}
}
}
}
func s3Restore() {
// Implement S3 restore logic
s3Mount()
restore()
}
func createCrontabScript() {
task := "/usr/local/bin/backup_cron.sh"
touchCmd := exec.Command("touch", task)
if err := touchCmd.Run(); err != nil {
utils.Fatalf("Error creating file %s: %v\n", task, err)
}
var disableC string = ""
if disableCompression {
disableC = "--disable-compression"
}
var scriptContent string
if storage == "s3" {
scriptContent = fmt.Sprintf(`#!/usr/bin/env bash
set -e
bkup --operation backup --dbname %s --port %s --storage s3 --path %s %v
`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), os.Getenv("S3_PATH"), disableC)
} else {
scriptContent = fmt.Sprintf(`#!/usr/bin/env bash
set -e
bkup --operation backup --dbname %s --port %s %v
`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), disableC)
}
if err := utils.WriteToFile(task, scriptContent); err != nil {
utils.Fatalf("Error writing to %s: %v\n", task, err)
}
chmodCmd := exec.Command("chmod", "+x", "/usr/local/bin/backup_cron.sh")
if err := chmodCmd.Run(); err != nil {
utils.Fatalf("Error changing permissions of %s: %v\n", task, err)
}
lnCmd := exec.Command("ln", "-s", "/usr/local/bin/backup_cron.sh", "/usr/local/bin/backup_cron")
if err := lnCmd.Run(); err != nil {
utils.Fatalf("Error creating symbolic link: %v\n", err)
}
cronJob := "/etc/cron.d/backup_cron"
touchCronCmd := exec.Command("touch", cronJob)
if err := touchCronCmd.Run(); err != nil {
utils.Fatalf("Error creating file %s: %v\n", cronJob, err)
}
cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> /var/log/pg-bkup.log"
`, os.Getenv("SCHEDULE_PERIOD"))
if err := utils.WriteToFile(cronJob, cronContent); err != nil {
utils.Fatalf("Error writing to %s: %v\n", cronJob, err)
}
utils.ChangePermission("/etc/cron.d/backup_cron", 0644)
crontabCmd := exec.Command("crontab", "/etc/cron.d/backup_cron")
if err := crontabCmd.Run(); err != nil {
utils.Fatal("Error updating crontab: ", err)
}
utils.Info("Starting backup in scheduled mode")
}
// testDatabaseConnection verifies connectivity by running a trivial query through psql
func testDatabaseConnection() {
utils.Info("Testing database connection...")
cmd := exec.Command("psql", "-h", dbHost, "-p", dbPort, "-U", dbUserName, "-d", dbName, "-c", "SELECT 1;")
cmd.Env = append(os.Environ(), "PGPASSWORD="+dbPassword)
if err := cmd.Run(); err != nil {
utils.Fatal("Error connecting to the database: ", err)
}
utils.Info("Database connection established")
}
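For reference, a hedged illustration of the script createCrontabScript writes to /usr/local/bin/backup_cron.sh when --storage s3 is used (database name, port, and path are example values; at run time they are filled in from the environment):

```bash
#!/usr/bin/env bash
set -e
# Generated by createCrontabScript; flags mirror the scheduled backup settings
bkup --operation backup --dbname mydb --port 5432 --storage s3 --path /pg-bkup --disable-compression
```

The trailing --disable-compression only appears when compression was disabled on the parent invocation.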

src/Dockerfile (deleted, 35 lines)

@@ -1,35 +0,0 @@
FROM ubuntu:24.04
ENV DB_HOST=?
ENV DB_NAME=""
ENV DB_USERNAME=?
ENV DB_PASSWORD=?
ENV DB_PORT="5432"
ENV STORAGE=local
ENV BUCKETNAME=""
ENV ACCESS_KEY=""
ENV SECRET_KEY=""
ENV S3_ENDPOINT=https://s3.amazonaws.com
ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="0.2"
RUN apt-get update -qq
RUN apt install s3fs postgresql-client postgresql-client-common libpq-dev supervisor cron -y
# Clear cache
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
RUN mkdir /s3mnt
RUN mkdir /tmp/s3cache
RUN chmod 777 /s3mnt
RUN chmod 777 /tmp/s3cache
COPY src/pg_bkup.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/pg_bkup.sh
ADD src/supervisord.conf /etc/supervisor/supervisord.conf
RUN ln -s /usr/local/bin/pg_bkup.sh /usr/local/bin/pg_bkup
RUN ln -s /usr/local/bin/pg_bkup.sh /usr/local/bin/bkup
RUN mkdir /backup
WORKDIR /backup

src/pg_bkup.sh (deleted, 301 lines)

@@ -1,301 +0,0 @@
#!/usr/bin/env bash
# PostgreSQL Backup & Restore
# @author Jonas Kaninda
# @license MIT License <https://opensource.org/licenses/MIT>
# @link https://github.com/jkaninda/pg-bkup
#
set -e
TIME=$(date +%Y%m%d_%H%M%S)
arg0=$(basename "$0" .sh)
blnk=$(echo "$arg0" | sed 's/./ /g')
export OPERATION=backup
export STORAGE=local
export STORAGE_PATH=/backup
export S3_PATH=/pg-bkup
export TIMEOUT=60
export PGPASSWORD=""
export FILE_COMPRESION=true
export CONNECTION=""
export EXECUTION_MODE="default"
export SCHEDULE_PERIOD="0 1 * * *"
export FILE_COMPRESION=true
usage_info()
{
echo "Usage: \\"
echo " $blnk Backup: pg_bkup -o backup -s s3 \\"
echo " $blnk Restore: pg_bkup -o restore -s s3 -f my_db.sql \\"
echo " $blnk [-o|--operation] [{-f|--file} ] [{-s|--storage} ] [{-h|--help} ] \\"
}
version_info()
{
echo "Version: $VERSION"
exit 0
}
usage()
{
exec 1>2 # Send standard output to standard error
usage_info
exit 0
}
error()
{
echo "$arg0: $*" >&2
exit 0
}
info() {
{ set +x; } 2> /dev/null
echo 'pg-bkup:' '[INFO] ' "$@"
#set -x
}
warning() {
{ set +x; } 2> /dev/null
echo 'pg-bkup:' '[WARNING] ' "$@"
}
fatal() {
{ set +x; } 2> /dev/null
echo 'pg-bkup:' '[ERROR] ' "$@" >&2
exit 1
}
help()
{
echo
echo " -o |--operation -- Set operation (default: backup)"
echo " -s |--storage -- Set storage (default: local)"
echo " -f |--file -- Set file name "
echo " |--path -- Set s3 path, without file name"
echo " -d |--dbname -- Set database name "
echo " -p |--port -- Set database port (default: 3306)"
echo " -m |--mode -- Set execution mode (default: default)"
echo " |--period -- Set schedule period time (default: '0 1 * * *')"
echo " -t |--timeout -- Set timeout (default: 120s)"
echo " -h |--help -- Print this help message and exit"
echo " -V |--version -- Print version information and exit"
exit 0
}
flags()
{
while test $# -gt 0
do
case "$1" in
(-o|--operation)
shift
[ $# = 0 ] && error "No operation specified - restore or backup"
export OPERATION="$1"
shift;;
(-d|--dbname)
shift
[ $# = 0 ] && error "No database name specified"
export DB_NAME="$1"
shift;;
(-s|--storage)
shift
[ $# = 0 ] && error "No storage specified - local or s3 | default local"
export STORAGE="$1"
shift;;
(-f|--file)
shift
[ $# = 0 ] && error "No file specified - file to restore"
export FILE_NAME="$1"
shift;;
(--path)
shift
[ $# = 0 ] && error "No s3 path specified - s3 path without file name"
export S3_PATH="$1"
shift;;
(-db|--database)
shift
[ $# = 0 ] && error "No database name specified"
export DB_NAME="$1"
shift;;
(-p|--port)
shift
[ $# = 0 ] && error "No database name specified"
export DB_PORT="$1"
shift;;
(-m|--mode)
shift
[ $# = 0 ] && error "No execution mode specified"
export EXECUTION_MODE="$1"
shift;;
(--period)
shift
[ $# = 0 ] && error "No schedule period entered"
export SCHEDULE_PERIOD="$1"
shift;;
(-t|--timeout)
shift
[ $# = 0 ] && error "No timeout specified"
export TIMEOUT="$1"
shift;;
(-h|--help)
help;;
(-V|--version)
version_info;;
(--)
help;;
(*) usage;;
esac
done
}
create_pgpass(){
export CONNECTION=${DB_HOST}:${DB_PORT}:${DB_DATABASE}:${DB_USERNAME}:${DB_PASSWORD}
echo $CONNECTION > ~/.pgpass
chmod 600 ~/.pgpass
}
backup()
{
if [[ -z $DB_HOST ]] || [[ -z $DB_NAME ]] || [[ -z $DB_USERNAME ]] || [[ -z $DB_PASSWORD ]]; then
fatal "Please make sure all required options are set "
else
export PGPASSWORD=${DB_PASSWORD}
## Test database connection
export BK_FILE_NAME="${DB_NAME}_${TIME}.sql.gz"
## Backup database
pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USERNAME} -d ${DB_NAME} | gzip > ${STORAGE_PATH}/$BK_FILE_NAME
if [[ $? -eq 0 ]];then
echo $BK_FILE_NAME | tee -a "${STORAGE_PATH}/history.txt"
info "Database has been backed up"
else
fatal "An error occurred during the backup"
fi
fi
exit 0
}
restore()
{
if [[ -z $DB_HOST ]] || [[ -z $DB_NAME ]] || [[ -z $DB_USERNAME ]] || [[ -z $DB_PASSWORD ]]; then
fatal "Please make sure all required options are set "
else
## Restore database
export PGPASSWORD=$DB_PASSWORD
if [ -f "${STORAGE_PATH}/$FILE_NAME" ]; then
if gzip -t $STORAGE_PATH/$FILE_NAME; then
zcat ${STORAGE_PATH}/${FILE_NAME} | psql -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USERNAME} -v -d ${DB_NAME}
else
cat ${STORAGE_PATH}/${FILE_NAME} | psql -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USERNAME} -v -d ${DB_NAME}
fi
info "Database has been restored"
else
fatal "File not found in ${STORAGE_PATH}/${FILE_NAME}"
fi
fi
exit
}
s3_backup()
{
mount_s3
backup
}
s3_restore()
{
mount_s3
restore
}
mount_s3()
{
if [[ -z $ACCESS_KEY ]] || [[ -z $SECRET_KEY ]]; then
fatal "Please make sure all environment variables are set "
else
echo "$ACCESS_KEY:$SECRET_KEY" | tee /etc/passwd-s3fs
chmod 600 /etc/passwd-s3fs
info "Mounting Object storage in /s3mnt .... "
if [ -z "$(ls -A /s3mnt)" ]; then
s3fs $BUCKETNAME /s3mnt -o passwd_file=/etc/passwd-s3fs -o use_cache=/tmp/s3cache -o allow_other -o url=$S3_ENDPOINT -o use_path_request_style
if [ ! -d "/s3mnt$S3_PATH" ]; then
mkdir -p /s3mnt$S3_PATH
fi
else
info "Object storage already mounted in /s3mnt"
fi
export STORAGE_PATH=/s3mnt$S3_PATH
fi
}
create_crontab_script()
{
TASK=/usr/local/bin/backup_cron.sh
touch $TASK
if [ $STORAGE == 's3' ]
then
cat > "$TASK" <<EOF
#!/usr/bin/env bash
set -e
bkup --operation backup --dbname $DB_NAME --port $DB_PORT --storage s3 --path $S3_PATH
EOF
else
cat > "$TASK" <<EOF
#!/usr/bin/env bash
set -e
bkup --operation backup --dbname $DB_NAME --port $DB_PORT
EOF
fi
chmod +x /usr/local/bin/backup_cron.sh
ln -s /usr/local/bin/backup_cron.sh /usr/local/bin/backup_cron
## Create crontab job
CRON_JOB=/etc/cron.d/backup_cron
touch $CRON_JOB
cat > "$CRON_JOB" <<EOF
$SCHEDULE_PERIOD root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> /var/log/pg-bkup.log"
EOF
chmod 0644 /etc/cron.d/*
crontab /etc/cron.d/backup_cron
}
scheduled_mode()
{
if [ $OPERATION == 'backup' ]
then
create_crontab_script
echo ""
echo "**********************************"
echo " Starting PostgreSQL Bkup... "
echo "***********************************"
info "Running in Scheduled mode "
info "Execution period $SCHEDULE_PERIOD"
info "Log file in /var/log/pg-bkup.log "
supervisord -c /etc/supervisor/supervisord.conf
else
fatal "Scheduled mode supports only backup operation"
fi
}
flags "$@"
if [ $EXECUTION_MODE == 'default' ]
then
if [ $OPERATION != 'backup' ]
then
if [ $STORAGE != 's3' ]
then
info "Restore from local"
restore
else
info "Restore from s3"
s3_restore
fi
else
if [ $STORAGE != 's3' ]
then
info "Backup to local storage"
backup
else
info "Backup to s3 storage"
s3_backup
fi
fi
elif [ $EXECUTION_MODE == 'scheduled' ]
then
scheduled_mode
else
fatal "Unknow execution mode"
fi

utils/utils.go (new file, 72 lines)

@@ -0,0 +1,72 @@
package utils
/*****
* PostgreSQL Backup & Restore
* @author Jonas Kaninda
* @license MIT License <https://opensource.org/licenses/MIT>
* @link https://github.com/jkaninda/pg-bkup
**/
import (
"fmt"
"io"
"io/fs"
"os"
)
func Info(v ...any) {
fmt.Println("[INFO] ", fmt.Sprint(v...))
}
func Infof(msg string, v ...any) {
fmt.Printf("[INFO] "+msg, v...)
}
func Warning(message string) {
fmt.Println("[WARNING]", message)
}
func Warningf(msg string, v ...any) {
fmt.Printf("[WARNING] "+msg, v...)
}
func Fatal(v ...any) {
fmt.Println("[ERROR] ", fmt.Sprint(v...))
os.Exit(1)
}
func Fatalf(msg string, v ...any) {
fmt.Printf("[ERROR] "+msg, v...)
os.Exit(1)
}
func FileExists(filename string) bool {
info, err := os.Stat(filename)
if err != nil {
return false
}
return !info.IsDir()
}
func WriteToFile(filePath, content string) error {
file, err := os.Create(filePath)
if err != nil {
return err
}
defer file.Close()
_, err = file.WriteString(content)
return err
}
func ChangePermission(filePath string, mod int) {
if err := os.Chmod(filePath, fs.FileMode(mod)); err != nil {
Fatalf("Error changing permissions of %s: %v\n", filePath, err)
}
}
func IsDirEmpty(name string) (bool, error) {
f, err := os.Open(name)
if err != nil {
return false, err
}
defer f.Close()
// Readdirnames returns io.EOF when the directory is empty
if _, err = f.Readdirnames(1); err == io.EOF {
return true, nil
}
return false, err
}
}