Refactoring of code

Renames REGION to AWS_REGION (default us-west-2) and drops the misspelled GPG_PASS_PHRASE in favor of GPG_PASSPHRASE; adds an AWS_DISABLE_SSL toggle wired into the S3 session; parameterizes the Makefile restore targets with FILE_NAME; replaces the /s3mnt and /tmp/s3cache directories with WORKDIR, BACKUPDIR, and BACKUP_TMP_DIR build arguments; implements TestDatabaseConnection as a real psql probe; tidies several log messages; and bumps VERSION to v0.8.
Makefile (10 changed lines)
@@ -19,20 +19,20 @@ docker-build:
 docker-run: docker-build
 	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --prune --keep-last 2
 docker-restore: docker-build
-	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore -f uzaraka_20240729_200543.sql.gz.gpg
+	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore -f ${FILE_NAME}


 docker-run-scheduled: docker-build
-	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" jkaninda/pg-bkup bkup backup --mode scheduled --period "* * * * *"
+	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --mode scheduled --period "* * * * *"


 docker-run-scheduled-s3: docker-build
-	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
+	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"

 docker-run-s3: docker-build
-	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --path /custom-path
+	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --path /custom-path


 docker-restore-s3: docker-build
-	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 --path /custom-path -f uzaraka_20240729_205710.sql.gz.gpg
+	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 --path /custom-path -f $FILE_NAME

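The restore targets now read the backup file name from a FILE_NAME Make variable instead of a hard-coded dump, so a restore is invoked as, for example, make docker-restore FILE_NAME=uzaraka_20240729_200543.sql.gz.gpg (FILE_NAME may also be exported in the environment). Note that the docker-restore-s3 recipe writes $FILE_NAME: Make expands that as $(F) followed by the literal text ILE_NAME, so it should probably be ${FILE_NAME} as in the docker-restore target.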
Dockerfile
@@ -19,17 +19,21 @@ ENV STORAGE=local
 ENV BUCKET_NAME=""
 ENV ACCESS_KEY=""
 ENV SECRET_KEY=""
-ENV REGION=""
+ENV AWS_REGION="us-west-2"
+ENV AWS_DISABLE_SSL="false"
+ENV GPG_PASSPHRASE=""
 ENV SSH_USER=""
 ENV SSH_PASSWORD=""
 ENV SSH_HOST_NAME=""
 ENV SSH_IDENTIFY_FILE="/root/.ssh/id_rsa"
-ENV GPG_PASS_PHRASE=""
 ENV SSH_PORT="22"
 ENV S3_ENDPOINT=https://s3.amazonaws.com
 ARG DEBIAN_FRONTEND=noninteractive
-ENV VERSION="v0.6"
-LABEL authors="Jonas Kaninda"
+ENV VERSION="v0.8"
+ARG WORKDIR="/app"
+ARG BACKUPDIR="/backup"
+ARG BACKUP_TMP_DIR="/tmp/backup"
+LABEL author="Jonas Kaninda"

 RUN apt-get update -qq

@@ -38,10 +42,12 @@ RUN apt install postgresql-client postgresql-client-common supervisor cron opens
 # Clear cache
 RUN apt-get clean && rm -rf /var/lib/apt/lists/*

-RUN mkdir /s3mnt
-RUN mkdir /tmp/s3cache
-RUN chmod 777 /s3mnt
-RUN chmod 777 /tmp/s3cache
+RUN mkdir $WORKDIR
+RUN mkdir $BACKUPDIR
+RUN mkdir -p $BACKUP_TMP_DIR
+RUN chmod 777 $WORKDIR
+RUN chmod 777 $BACKUPDIR
+RUN chmod 777 $BACKUP_TMP_DIR

 COPY --from=build /app/pg-bkup /usr/local/bin/pg-bkup
 RUN chmod +x /usr/local/bin/pg-bkup
@@ -50,7 +56,4 @@ RUN ln -s /usr/local/bin/pg-bkup /usr/local/bin/bkup

 ADD docker/supervisord.conf /etc/supervisor/supervisord.conf

-RUN mkdir /backup
-RUN mkdir /tmp/backup
-WORKDIR /root
+WORKDIR $WORKDIR
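Two renames to watch here: the misspelled GPG_PASS_PHRASE is dropped in favor of GPG_PASSPHRASE, and REGION becomes AWS_REGION with a us-west-2 default, so compose files or scripts that still set the old names will silently stop taking effect. The new AWS_DISABLE_SSL defaults to "false" rather than empty, which matters because utils/s3.go (below) feeds it straight into strconv.ParseBool.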
@@ -48,17 +48,14 @@ func StartBackup(cmd *cobra.Command) {
 	if executionMode == "default" {
 		switch storage {
 		case "s3":
-			utils.Info("Backup database to s3 storage")
 			s3Backup(backupFileName, s3Path, disableCompression, prune, backupRetention, encryption)
 		case "local":
-			utils.Info("Backup database to local storage")
 			localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
 		case "ssh":
 			fmt.Println("x is 2")
 		case "ftp":
 			fmt.Println("x is 3")
 		default:
-			utils.Info("Backup database to local storage")
 			localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
 		}

@@ -94,7 +91,7 @@ func scheduledMode() {
 	if err != nil {
 		utils.Fatal("Failed to start supervisord: %v", err)
 	}
-	utils.Info("Starting backup job...")
+	utils.Info("Backup job started")
 	defer func() {
 		if err := cmd.Process.Kill(); err != nil {
 			utils.Info("Failed to kill supervisord process: %v", err)
@@ -203,6 +200,7 @@ func localBackup(backupFileName string, disableCompression bool, prune bool, bac
 		encryptBackup(backupFileName)
 		finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension)
 	}
+	utils.Info("Backup name is ", finalFileName)
 	moveToBackup(finalFileName, storagePath)
 	//Delete old backup
 	if prune {
@@ -213,6 +211,7 @@ func localBackup(backupFileName string, disableCompression bool, prune bool, bac
 func s3Backup(backupFileName string, s3Path string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
 	bucket := os.Getenv("BUCKET_NAME")
 	storagePath = os.Getenv("STORAGE_PATH")
+	utils.Info("Backup database to s3 storage")
 	//Backup database
 	BackupDatabase(backupFileName, disableCompression)
 	finalFileName := backupFileName
@@ -220,7 +219,8 @@ func s3Backup(backupFileName string, s3Path string, disableCompression bool, pru
 		encryptBackup(backupFileName)
 		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
 	}
-	utils.Info("Uploading file to S3 storage")
+	utils.Info("Uploading backup file to S3 storage...")
+	utils.Info("Backup name is ", backupFileName)
 	err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
 	if err != nil {
 		utils.Fatalf("Error uploading file to S3: %s ", err)
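One wrinkle in s3Backup: when encryption is enabled the uploaded object is finalFileName, which carries the .gpg suffix, but the new log line prints backupFileName without it. If the intent is to log the name that is actually uploaded, mirroring the localBackup hunk above, the line would presumably read:

	utils.Info("Backup name is ", finalFileName) // hypothetical adjustment, not part of this commit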
@@ -9,7 +9,7 @@ import (
 )

 func Decrypt(inputFile string, passphrase string) error {
-	utils.Info("Decrypting backup...")
+	utils.Info("Decrypting backup file: " + inputFile + " ...")
 	cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
@@ -29,7 +29,7 @@ func moveToBackup(backupFileName string, destinationPath string) {
 		fmt.Println("Error deleting file:", err)

 	}
-	utils.Done("Database has been backed up and copied to destination ")
+	utils.Done("Database has been backed up and copied to ", filepath.Join(destinationPath, backupFileName))
 }
 func deleteOldBackup(retentionDays int) {
 	utils.Info("Deleting old backups...")
@@ -74,5 +74,5 @@ bkup backup --dbname %s --port %s %v
 	if err := crontabCmd.Run(); err != nil {
 		utils.Fatal("Error updating crontab: ", err)
 	}
-	utils.Info("Starting backup in scheduled mode")
+	utils.Info("Backup job created.")
 }
utils/s3.go (12 changed lines)
@@ -12,25 +12,27 @@ import (
 	"net/http"
 	"os"
 	"path/filepath"
+	"strconv"
 	"time"
 )

 // CreateSession creates a new AWS session
 func CreateSession() (*session.Session, error) {

-	//key := aws.String("testobject")
 	endPoint := os.Getenv("S3_ENDPOINT")
-	//bucket := os.Getenv("BUCKET_NAME")
-	region := os.Getenv("REGION")
 	accessKey := os.Getenv("ACCESS_KEY")
 	secretKey := os.Getenv("SECRET_KEY")
+	region := os.Getenv("AWS_REGION")
+	awsDisableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
+	if err != nil {
+		Fatalf("Unable to parse AWS_DISABLE_SSL env var: %s", err)
+	}
 	// Configure to use MinIO Server
 	s3Config := &aws.Config{
 		Credentials:      credentials.NewStaticCredentials(accessKey, secretKey, ""),
 		Endpoint:         aws.String(endPoint),
 		Region:           aws.String(region),
-		DisableSSL:       aws.Bool(false),
+		DisableSSL:       aws.Bool(awsDisableSsl),
 		S3ForcePathStyle: aws.Bool(true),
 	}
 	return session.NewSession(s3Config)
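Because strconv.ParseBool rejects the empty string, CreateSession now aborts whenever AWS_DISABLE_SSL is unset; the Dockerfile default of "false" is what keeps the stock image working. A minimal sketch of a more forgiving variant, built around a hypothetical envBool helper (the name and fallback behavior are assumptions, not part of this commit):

	package utils

	import (
		"os"
		"strconv"
	)

	// envBool returns def when key is unset or empty, and also falls back to
	// def on an unparsable value instead of aborting the whole backup.
	// strconv.ParseBool accepts 1/0, t/f, true/false, TRUE/FALSE, and so on.
	func envBool(key string, def bool) bool {
		v, ok := os.LookupEnv(key)
		if !ok || v == "" {
			return def
		}
		b, err := strconv.ParseBool(v)
		if err != nil {
			return def
		}
		return b
	}

CreateSession could then read awsDisableSsl := envBool("AWS_DISABLE_SSL", false) and drop the fatal path.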
@@ -7,11 +7,13 @@ package utils
 * @link https://github.com/jkaninda/mysql-bkup
 **/
 import (
+	"bytes"
 	"fmt"
 	"github.com/spf13/cobra"
 	"io"
 	"io/fs"
 	"os"
+	"os/exec"
 )

 func Info(v ...any) {
@@ -105,8 +107,46 @@ func IsDirEmpty(name string) (bool, error) {

 // TestDatabaseConnection tests the database connection
 func TestDatabaseConnection() {
-	Info("Testing database connection...")
-	// Test database connection
+	dbHost := os.Getenv("DB_HOST")
+	dbPassword := os.Getenv("DB_PASSWORD")
+	dbUserName := os.Getenv("DB_USERNAME")
+	dbName := os.Getenv("DB_NAME")
+	dbPort := os.Getenv("DB_PORT")
+
+	if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
+		Fatal("Please make sure all required database environment variables are set")
+	} else {
+		Info("Connecting to database ...")
+		// Test database connection
+		query := "SELECT version();"
+
+		// Set the environment variable for the database password
+		err := os.Setenv("PGPASSWORD", dbPassword)
+		if err != nil {
+			return
+		}
+		// Prepare the psql command
+		cmd := exec.Command("psql",
+			"-U", dbUserName, // database user
+			"-d", dbName, // database name
+			"-h", dbHost, // host
+			"-p", dbPort, // port
+			"-c", query, // SQL command to execute
+		)
+		// Capture the output
+		var out bytes.Buffer
+		cmd.Stdout = &out
+		cmd.Stderr = &out
+
+		// Run the command and capture any errors
+		err = cmd.Run()
+		if err != nil {
+			fmt.Printf("Error running psql command: %v\nOutput: %s\n", err, out.String())
+			return
+		}
+		Info("Successfully connected to database")
+
+	}
 }
 func GetEnv(cmd *cobra.Command, flagName, envName string) string {
 	value, _ := cmd.Flags().GetString(flagName)
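The new TestDatabaseConnection exports the password with os.Setenv("PGPASSWORD", ...), which leaves it set in the parent process for the rest of the run. A sketch of an alternative (an assumption, not what this commit does) that scopes the password to the psql child process only, reusing the variables from the function above:

	// Build the same probe, but hand PGPASSWORD only to the child process.
	cmd := exec.Command("psql",
		"-U", dbUserName,
		"-d", dbName,
		"-h", dbHost,
		"-p", dbPort,
		"-c", "SELECT version();",
	)
	// cmd.Env replaces the child's environment entirely, so start from
	// os.Environ() and append the password on top.
	cmd.Env = append(os.Environ(), "PGPASSWORD="+dbPassword)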