Add backup encryption and decryption with GPG
Makefile (8 lines changed)
@@ -17,7 +17,9 @@ docker-build:
 	docker build -f docker/Dockerfile -t jkaninda/pg-bkup:latest .
 
 docker-run: docker-build
-	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" jkaninda/pg-bkup bkup backup --prune --keep-last 2
+	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --prune --keep-last 2
+docker-restore: docker-build
+	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore -f uzaraka_20240729_200543.sql.gz.gpg
 
 docker-run-scheduled: docker-build
@@ -28,9 +30,9 @@ docker-run-scheduled-s3: docker-build
 	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
 
 docker-run-s3: docker-build
-	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "REGION=eu2" jkaninda/pg-bkup bkup backup --storage s3 --path /custom-path
+	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --path /custom-path
 
 docker-restore-s3: docker-build
-	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "REGION=eu2" -e "FILE_NAME=${FILE_NAME}" jkaninda/pg-bkup bkup restore --storage s3 --path /custom-path
+	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 --path /custom-path -f uzaraka_20240729_205710.sql.gz.gpg
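The only functional change in these targets is the extra GPG_PASSPHRASE pass-through. A minimal Go sketch (not part of the commit) of what that variable toggles inside the tool, mirroring the StartBackup hunk in pkg/backup.go below:

package main

import (
	"fmt"
	"os"
)

func main() {
	// Encryption is opt-in: it turns on whenever GPG_PASSPHRASE is non-empty.
	encryption := os.Getenv("GPG_PASSPHRASE") != ""
	fmt.Println("encryption enabled:", encryption)
}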
docker/Dockerfile

@@ -33,7 +33,7 @@ LABEL authors="Jonas Kaninda"
 
 RUN apt-get update -qq
 
-RUN apt install postgresql-client postgresql-client-common supervisor cron openssh-client -y
+RUN apt install postgresql-client postgresql-client-common supervisor cron openssh-client gnupg -y
 
 # Clear cache
 RUN apt-get clean && rm -rf /var/lib/apt/lists/*
@@ -52,5 +52,5 @@ ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
 
 RUN mkdir /backup
-RUN mkdir /tmp/pg-bkup
+RUN mkdir /tmp/backup
 WORKDIR /root
pkg/backup.go (124 lines changed)
@@ -18,7 +18,6 @@ import (
 
 func StartBackup(cmd *cobra.Command) {
 	_, _ = cmd.Flags().GetString("operation")
 
 	//Set env
 	utils.SetEnv("STORAGE_PATH", storagePath)
 	utils.GetEnv(cmd, "dbname", "DB_NAME")
@@ -29,12 +28,16 @@ func StartBackup(cmd *cobra.Command) {
 	s3Path = utils.GetEnv(cmd, "path", "S3_PATH")
 	storage = utils.GetEnv(cmd, "storage", "STORAGE")
 	file = utils.GetEnv(cmd, "file", "FILE_NAME")
-	keepLast, _ := cmd.Flags().GetInt("keep-last")
+	backupRetention, _ := cmd.Flags().GetInt("keep-last")
 	prune, _ := cmd.Flags().GetBool("prune")
 	disableCompression, _ = cmd.Flags().GetBool("disable-compression")
 	executionMode, _ = cmd.Flags().GetString("mode")
 	dbName = os.Getenv("DB_NAME")
-	storagePath = os.Getenv("STORAGE_PATH")
+	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
+	//
+	if gpgPassphrase != "" {
+		encryption = true
+	}
 
 	//Generate file name
 	backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
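For reference, the generated file name pairs the database name with a 20060102_150405 timestamp; the hard-coded restore file in the Makefile (uzaraka_20240729_200543.sql.gz.gpg) is such a name plus the ".gpg" suffix added on encryption. A small sketch, assuming "uzaraka" is just the example database name from the Makefile:

package main

import (
	"fmt"
	"time"
)

func main() {
	dbName := "uzaraka" // example name taken from the Makefile targets
	backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
	fmt.Println(backupFileName) // e.g. uzaraka_20240729_200543.sql.gz
}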
@@ -46,20 +49,17 @@ func StartBackup(cmd *cobra.Command) {
 		switch storage {
 		case "s3":
 			utils.Info("Backup database to s3 storage")
-			BackupDatabase(backupFileName, disableCompression, prune, keepLast)
-			s3Upload(backupFileName, s3Path)
+			s3Backup(backupFileName, s3Path, disableCompression, prune, backupRetention, encryption)
 		case "local":
 			utils.Info("Backup database to local storage")
-			BackupDatabase(backupFileName, disableCompression, prune, keepLast)
-			moveToBackup(backupFileName, storagePath)
+			localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
 		case "ssh":
 			fmt.Println("x is 2")
 		case "ftp":
 			fmt.Println("x is 3")
 		default:
 			utils.Info("Backup database to local storage")
-			BackupDatabase(backupFileName, disableCompression, prune, keepLast)
-			moveToBackup(backupFileName, storagePath)
+			localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
 		}
 
 	} else if executionMode == "scheduled" {
@@ -117,7 +117,7 @@ func scheduledMode() {
 }
 
 // BackupDatabase backup database
-func BackupDatabase(backupFileName string, disableCompression bool, prune bool, keepLast int) {
+func BackupDatabase(backupFileName string, disableCompression bool) {
 	dbHost = os.Getenv("DB_HOST")
 	dbPassword = os.Getenv("DB_PASSWORD")
 	dbUserName = os.Getenv("DB_USERNAME")
@@ -190,43 +190,38 @@ func BackupDatabase(backupFileName string, disableCompression bool, prune bool,
 		}
 
 	}
-	utils.Done("Database has been backed up")
+	utils.Info("Database has been backed up")
 
-	//Delete old backup
-	//if prune {
-	//	deleteOldBackup(keepLast)
-	//}
-
-	historyFile, err := os.OpenFile(fmt.Sprintf("%s/history.txt", tmpPath), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer historyFile.Close()
-	if _, err := historyFile.WriteString(backupFileName + "\n"); err != nil {
-		log.Fatal(err)
-	}
 	}
 
 }
-func moveToBackup(backupFileName string, destinationPath string) {
-	//Copy backup from tmp folder to storage destination
-	err := utils.CopyFile(filepath.Join(tmpPath, backupFileName), filepath.Join(destinationPath, backupFileName))
-	if err != nil {
-		utils.Fatal("Error copying file ", backupFileName, err)
+func localBackup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+	utils.Info("Backup database to local storage")
+	BackupDatabase(backupFileName, disableCompression)
+	finalFileName := backupFileName
+	if encrypt {
+		encryptBackup(backupFileName)
+		finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension)
 	}
-	//Delete backup file from tmp folder
-	err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
-	if err != nil {
-		fmt.Println("Error deleting file:", err)
+	moveToBackup(finalFileName, storagePath)
+	//Delete old backup
+	if prune {
+		deleteOldBackup(backupRetention)
 
 	}
-	utils.Done("Database has been backed up and copied to destination ")
 }
-func s3Upload(backupFileName string, s3Path string) {
+
+func s3Backup(backupFileName string, s3Path string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
 	bucket := os.Getenv("BUCKET_NAME")
+	storagePath = os.Getenv("STORAGE_PATH")
+	//Backup database
+	BackupDatabase(backupFileName, disableCompression)
+	finalFileName := backupFileName
+	if encrypt {
+		encryptBackup(backupFileName)
+		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
+	}
 	utils.Info("Uploading file to S3 storage")
-	err := utils.UploadFileToS3(tmpPath, backupFileName, bucket, s3Path)
+	err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
 	if err != nil {
 		utils.Fatalf("Error uploading file to S3: %s ", err)
@@ -238,51 +233,22 @@ func s3Upload(backupFileName string, s3Path string) {
 		fmt.Println("Error deleting file:", err)
 	}
+	// Delete old backup
+	if prune {
+		err := utils.DeleteOldBackup(bucket, s3Path, backupRetention)
+		if err != nil {
+			utils.Fatalf("Error deleting old backup from S3: %s ", err)
+		}
+	}
 	utils.Done("Database has been backed up and uploaded to s3 ")
 }
-func s3Backup(backupFileName string, disableCompression bool, s3Path string, prune bool, keepLast int) {
-	// Backup Database to S3 storage
-	//MountS3Storage(s3Path)
-	//BackupDatabase(backupFileName, disableCompression, prune, keepLast)
-}
-func deleteOldBackup(keepLast int) {
-	utils.Info("Deleting old backups...")
-	storagePath = os.Getenv("STORAGE_PATH")
-	// Define the directory path
-	backupDir := storagePath + "/"
-	// Get current time
-	currentTime := time.Now()
-	// Delete file
-	deleteFile := func(filePath string) error {
-		err := os.Remove(filePath)
-		if err != nil {
-			utils.Fatal("Error:", err)
-		} else {
-			utils.Done("File ", filePath, " deleted successfully")
-		}
-		return err
-	}
-
-	// Walk through the directory and delete files modified more than specified days ago
-	err := filepath.Walk(backupDir, func(filePath string, fileInfo os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-		// Check if it's a regular file and if it was modified more than specified days ago
-		if fileInfo.Mode().IsRegular() {
-			timeDiff := currentTime.Sub(fileInfo.ModTime())
-			if timeDiff.Hours() > 24*float64(keepLast) {
-				err := deleteFile(filePath)
-				if err != nil {
-					return err
-				}
-			}
-		}
-		return nil
-	})
+
+func encryptBackup(backupFileName string) {
+	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
+	err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
 	if err != nil {
-		utils.Fatal("Error:", err)
-		return
+		utils.Fatalf("Error during encrypting backup %s", err)
 	}
 }
pkg/config.go (new file, 4 lines)

@@ -0,0 +1,4 @@
+package pkg
+
+type Config struct {
+}
pkg/encrypt.go (new file, 48 lines)

@@ -0,0 +1,48 @@
+package pkg
+
+import (
+	"fmt"
+	"github.com/jkaninda/pg-bkup/utils"
+	"os"
+	"os/exec"
+	"strings"
+)
+
+func Decrypt(inputFile string, passphrase string) error {
+	utils.Info("Decrypting backup...")
+	cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+
+	err := cmd.Run()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
+		return err
+	}
+
+	utils.Info("Backup file decrypted successful!")
+	return nil
+}
+
+func Encrypt(inputFile string, passphrase string) error {
+	utils.Info("Encrypting backup...")
+	cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+
+	err := cmd.Run()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
+		return err
+	}
+
+	utils.Info("Backup file encrypted successful!")
+	return nil
+}
+
+func RemoveLastExtension(filename string) string {
+	if idx := strings.LastIndex(filename, "."); idx != -1 {
+		return filename[:idx]
+	}
+	return filename
+}
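The two functions are symmetric: gpg --symmetric writes <input>.gpg next to the input file, and Decrypt recovers the original path by stripping that last extension. A minimal round-trip sketch (not part of the commit), assuming the module path github.com/jkaninda/pg-bkup/pkg used by the imports above and a file already present under /tmp/backup:

package main

import "github.com/jkaninda/pg-bkup/pkg"

func main() {
	passphrase := "example-passphrase" // normally taken from GPG_PASSPHRASE

	// gpg --symmetric --cipher-algo aes256 produces /tmp/backup/db.sql.gz.gpg
	if err := pkg.Encrypt("/tmp/backup/db.sql.gz", passphrase); err != nil {
		panic(err)
	}

	// gpg --decrypt writes back /tmp/backup/db.sql.gz
	// (RemoveLastExtension drops the trailing ".gpg")
	if err := pkg.Decrypt("/tmp/backup/db.sql.gz.gpg", passphrase); err != nil {
		panic(err)
	}
}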
(deleted file)

@@ -1 +0,0 @@
-package pkg
pkg/helper.go (new file, 74 lines)

@@ -0,0 +1,74 @@
+package pkg
+
+import (
+	"fmt"
+	"github.com/jkaninda/pg-bkup/utils"
+	"os"
+	"path/filepath"
+	"time"
+)
+
+func copyToTmp(sourcePath string, backupFileName string) {
+	//Copy backup from storage to /tmp
+	err := utils.CopyFile(filepath.Join(sourcePath, backupFileName), filepath.Join(tmpPath, backupFileName))
+	if err != nil {
+		utils.Fatal("Error copying file ", backupFileName, err)
+
+	}
+}
+
+func moveToBackup(backupFileName string, destinationPath string) {
+	//Copy backup from tmp folder to storage destination
+	err := utils.CopyFile(filepath.Join(tmpPath, backupFileName), filepath.Join(destinationPath, backupFileName))
+	if err != nil {
+		utils.Fatal("Error copying file ", backupFileName, err)
+
+	}
+	//Delete backup file from tmp folder
+	err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
+	if err != nil {
+		fmt.Println("Error deleting file:", err)
+
+	}
+	utils.Done("Database has been backed up and copied to destination ")
+}
+
+func deleteOldBackup(retentionDays int) {
+	utils.Info("Deleting old backups...")
+	storagePath = os.Getenv("STORAGE_PATH")
+	// Define the directory path
+	backupDir := storagePath + "/"
+	// Get current time
+	currentTime := time.Now()
+	// Delete file
+	deleteFile := func(filePath string) error {
+		err := os.Remove(filePath)
+		if err != nil {
+			utils.Fatal("Error:", err)
+		} else {
+			utils.Done("File ", filePath, " deleted successfully")
+		}
+		return err
+	}
+
+	// Walk through the directory and delete files modified more than specified days ago
+	err := filepath.Walk(backupDir, func(filePath string, fileInfo os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		// Check if it's a regular file and if it was modified more than specified days ago
+		if fileInfo.Mode().IsRegular() {
+			timeDiff := currentTime.Sub(fileInfo.ModTime())
+			if timeDiff.Hours() > 24*float64(retentionDays) {
+				err := deleteFile(filePath)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	})
+
+	if err != nil {
+		utils.Fatal("Error:", err)
+		return
+	}
+}
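Worth noting: despite the --keep-last flag name, deleteOldBackup treats the value as a retention window in days, pruning by file modification time rather than keeping the N most recent files. A worked example of the predicate:

package main

import (
	"fmt"
	"time"
)

func main() {
	retentionDays := 2                         // the Makefile's --keep-last 2
	modTime := time.Now().Add(-72 * time.Hour) // a backup from three days ago
	timeDiff := time.Since(modTime)
	// 72h > 48h, so this file would be deleted
	fmt.Println(timeDiff.Hours() > 24*float64(retentionDays)) // true
}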
@@ -33,7 +33,7 @@ func StartRestore(cmd *cobra.Command) {
 		RestoreDatabase(file)
 	case "local":
 		utils.Info("Restore database from local")
-		copyTmp(storagePath, file)
+		copyToTmp(storagePath, file)
 		RestoreDatabase(file)
 	case "ssh":
 		fmt.Println("x is 2")
@@ -44,14 +44,6 @@ func StartRestore(cmd *cobra.Command) {
 		RestoreDatabase(file)
 	}
 }
-func copyTmp(sourcePath string, backupFileName string) {
-	//Copy backup from tmp folder to storage destination
-	err := utils.CopyFile(filepath.Join(sourcePath, backupFileName), filepath.Join(tmpPath, backupFileName))
-	if err != nil {
-		utils.Fatal("Error copying file ", backupFileName, err)
-
-	}
-}
 
 // RestoreDatabase restore database
 func RestoreDatabase(file string) {
@@ -60,10 +52,26 @@ func RestoreDatabase(file string) {
 	dbUserName = os.Getenv("DB_USERNAME")
 	dbName = os.Getenv("DB_NAME")
 	dbPort = os.Getenv("DB_PORT")
+	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
 	//storagePath = os.Getenv("STORAGE_PATH")
 	if file == "" {
 		utils.Fatal("Error, file required")
 	}
+	extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
+	if extension == ".gpg" {
+		if gpgPassphrase == "" {
+			utils.Fatal("Error, GPG_PASSPHRASE environment variable required, you need to set the GPG_PASSPHRASE")
+		} else {
+			//Decrypt file
+			err := Decrypt(filepath.Join(tmpPath, file), gpgPassphrase)
+			if err != nil {
+				utils.Fatal("Error decrypting file ", file, err)
+			}
+			//Update file name
+			file = RemoveLastExtension(file)
+		}
+
+	}
+
 	if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" || file == "" {
 		utils.Fatal("Please make sure all required environment variables are set")
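filepath.Ext returns only the final extension, which is what makes the ".gpg" detection work on doubled extensions like ".sql.gz.gpg"; one RemoveLastExtension call then yields the ".sql.gz" name the zcat | psql pipeline below expects. A small sketch using the same logic:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// same logic as RemoveLastExtension in pkg/encrypt.go
func removeLastExtension(filename string) string {
	if idx := strings.LastIndex(filename, "."); idx != -1 {
		return filename[:idx]
	}
	return filename
}

func main() {
	file := "uzaraka_20240729_205710.sql.gz.gpg" // example from the Makefile
	fmt.Println(filepath.Ext(file))        // ".gpg" -> triggers decryption
	fmt.Println(removeLastExtension(file)) // "uzaraka_20240729_205710.sql.gz"
}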
@@ -83,7 +91,7 @@ func RestoreDatabase(file string) {
 	str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | psql -h " + os.Getenv("DB_HOST") + " -p " + os.Getenv("DB_PORT") + " -U " + os.Getenv("DB_USERNAME") + " -v -d " + os.Getenv("DB_NAME")
 	_, err := exec.Command("bash", "-c", str).Output()
 	if err != nil {
-		utils.Fatal("Error, in restoring the database")
+		utils.Fatal("Error, in restoring the database ", err)
 	}
 	utils.Done("Database has been restored")
@@ -104,9 +112,3 @@ func RestoreDatabase(file string) {
 		}
 	}
 }
-
-//func s3Restore(file, s3Path string) {
-//	// Restore database from S3
-//	MountS3Storage(s3Path)
-//	RestoreDatabase(file)
-//}
@@ -3,8 +3,10 @@ package pkg
 const s3MountPath string = "/s3mnt"
 const s3fsPasswdFile string = "/etc/passwd-s3fs"
 const cronLogFile = "/var/log/pg-bkup.log"
-const tmpPath = "/tmp/pg-bkup"
+const tmpPath = "/tmp/backup"
 const backupCronFile = "/usr/local/bin/backup_cron.sh"
+const algorithm = "aes256"
+const gpgExtension = "gpg"
 
 var (
 	storage = "local"
@@ -18,4 +20,5 @@ var (
 	executionMode = "default"
 	storagePath   = "/backup"
 	disableCompression = false
+	encryption         = false
 )
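The new constants tie the pieces together: encrypted artifacts live under /tmp/backup and carry the gpgExtension suffix. A one-liner sketch of how localBackup composes the final name (s3Backup uses the literal "gpg" instead of the constant, with the same result):

package main

import "fmt"

const gpgExtension = "gpg" // as defined above

func main() {
	backupFileName := "uzaraka_20240729_200543.sql.gz"
	finalFileName := fmt.Sprintf("%s.%s", backupFileName, gpgExtension)
	fmt.Println(finalFileName) // uzaraka_20240729_200543.sql.gz.gpg
}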
utils/s3.go (50 lines changed)
@@ -8,9 +8,11 @@ import (
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
+	"log"
 	"net/http"
 	"os"
 	"path/filepath"
+	"time"
 )
 
 // CreateSession creates a new AWS session
@@ -81,7 +83,7 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
 	if err != nil {
 		return err
 	}
+	Info("Download backup from S3 storage...")
 	file, err := os.Create(filepath.Join(destinationPath, key))
 	if err != nil {
 		fmt.Println("Failed to create file", err)
@@ -101,7 +103,49 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
 		fmt.Println("Failed to download file", err)
 		return err
 	}
-	fmt.Println("Bytes size", numBytes)
-	Info("Backup downloaded to ", file.Name())
+	Info("Backup downloaded: ", file.Name())
+	Info("Bytes size: ", numBytes)
+
+	return nil
+}
+
+func DeleteOldBackup(bucket, prefix string, retention int) error {
+	sess, err := CreateSession()
+	if err != nil {
+		return err
+	}
+
+	svc := s3.New(sess)
+
+	// Get the current time and the time threshold for 7 days ago
+	now := time.Now()
+	backupRetentionDays := now.AddDate(0, 0, -retention)
+
+	// List objects in the bucket
+	listObjectsInput := &s3.ListObjectsV2Input{
+		Bucket: aws.String(bucket),
+		Prefix: aws.String(prefix),
+	}
+	err = svc.ListObjectsV2Pages(listObjectsInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+		for _, object := range page.Contents {
+			if object.LastModified.Before(backupRetentionDays) {
+				// Object is older than retention days, delete it
+				_, err := svc.DeleteObject(&s3.DeleteObjectInput{
+					Bucket: aws.String(bucket),
+					Key:    object.Key,
+				})
+				if err != nil {
+					log.Printf("Failed to delete object %s: %v", *object.Key, err)
+				} else {
+					fmt.Printf("Deleted object %s\n", *object.Key)
+				}
+			}
+		}
+		return !lastPage
+	})
+	if err != nil {
+		log.Fatalf("Failed to list objects: %v", err)
+	}
+
+	fmt.Println("Finished deleting old files.")
 	return nil
 }
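A minimal sketch (not part of the commit) of calling the new helper directly, with the same inputs the s3 backup path wires through: the bucket from BUCKET_NAME, the --path prefix, and the --keep-last value:

package main

import (
	"log"
	"os"

	"github.com/jkaninda/pg-bkup/utils"
)

func main() {
	bucket := os.Getenv("BUCKET_NAME")
	// prune objects under /custom-path older than 2 days
	if err := utils.DeleteOldBackup(bucket, "/custom-path", 2); err != nil {
		log.Fatal(err)
	}
}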