Merge pull request #121 from jkaninda/storage

Storage
This commit is contained in:
2024-10-22 16:57:11 +02:00
committed by GitHub
21 changed files with 707 additions and 459 deletions

View File

@@ -74,10 +74,10 @@ networks:
The title and body of the notifications can be tailored to your needs using Go templates. The title and body of the notifications can be tailored to your needs using Go templates.
Template sources must be mounted inside the container in /config/templates: Template sources must be mounted inside the container in /config/templates:
- email.template: Email notification template - email.tmpl: Email notification template
- telegram.template: Telegram notification template - telegram.tmpl: Telegram notification template
- email-error.template: Error notification template - email-error.tmpl: Error notification template
- telegram-error.template: Error notification template - telegram-error.tmpl: Error notification template
### Data ### Data

1
go.mod
View File

@@ -22,6 +22,7 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jkaninda/go-storage v0.0.0-20241022140446-c79ba2b4300d // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/pflag v1.0.5 // indirect

2
go.sum
View File

@@ -37,6 +37,8 @@ github.com/jkaninda/encryptor v0.0.0-20241013043504-6641402116a4 h1:FfVePubMVwx9
github.com/jkaninda/encryptor v0.0.0-20241013043504-6641402116a4/go.mod h1:9F8ZJ+ZXE8DZBo77+aneGj8LMjrYXX6eFUCC/uqZOUo= github.com/jkaninda/encryptor v0.0.0-20241013043504-6641402116a4/go.mod h1:9F8ZJ+ZXE8DZBo77+aneGj8LMjrYXX6eFUCC/uqZOUo=
github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221 h1:AwkCf7el1kzeCJ89A+gUAK0ero5JYnvLOKsYMzq+rs4= github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221 h1:AwkCf7el1kzeCJ89A+gUAK0ero5JYnvLOKsYMzq+rs4=
github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221/go.mod h1:9F8ZJ+ZXE8DZBo77+aneGj8LMjrYXX6eFUCC/uqZOUo= github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221/go.mod h1:9F8ZJ+ZXE8DZBo77+aneGj8LMjrYXX6eFUCC/uqZOUo=
github.com/jkaninda/go-storage v0.0.0-20241022140446-c79ba2b4300d h1:AFmLusMhR9TOpkZIFt7+dSSflenGWvTl26RttBo71ds=
github.com/jkaninda/go-storage v0.0.0-20241022140446-c79ba2b4300d/go.mod h1:7VK5gQISQaLxtLfBtc+een8spcgLVSBAKTRuyF1N81I=
github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg= github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI= github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=

View File

@@ -9,9 +9,14 @@ package pkg
import ( import (
"fmt" "fmt"
"github.com/jkaninda/encryptor" "github.com/jkaninda/encryptor"
"github.com/jkaninda/pg-bkup/pkg/storage/ftp"
"github.com/jkaninda/pg-bkup/pkg/storage/local"
"github.com/jkaninda/pg-bkup/pkg/storage/s3"
"github.com/jkaninda/pg-bkup/pkg/storage/ssh"
"github.com/jkaninda/pg-bkup/utils" "github.com/jkaninda/pg-bkup/utils"
"github.com/robfig/cron/v3" "github.com/robfig/cron/v3"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"log" "log"
"os" "os"
"os/exec" "os/exec"
@@ -108,7 +113,7 @@ func BackupTask(db *dbConfig, config *BackupConfig) {
} }
} }
func startMultiBackup(bkConfig *BackupConfig, configFile string) { func startMultiBackup(bkConfig *BackupConfig, configFile string) {
utils.Info("Starting multiple backup job...") utils.Info("Starting backup task...")
conf, err := readConf(configFile) conf, err := readConf(configFile)
if err != nil { if err != nil {
utils.Fatal("Error reading config file: %s", err) utils.Fatal("Error reading config file: %s", err)
@@ -123,7 +128,7 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
} else { } else {
// Check if cronExpression is valid // Check if cronExpression is valid
if utils.IsValidCronExpression(bkConfig.cronExpression) { if utils.IsValidCronExpression(bkConfig.cronExpression) {
utils.Info("Running MultiBackup in Scheduled mode") utils.Info("Running backup in Scheduled mode")
utils.Info("Backup cron expression: %s", bkConfig.cronExpression) utils.Info("Backup cron expression: %s", bkConfig.cronExpression)
utils.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat)) utils.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
utils.Info("Storage type %s ", bkConfig.storage) utils.Info("Storage type %s ", bkConfig.storage)
@@ -132,7 +137,7 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
utils.Info("Testing backup configurations...") utils.Info("Testing backup configurations...")
multiBackupTask(conf.Databases, bkConfig) multiBackupTask(conf.Databases, bkConfig)
utils.Info("Testing backup configurations...done") utils.Info("Testing backup configurations...done")
utils.Info("Creating multi backup job...") utils.Info("Creating backup job...")
// Create a new cron instance // Create a new cron instance
c := cron.New() c := cron.New()
@@ -146,7 +151,7 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
} }
// Start the cron scheduler // Start the cron scheduler
c.Start() c.Start()
utils.Info("Creating multi backup job...done") utils.Info("Creating backup job...done")
utils.Info("Backup job started") utils.Info("Backup job started")
defer c.Stop() defer c.Stop()
select {} select {}
@@ -244,21 +249,32 @@ func localBackup(db *dbConfig, config *BackupConfig) {
} }
backupSize = fileInfo.Size() backupSize = fileInfo.Size()
utils.Info("Backup name is %s", finalFileName) utils.Info("Backup name is %s", finalFileName)
moveToBackup(finalFileName, storagePath) localStorage := local.NewStorage(local.Config{
LocalPath: tmpPath,
RemotePath: storagePath,
})
err = localStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
utils.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName))
//Send notification //Send notification
utils.NotifySuccess(&utils.NotificationData{ utils.NotifySuccess(&utils.NotificationData{
File: finalFileName, File: finalFileName,
BackupSize: backupSize, BackupSize: backupSize,
Database: db.dbName, Database: db.dbName,
Storage: config.storage, Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName), BackupLocation: filepath.Join(storagePath, finalFileName),
StartTime: startTime, StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()), EndTime: time.Now().Format(utils.TimeFormat()),
}) })
//Delete old backup //Delete old backup
if config.prune { if config.prune {
deleteOldBackup(config.backupRetention) err = localStorage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
} }
//Delete temp //Delete temp
deleteTemp() deleteTemp()
@@ -266,11 +282,7 @@ func localBackup(db *dbConfig, config *BackupConfig) {
} }
func s3Backup(db *dbConfig, config *BackupConfig) { func s3Backup(db *dbConfig, config *BackupConfig) {
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
if config.remotePath != "" {
s3Path = config.remotePath
}
utils.Info("Backup database to s3 storage") utils.Info("Backup database to s3 storage")
startTime = time.Now().Format(utils.TimeFormat()) startTime = time.Now().Format(utils.TimeFormat())
//Backup database //Backup database
@@ -281,12 +293,28 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg") finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
} }
utils.Info("Uploading backup archive to remote storage S3 ... ") utils.Info("Uploading backup archive to remote storage S3 ... ")
awsConfig := initAWSConfig()
if config.remotePath == "" {
config.remotePath = awsConfig.remotePath
}
utils.Info("Backup name is %s", finalFileName) utils.Info("Backup name is %s", finalFileName)
err := UploadFileToS3(tmpPath, finalFileName, bucket, s3Path) s3Storage, err := s3.NewStorage(s3.Config{
Endpoint: awsConfig.endpoint,
Bucket: awsConfig.bucket,
AccessKey: awsConfig.accessKey,
SecretKey: awsConfig.secretKey,
Region: awsConfig.region,
DisableSsl: awsConfig.disableSsl,
ForcePathStyle: awsConfig.forcePathStyle,
RemotePath: awsConfig.remotePath,
LocalPath: tmpPath,
})
if err != nil { if err != nil {
utils.Fatal("Error uploading backup archive to S3: %s ", err) utils.Fatal("Error creating s3 storage: %s", err)
}
err = s3Storage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
} }
//Get backup info //Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName)) fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
@@ -303,11 +331,12 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
} }
// Delete old backup // Delete old backup
if config.prune { if config.prune {
err := DeleteOldBackup(bucket, s3Path, config.backupRetention) err := s3Storage.Prune(config.backupRetention)
if err != nil { if err != nil {
utils.Fatal("Error deleting old backup from S3: %s ", err) utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
} }
} }
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
utils.Info("Uploading backup archive to remote storage S3 ... done ") utils.Info("Uploading backup archive to remote storage S3 ... done ")
//Send notification //Send notification
utils.NotifySuccess(&utils.NotificationData{ utils.NotifySuccess(&utils.NotificationData{
@@ -315,7 +344,7 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
BackupSize: backupSize, BackupSize: backupSize,
Database: db.dbName, Database: db.dbName,
Storage: config.storage, Storage: config.storage,
BackupLocation: filepath.Join(s3Path, finalFileName), BackupLocation: filepath.Join(config.remotePath, finalFileName),
StartTime: startTime, StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()), EndTime: time.Now().Format(utils.TimeFormat()),
}) })
@@ -336,10 +365,25 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
} }
utils.Info("Uploading backup archive to remote storage ... ") utils.Info("Uploading backup archive to remote storage ... ")
utils.Info("Backup name is %s", finalFileName) utils.Info("Backup name is %s", finalFileName)
err := CopyToRemote(finalFileName, config.remotePath) sshConfig, err := loadSSHConfig()
if err != nil { if err != nil {
utils.Fatal("Error uploading file to the remote server: %s ", err) utils.Fatal("Error loading ssh config: %s", err)
}
sshStorage, err := ssh.NewStorage(ssh.Config{
Host: sshConfig.hostName,
Port: sshConfig.port,
User: sshConfig.user,
Password: sshConfig.password,
RemotePath: config.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
}
err = sshStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
} }
//Get backup info //Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName)) fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
@@ -347,6 +391,7 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
utils.Error("Error:", err) utils.Error("Error:", err)
} }
backupSize = fileInfo.Size() backupSize = fileInfo.Size()
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
//Delete backup file from tmp folder //Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName)) err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
@@ -355,11 +400,12 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
} }
if config.prune { if config.prune {
//TODO: Delete old backup from remote server err := sshStorage.Prune(config.backupRetention)
utils.Info("Deleting old backup from a remote server is not implemented yet") if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
} }
}
utils.Info("Uploading backup archive to remote storage ... done ") utils.Info("Uploading backup archive to remote storage ... done ")
//Send notification //Send notification
utils.NotifySuccess(&utils.NotificationData{ utils.NotifySuccess(&utils.NotificationData{
@@ -389,11 +435,23 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
} }
utils.Info("Uploading backup archive to the remote FTP server ... ") utils.Info("Uploading backup archive to the remote FTP server ... ")
utils.Info("Backup name is %s", finalFileName) utils.Info("Backup name is %s", finalFileName)
err := CopyToFTP(finalFileName, config.remotePath) ftpConfig := loadFtpConfig()
ftpStorage, err := ftp.NewStorage(ftp.Config{
Host: ftpConfig.host,
Port: ftpConfig.port,
User: ftpConfig.user,
Password: ftpConfig.password,
RemotePath: config.remotePath,
LocalPath: tmpPath,
})
if err != nil { if err != nil {
utils.Fatal("Error uploading file to the remote FTP server: %s ", err) utils.Fatal("Error creating SSH storage: %s", err)
} }
err = ftpStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
//Get backup info //Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName)) fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil { if err != nil {
@@ -407,8 +465,10 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
} }
if config.prune { if config.prune {
//TODO: Delete old backup from remote server err := ftpStorage.Prune(config.backupRetention)
utils.Info("Deleting old backup from a remote server is not implemented yet") if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
} }

View File

@@ -80,6 +80,7 @@ type AWSConfig struct {
accessKey string accessKey string
secretKey string secretKey string
region string region string
remotePath string
disableSsl bool disableSsl bool
forcePathStyle bool forcePathStyle bool
} }
@@ -129,7 +130,7 @@ func loadSSHConfig() (*SSHConfig, error) {
identifyFile: os.Getenv("SSH_IDENTIFY_FILE"), identifyFile: os.Getenv("SSH_IDENTIFY_FILE"),
}, nil }, nil
} }
func initFtpConfig() *FTPConfig { func loadFtpConfig() *FTPConfig {
//Initialize data configs //Initialize data configs
fConfig := FTPConfig{} fConfig := FTPConfig{}
fConfig.host = utils.GetEnvVariable("FTP_HOST", "FTP_HOST_NAME") fConfig.host = utils.GetEnvVariable("FTP_HOST", "FTP_HOST_NAME")
@@ -151,6 +152,8 @@ func initAWSConfig() *AWSConfig {
aConfig.accessKey = utils.GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY") aConfig.accessKey = utils.GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
aConfig.secretKey = utils.GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY") aConfig.secretKey = utils.GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
aConfig.bucket = utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME") aConfig.bucket = utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
aConfig.remotePath = utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
aConfig.region = os.Getenv("AWS_REGION") aConfig.region = os.Getenv("AWS_REGION")
disableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL")) disableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
if err != nil { if err != nil {

View File

@@ -1,81 +0,0 @@
package pkg
import (
"fmt"
"github.com/jlaffaye/ftp"
"io"
"os"
"path/filepath"
"time"
)
// initFtpClient initializes and authenticates an FTP client
// using the environment-derived settings from initFtpConfig().
// The caller owns the returned connection and must Quit() it.
func initFtpClient() (*ftp.ServerConn, error) {
	ftpConfig := initFtpConfig()
	// Dial with a short timeout so a misconfigured or unreachable host fails fast.
	ftpClient, err := ftp.Dial(fmt.Sprintf("%s:%s", ftpConfig.host, ftpConfig.port), ftp.DialWithTimeout(5*time.Second))
	if err != nil {
		return nil, fmt.Errorf("failed to connect to FTP: %w", err)
	}
	err = ftpClient.Login(ftpConfig.user, ftpConfig.password)
	if err != nil {
		return nil, fmt.Errorf("failed to log in to FTP: %w", err)
	}
	return ftpClient, nil
}
// CopyToFTP uploads a file to the remote FTP server
func CopyToFTP(fileName, remotePath string) (err error) {
ftpConfig := initFtpConfig()
ftpClient, err := initFtpClient()
if err != nil {
return err
}
defer ftpClient.Quit()
filePath := filepath.Join(tmpPath, fileName)
file, err := os.Open(filePath)
if err != nil {
return fmt.Errorf("failed to open file %s: %w", fileName, err)
}
defer file.Close()
remoteFilePath := filepath.Join(ftpConfig.remotePath, fileName)
err = ftpClient.Stor(remoteFilePath, file)
if err != nil {
return fmt.Errorf("failed to upload file %s: %w", fileName, err)
}
return nil
}
// CopyFromFTP downloads a file from the remote FTP server.
// The object at remotePath/fileName is written to tmpPath/fileName locally.
func CopyFromFTP(fileName, remotePath string) (err error) {
	ftpClient, err := initFtpClient()
	if err != nil {
		return err
	}
	defer ftpClient.Quit()
	remoteFilePath := filepath.Join(remotePath, fileName)
	// Retr opens a data connection; the response must be closed before
	// further commands are issued on the control connection.
	r, err := ftpClient.Retr(remoteFilePath)
	if err != nil {
		return fmt.Errorf("failed to retrieve file %s: %w", fileName, err)
	}
	defer r.Close()
	localFilePath := filepath.Join(tmpPath, fileName)
	outFile, err := os.Create(localFilePath)
	if err != nil {
		return fmt.Errorf("failed to create local file %s: %w", fileName, err)
	}
	defer outFile.Close()
	// Stream the remote payload straight to disk (constant memory).
	_, err = io.Copy(outFile, r)
	if err != nil {
		return fmt.Errorf("failed to copy data to local file %s: %w", fileName, err)
	}
	return nil
}

View File

@@ -15,7 +15,6 @@ import (
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"strings" "strings"
"time"
) )
func intro() { func intro() {
@@ -24,71 +23,6 @@ func intro() {
} }
// copyToTmp copy file to temporary directory // copyToTmp copy file to temporary directory
func copyToTmp(sourcePath string, backupFileName string) {
//Copy backup from storage to /tmp
err := utils.CopyFile(filepath.Join(sourcePath, backupFileName), filepath.Join(tmpPath, backupFileName))
if err != nil {
utils.Fatal("Error copying file %s %v", backupFileName, err)
}
}
func moveToBackup(backupFileName string, destinationPath string) {
//Copy backup from tmp folder to storage destination
err := utils.CopyFile(filepath.Join(tmpPath, backupFileName), filepath.Join(destinationPath, backupFileName))
if err != nil {
utils.Fatal("Error copying file %s %v", backupFileName, err)
}
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
if err != nil {
utils.Error("Error deleting file: %s", err)
}
utils.Info("Database has been backed up and copied to %s", filepath.Join(destinationPath, backupFileName))
}
func deleteOldBackup(retentionDays int) {
utils.Info("Deleting old backups...")
storagePath = os.Getenv("STORAGE_PATH")
// Define the directory path
backupDir := storagePath + "/"
// Get current time
currentTime := time.Now()
// Delete file
deleteFile := func(filePath string) error {
err := os.Remove(filePath)
if err != nil {
utils.Fatal("Error:", err)
} else {
utils.Info("File %s deleted successfully", filePath)
}
return err
}
// Walk through the directory and delete files modified more than specified days ago
err := filepath.Walk(backupDir, func(filePath string, fileInfo os.FileInfo, err error) error {
if err != nil {
return err
}
// Check if it's a regular file and if it was modified more than specified days ago
if fileInfo.Mode().IsRegular() {
timeDiff := currentTime.Sub(fileInfo.ModTime())
if timeDiff.Hours() > 24*float64(retentionDays) {
err := deleteFile(filePath)
if err != nil {
return err
}
}
}
return nil
})
if err != nil {
utils.Fatal("Error:", err)
return
}
utils.Info("Deleting old backups...done")
}
func deleteTemp() { func deleteTemp() {
utils.Info("Deleting %s ...", tmpPath) utils.Info("Deleting %s ...", tmpPath)
err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error { err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {

View File

@@ -8,6 +8,10 @@ package pkg
import ( import (
"github.com/jkaninda/encryptor" "github.com/jkaninda/encryptor"
"github.com/jkaninda/pg-bkup/pkg/storage/ftp"
"github.com/jkaninda/pg-bkup/pkg/storage/local"
"github.com/jkaninda/pg-bkup/pkg/storage/s3"
"github.com/jkaninda/pg-bkup/pkg/storage/ssh"
"github.com/jkaninda/pg-bkup/utils" "github.com/jkaninda/pg-bkup/utils"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"os" "os"
@@ -22,9 +26,7 @@ func StartRestore(cmd *cobra.Command) {
switch restoreConf.storage { switch restoreConf.storage {
case "local": case "local":
utils.Info("Restore database from local") localRestore(dbConf, restoreConf)
copyToTmp(storagePath, restoreConf.file)
RestoreDatabase(dbConf, restoreConf)
case "s3", "S3": case "s3", "S3":
restoreFromS3(dbConf, restoreConf) restoreFromS3(dbConf, restoreConf)
case "ssh", "SSH", "remote": case "ssh", "SSH", "remote":
@@ -32,33 +34,101 @@ func StartRestore(cmd *cobra.Command) {
case "ftp", "FTP": case "ftp", "FTP":
restoreFromFTP(dbConf, restoreConf) restoreFromFTP(dbConf, restoreConf)
default: default:
utils.Info("Restore database from local") localRestore(dbConf, restoreConf)
copyToTmp(storagePath, restoreConf.file)
RestoreDatabase(dbConf, restoreConf)
} }
} }
func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) {
utils.Info("Restore database from local")
localStorage := local.NewStorage(local.Config{
RemotePath: storagePath,
LocalPath: tmpPath,
})
err := localStorage.CopyFrom(restoreConf.file)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(dbConf, restoreConf)
}
func restoreFromS3(db *dbConfig, conf *RestoreConfig) { func restoreFromS3(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from s3") utils.Info("Restore database from s3")
err := DownloadFile(tmpPath, conf.file, conf.bucket, conf.s3Path) //err := DownloadFile(tmpPath, conf.file, conf.bucket, conf.s3Path)
//if err != nil {
// utils.Fatal("Error download file from s3 %s %v ", conf.file, err)
//}
awsConfig := initAWSConfig()
if conf.remotePath == "" {
conf.remotePath = awsConfig.remotePath
}
s3Storage, err := s3.NewStorage(s3.Config{
Endpoint: awsConfig.endpoint,
Bucket: awsConfig.bucket,
AccessKey: awsConfig.accessKey,
SecretKey: awsConfig.secretKey,
Region: awsConfig.region,
DisableSsl: awsConfig.disableSsl,
ForcePathStyle: awsConfig.forcePathStyle,
RemotePath: awsConfig.remotePath,
LocalPath: tmpPath,
})
if err != nil { if err != nil {
utils.Fatal("Error download file from s3 %s %v ", conf.file, err) utils.Fatal("Error creating s3 storage: %s", err)
}
err = s3Storage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error download file from S3 storage: %s", err)
} }
RestoreDatabase(db, conf) RestoreDatabase(db, conf)
} }
func restoreFromRemote(db *dbConfig, conf *RestoreConfig) { func restoreFromRemote(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from remote server") utils.Info("Restore database from remote server")
err := CopyFromRemote(conf.file, conf.remotePath) //err := CopyFromRemote(conf.file, conf.remotePath)
//if err != nil {
// utils.Fatal("Error download file from remote server: %s %v", filepath.Join(conf.remotePath, conf.file), err)
//}
sshConfig, err := loadSSHConfig()
if err != nil { if err != nil {
utils.Fatal("Error download file from remote server: %s %v", filepath.Join(conf.remotePath, conf.file), err) utils.Fatal("Error loading ssh config: %s", err)
}
sshStorage, err := ssh.NewStorage(ssh.Config{
Host: sshConfig.hostName,
Port: sshConfig.port,
User: sshConfig.user,
Password: sshConfig.password,
RemotePath: conf.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
}
err = sshStorage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error copying backup file: %w", err)
} }
RestoreDatabase(db, conf) RestoreDatabase(db, conf)
} }
func restoreFromFTP(db *dbConfig, conf *RestoreConfig) { func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from FTP server") utils.Info("Restore database from FTP server")
err := CopyFromFTP(conf.file, conf.remotePath) //err := CopyFromFTP(conf.file, conf.remotePath)
//if err != nil {
// utils.Fatal("Error download file from FTP server: %s %v", filepath.Join(conf.remotePath, conf.file), err)
//}
ftpConfig := loadFtpConfig()
ftpStorage, err := ftp.NewStorage(ftp.Config{
Host: ftpConfig.host,
Port: ftpConfig.port,
User: ftpConfig.user,
Password: ftpConfig.password,
RemotePath: conf.remotePath,
LocalPath: tmpPath,
})
if err != nil { if err != nil {
utils.Fatal("Error download file from FTP server: %s %v", filepath.Join(conf.remotePath, conf.file), err) utils.Fatal("Error creating SSH storage: %s", err)
}
err = ftpStorage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
} }
RestoreDatabase(db, conf) RestoreDatabase(db, conf)
} }

151
pkg/s3.go
View File

@@ -1,151 +0,0 @@
// Package pkg
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"bytes"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/jkaninda/pg-bkup/utils"
"net/http"
"os"
"path/filepath"
"time"
)
// CreateSession creates a new AWS session from the environment-derived
// configuration returned by initAWSConfig().
func CreateSession() (*session.Session, error) {
	awsConfig := initAWSConfig()
	// Static credentials plus a custom endpoint so S3-compatible servers
	// (e.g. MinIO, per the original comment) can be targeted as well as AWS.
	// DisableSSL and S3ForcePathStyle are driven by the env-based config.
	s3Config := &aws.Config{
		Credentials:      credentials.NewStaticCredentials(awsConfig.accessKey, awsConfig.secretKey, ""),
		Endpoint:         aws.String(awsConfig.endpoint),
		Region:           aws.String(awsConfig.region),
		DisableSSL:       aws.Bool(awsConfig.disableSsl),
		S3ForcePathStyle: aws.Bool(awsConfig.forcePathStyle),
	}
	return session.NewSession(s3Config)
}
// UploadFileToS3 uploads a file to S3 with a given prefix.
// The object is read from filePath/key locally and stored under prefix/key
// in the given bucket. The content type is sniffed from the payload.
func UploadFileToS3(filePath, key, bucket, prefix string) error {
	sess, err := CreateSession()
	if err != nil {
		return err
	}
	svc := s3.New(sess)
	// Bug fix: the previous code issued a single file.Read into a pre-sized
	// buffer and ignored both the byte count and the error, so a short read
	// would silently upload a partially-zeroed object. os.ReadFile reads the
	// whole file and reports errors.
	buffer, err := os.ReadFile(filepath.Join(filePath, key))
	if err != nil {
		return err
	}
	objectKey := filepath.Join(prefix, key)
	fileBytes := bytes.NewReader(buffer)
	fileType := http.DetectContentType(buffer)
	_, err = svc.PutObject(&s3.PutObjectInput{
		Bucket:        aws.String(bucket),
		Key:           aws.String(objectKey),
		Body:          fileBytes,
		ContentLength: aws.Int64(int64(len(buffer))),
		ContentType:   aws.String(fileType),
	})
	if err != nil {
		return err
	}
	return nil
}
// DownloadFile downloads the object prefix/key from the given S3 bucket
// into destinationPath/key on the local filesystem.
func DownloadFile(destinationPath, key, bucket, prefix string) error {
	sess, err := CreateSession()
	if err != nil {
		return err
	}
	utils.Info("Download data from S3 storage...")
	file, err := os.Create(filepath.Join(destinationPath, key))
	if err != nil {
		// Bug fix: the old call passed err without a format verb.
		utils.Error("Failed to create file: %v", err)
		return err
	}
	defer file.Close()
	objectKey := filepath.Join(prefix, key)
	downloader := s3manager.NewDownloader(sess)
	numBytes, err := downloader.Download(file,
		&s3.GetObjectInput{
			Bucket: aws.String(bucket),
			Key:    aws.String(objectKey),
		})
	if err != nil {
		utils.Error("Failed to download file %s", key)
		return err
	}
	// Bug fix: numBytes is an int64 byte count; the old message formatted it
	// with %s ("bytes size %s"), printing a verb error instead of the size.
	utils.Info("Backup downloaded: %s, size %d bytes", file.Name(), numBytes)
	return nil
}
// DeleteOldBackup removes objects under bucket/prefix whose LastModified
// timestamp is older than `retention` days. Listing errors are logged but
// not returned; the function always returns nil.
func DeleteOldBackup(bucket, prefix string, retention int) error {
	utils.Info("Deleting old backups...")
	utils.Info("Bucket %s Prefix: %s Retention: %d", bucket, prefix, retention)
	sess, err := CreateSession()
	if err != nil {
		return err
	}
	svc := s3.New(sess)
	// Get the current time
	now := time.Now()
	// Cutoff: anything last modified before this instant is pruned.
	backupRetentionDays := now.AddDate(0, 0, -retention)
	// List objects in the bucket
	listObjectsInput := &s3.ListObjectsV2Input{
		Bucket: aws.String(bucket),
		Prefix: aws.String(prefix),
	}
	// Page through all matching objects; returning !lastPage continues
	// pagination until the final page is processed.
	err = svc.ListObjectsV2Pages(listObjectsInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, object := range page.Contents {
			if object.LastModified.Before(backupRetentionDays) {
				utils.Info("Deleting old backup: %s", *object.Key)
				// Object is older than retention days, delete it.
				// NOTE(review): per-object delete failures are only logged,
				// so a partial prune still reports overall success.
				_, err := svc.DeleteObject(&s3.DeleteObjectInput{
					Bucket: aws.String(bucket),
					Key:    object.Key,
				})
				if err != nil {
					utils.Info("Failed to delete object %s: %v", *object.Key, err)
				} else {
					utils.Info("Deleted object %s", *object.Key)
				}
			}
		}
		return !lastPage
	})
	if err != nil {
		utils.Error("Failed to list objects: %v", err)
	}
	utils.Info("Deleting old backups...done")
	return nil
}

View File

@@ -1,110 +0,0 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"context"
"errors"
"fmt"
"github.com/bramvdbogaerde/go-scp"
"github.com/bramvdbogaerde/go-scp/auth"
"github.com/jkaninda/pg-bkup/utils"
"golang.org/x/crypto/ssh"
"os"
"path/filepath"
)
// createSSHClientConfig sets up the SSH client configuration based on the
// provided SSHConfig. Key-based auth is preferred when identifyFile is set
// and exists on disk; otherwise password auth is used as a fallback.
// NOTE(review): both paths use ssh.InsecureIgnoreHostKey(), i.e. no host key
// verification — acceptable only for trusted networks; confirm intent.
func createSSHClientConfig(sshConfig *SSHConfig) (ssh.ClientConfig, error) {
	if sshConfig.identifyFile != "" && utils.FileExists(sshConfig.identifyFile) {
		return auth.PrivateKey(sshConfig.user, sshConfig.identifyFile, ssh.InsecureIgnoreHostKey())
	} else {
		// Password auth requires SSH_PASSWORD when no identity file is given.
		if sshConfig.password == "" {
			return ssh.ClientConfig{}, errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty")
		}
		utils.Warn("Accessing the remote server using password, which is not recommended.")
		return auth.PasswordKey(sshConfig.user, sshConfig.password, ssh.InsecureIgnoreHostKey())
	}
}
// CopyToRemote copies a file to a remote server via SCP.
// The file is read from tmpPath/fileName and written to remotePath/fileName
// with mode 0655 on the remote side.
func CopyToRemote(fileName, remotePath string) error {
	// Load environment variables
	sshConfig, err := loadSSHConfig()
	if err != nil {
		return fmt.Errorf("failed to load SSH configuration: %w", err)
	}
	// Initialize SSH client config
	clientConfig, err := createSSHClientConfig(sshConfig)
	if err != nil {
		return fmt.Errorf("failed to create SSH client config: %w", err)
	}
	// Create a new SCP client
	client := scp.NewClient(fmt.Sprintf("%s:%s", sshConfig.hostName, sshConfig.port), &clientConfig)
	// Connect to the remote server.
	// Bug fix: the underlying error was previously discarded and replaced
	// with a fixed message ending in "\n".
	err = client.Connect()
	if err != nil {
		return fmt.Errorf("couldn't establish a connection to the remote server: %w", err)
	}
	// Bug fix: defer Close immediately after a successful Connect; it was
	// previously deferred only after the local-open error return, leaking
	// the connection on that path.
	defer client.Close()
	// Open the local file.
	// Bug fix: the file handle was never closed (fd leak).
	filePath := filepath.Join(tmpPath, fileName)
	file, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("failed to open file %s: %w", filePath, err)
	}
	defer file.Close()
	// Copy file to the remote server
	err = client.CopyFromFile(context.Background(), *file, filepath.Join(remotePath, fileName), "0655")
	if err != nil {
		return fmt.Errorf("failed to copy file to remote server: %w", err)
	}
	return nil
}
// CopyFromRemote downloads remotePath/fileName from the remote server via SCP
// into tmpPath/fileName on the local filesystem.
func CopyFromRemote(fileName, remotePath string) error {
	// Load environment variables
	sshConfig, err := loadSSHConfig()
	if err != nil {
		return fmt.Errorf("failed to load SSH configuration: %w", err)
	}
	// Initialize SSH client config
	clientConfig, err := createSSHClientConfig(sshConfig)
	if err != nil {
		return fmt.Errorf("failed to create SSH client config: %w", err)
	}
	// Create a new SCP client
	client := scp.NewClient(fmt.Sprintf("%s:%s", sshConfig.hostName, sshConfig.port), &clientConfig)
	// Connect to the remote server.
	// Bug fix: preserve the underlying Connect error instead of discarding it.
	err = client.Connect()
	if err != nil {
		return fmt.Errorf("couldn't establish a connection to the remote server: %w", err)
	}
	// Close client connection after the file has been copied
	defer client.Close()
	// Bug fix: an open failure previously only printed a message and then
	// kept going with a nil *os.File; now it aborts with an error.
	file, err := os.OpenFile(filepath.Join(tmpPath, fileName), os.O_RDWR|os.O_CREATE, 0777)
	if err != nil {
		return fmt.Errorf("couldn't open the output file: %w", err)
	}
	defer file.Close()
	err = client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName))
	if err != nil {
		utils.Error("Error while copying file %s ", err)
		return err
	}
	return nil
}

118
pkg/storage/ftp/ftp.go Normal file
View File

@@ -0,0 +1,118 @@
package ftp
import (
"fmt"
"github.com/jkaninda/pg-bkup/pkg/storage"
"github.com/jkaninda/pg-bkup/utils"
"github.com/jlaffaye/ftp"
"io"
"os"
"path/filepath"
"time"
)
// ftpStorage implements storage.Storage over a single FTP connection.
// It embeds storage.Backend for the local/remote path pair.
type ftpStorage struct {
	*storage.Backend
	client *ftp.ServerConn
}
// Config holds the FTP connection details.
// (The original comment said "SSH"; this struct configures the FTP backend.)
type Config struct {
	Host       string
	User       string
	Password   string
	Port       string
	LocalPath  string // directory backups are read from / written to locally
	RemotePath string // directory on the FTP server
}
// createClient dials the FTP server described by conf and authenticates.
// The caller is responsible for closing the returned connection (Quit).
func createClient(conf Config) (*ftp.ServerConn, error) {
	addr := fmt.Sprintf("%s:%s", conf.Host, conf.Port)
	conn, err := ftp.Dial(addr, ftp.DialWithTimeout(5*time.Second))
	if err != nil {
		return nil, fmt.Errorf("failed to connect to FTP: %w", err)
	}
	if err := conn.Login(conf.User, conf.Password); err != nil {
		return nil, fmt.Errorf("failed to log in to FTP: %w", err)
	}
	return conn, nil
}
// NewStorage connects to the FTP server described by conf and returns a
// storage.Storage backed by that connection.
func NewStorage(conf Config) (storage.Storage, error) {
	conn, err := createClient(conf)
	if err != nil {
		return nil, err
	}
	backend := &storage.Backend{
		LocalPath:  conf.LocalPath,
		RemotePath: conf.RemotePath,
	}
	return &ftpStorage{Backend: backend, client: conn}, nil
}
// Copy uploads fileName from LocalPath to RemotePath on the FTP server.
// The FTP connection is closed when the upload finishes, so each storage
// instance is effectively single-use.
func (s ftpStorage) Copy(fileName string) error {
	conn := s.client
	defer conn.Quit()
	localFile := filepath.Join(s.LocalPath, fileName)
	src, err := os.Open(localFile)
	if err != nil {
		return fmt.Errorf("failed to open file %s: %w", fileName, err)
	}
	defer src.Close()
	if err := conn.Stor(filepath.Join(s.RemotePath, fileName), src); err != nil {
		return fmt.Errorf("failed to upload file %s: %w", localFile, err)
	}
	return nil
}
// CopyFrom downloads fileName from RemotePath on the FTP server into
// LocalPath. The FTP connection is closed afterwards.
func (s ftpStorage) CopyFrom(fileName string) error {
	conn := s.client
	defer conn.Quit()
	resp, err := conn.Retr(filepath.Join(s.RemotePath, fileName))
	if err != nil {
		return fmt.Errorf("failed to retrieve file %s: %w", fileName, err)
	}
	defer resp.Close()
	dst, err := os.Create(filepath.Join(s.LocalPath, fileName))
	if err != nil {
		return fmt.Errorf("failed to create local file %s: %w", fileName, err)
	}
	defer dst.Close()
	if _, err := io.Copy(dst, resp); err != nil {
		return fmt.Errorf("failed to copy data to local file %s: %w", fileName, err)
	}
	return nil
}
// Prune is currently a no-op for the FTP backend: remote retention
// cleanup has not been implemented yet.
func (s ftpStorage) Prune(retentionDays int) error {
	utils.Info("Deleting old backup from a remote server is not implemented yet")
	return nil
}
// Name returns the identifier of this storage backend.
func (s ftpStorage) Name() string {
	const backendName = "ftp"
	return backendName
}

108
pkg/storage/local/local.go Normal file
View File

@@ -0,0 +1,108 @@
package local
import (
"github.com/jkaninda/pg-bkup/pkg/storage"
"github.com/jkaninda/pg-bkup/utils"
"io"
"os"
"path/filepath"
"time"
)
// localStorage implements storage.Storage against the local filesystem:
// "remote" here is just another local directory.
type localStorage struct {
	*storage.Backend
}
// Config carries the source (LocalPath) and destination (RemotePath)
// directories for local copies.
type Config struct {
	LocalPath  string
	RemotePath string
}
// NewStorage builds a local-filesystem storage from conf.
func NewStorage(conf Config) storage.Storage {
	backend := &storage.Backend{
		LocalPath:  conf.LocalPath,
		RemotePath: conf.RemotePath,
	}
	return &localStorage{Backend: backend}
}
// Copy copies file from LocalPath into RemotePath on the local filesystem.
func (l localStorage) Copy(file string) error {
	src := filepath.Join(l.LocalPath, file)
	// Bug fix: the original only returned the stat error when it was
	// os.IsNotExist, silently ignoring permission/I-O failures.
	if _, err := os.Stat(src); err != nil {
		return err
	}
	return copyFile(src, filepath.Join(l.RemotePath, file))
}
// CopyFrom copies file from RemotePath back into LocalPath on the local
// filesystem.
func (l localStorage) CopyFrom(file string) error {
	src := filepath.Join(l.RemotePath, file)
	// Bug fix: the original only returned the stat error when it was
	// os.IsNotExist, silently ignoring permission/I-O failures.
	if _, err := os.Stat(src); err != nil {
		return err
	}
	return copyFile(src, filepath.Join(l.LocalPath, file))
}
// Prune deletes regular files under RemotePath whose modification time is
// more than retentionDays days in the past.
//
// Bug fix: the original called utils.Fatal on a deletion failure, which
// terminates the whole process from inside a library method; errors are
// now logged and returned to the caller instead.
func (l localStorage) Prune(retentionDays int) error {
	// Walk the backup directory and delete files older than the retention
	// window.
	return filepath.Walk(l.RemotePath, func(filePath string, fileInfo os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Only regular files are considered; directories and special files
		// are left alone.
		if !fileInfo.Mode().IsRegular() {
			return nil
		}
		age := time.Since(fileInfo.ModTime())
		if age.Hours() <= 24*float64(retentionDays) {
			return nil
		}
		if err := os.Remove(filePath); err != nil {
			utils.Error("Error: %v", err)
			return err
		}
		utils.Info("File %s deleted successfully", filePath)
		return nil
	})
}
// Name returns the identifier of this storage backend.
func (l localStorage) Name() string {
	const backendName = "local"
	return backendName
}
func copyFile(src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
_, err = io.Copy(out, in)
if err != nil {
out.Close()
return err
}
return out.Close()
}

163
pkg/storage/s3/s3.go Normal file
View File

@@ -0,0 +1,163 @@
package s3
import (
"bytes"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/jkaninda/pg-bkup/pkg/storage"
"github.com/jkaninda/pg-bkup/utils"
"net/http"
"os"
"path/filepath"
"time"
)
// s3Storage implements storage.Storage against an S3-compatible bucket
// using a shared AWS session.
type s3Storage struct {
	*storage.Backend
	client *session.Session
	bucket string
}
// Config holds the S3 connection and path settings.
type Config struct {
	Endpoint       string // S3 endpoint URL (supports S3-compatible services)
	Bucket         string
	AccessKey      string
	SecretKey      string
	Region         string
	DisableSsl     bool // disable TLS for the endpoint
	ForcePathStyle bool // path-style addressing for non-AWS endpoints
	LocalPath      string
	RemotePath     string // key prefix inside the bucket
}
// createSession builds an AWS session from the static credentials and
// endpoint settings in conf.
func createSession(conf Config) (*session.Session, error) {
	awsConfig := aws.Config{
		Credentials:      credentials.NewStaticCredentials(conf.AccessKey, conf.SecretKey, ""),
		Endpoint:         aws.String(conf.Endpoint),
		Region:           aws.String(conf.Region),
		DisableSSL:       aws.Bool(conf.DisableSsl),
		S3ForcePathStyle: aws.Bool(conf.ForcePathStyle),
	}
	return session.NewSession(&awsConfig)
}
// NewStorage creates an S3-backed storage from conf.
func NewStorage(conf Config) (storage.Storage, error) {
	sess, err := createSession(conf)
	if err != nil {
		return nil, err
	}
	backend := &storage.Backend{
		LocalPath:  conf.LocalPath,
		RemotePath: conf.RemotePath,
	}
	return &s3Storage{Backend: backend, client: sess, bucket: conf.Bucket}, nil
}
// Copy uploads fileName from LocalPath to the configured bucket under
// RemotePath, detecting the content type from the file bytes.
func (s s3Storage) Copy(fileName string) error {
	svc := s3.New(s.client)
	filePath := filepath.Join(s.LocalPath, fileName)
	// Bug fix: the original issued a single file.Read into a pre-sized
	// buffer and ignored the result. Read does not guarantee a full read,
	// so large backups could be uploaded truncated/zero-padded.
	buffer, err := os.ReadFile(filePath)
	if err != nil {
		return err
	}
	objectKey := filepath.Join(s.RemotePath, fileName)
	fileBytes := bytes.NewReader(buffer)
	fileType := http.DetectContentType(buffer)
	_, err = svc.PutObject(&s3.PutObjectInput{
		Bucket:        aws.String(s.bucket),
		Key:           aws.String(objectKey),
		Body:          fileBytes,
		ContentLength: aws.Int64(int64(len(buffer))),
		ContentType:   aws.String(fileType),
	})
	if err != nil {
		return err
	}
	return nil
}
// CopyFrom downloads fileName from the bucket (under RemotePath) into
// LocalPath using the s3manager downloader.
func (s s3Storage) CopyFrom(fileName string) error {
	utils.Info("Copy data from S3 storage...")
	outFile, err := os.Create(filepath.Join(s.LocalPath, fileName))
	if err != nil {
		return err
	}
	defer outFile.Close()
	key := filepath.Join(s.RemotePath, fileName)
	downloader := s3manager.NewDownloader(s.client)
	written, err := downloader.Download(outFile, &s3.GetObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		utils.Error("Failed to download file %s", fileName)
		return err
	}
	utils.Info("Backup downloaded: %s , bytes size: %d ", outFile.Name(), uint64(written))
	return nil
}
// Prune deletes objects under RemotePath that were last modified more
// than retentionDays days ago. Individual delete failures are logged and
// the sweep continues; a listing failure is returned to the caller.
func (s s3Storage) Prune(retentionDays int) error {
	svc := s3.New(s.client)
	// Objects older than this cutoff are deleted.
	backupRetentionDays := time.Now().AddDate(0, 0, -retentionDays)
	listObjectsInput := &s3.ListObjectsV2Input{
		Bucket: aws.String(s.bucket),
		Prefix: aws.String(s.RemotePath),
	}
	err := svc.ListObjectsV2Pages(listObjectsInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, object := range page.Contents {
			if object.LastModified.Before(backupRetentionDays) {
				utils.Info("Deleting old backup: %s", *object.Key)
				_, err := svc.DeleteObject(&s3.DeleteObjectInput{
					Bucket: aws.String(s.bucket),
					Key:    object.Key,
				})
				if err != nil {
					// Log at error level (the original used Info) but keep
					// going so one failure doesn't abort the whole sweep.
					utils.Error("Failed to delete object %s: %v", *object.Key, err)
				} else {
					utils.Info("Deleted object %s", *object.Key)
				}
			}
		}
		return !lastPage
	})
	if err != nil {
		// Bug fix: the original logged this error but still returned nil,
		// so callers could never detect a failed prune.
		utils.Error("Failed to list objects: %v", err)
		return err
	}
	utils.Info("Deleting old backups...done")
	return nil
}
// Name returns the identifier of this storage backend.
func (s s3Storage) Name() string {
	const backendName = "s3"
	return backendName
}

116
pkg/storage/ssh/ssh.go Normal file
View File

@@ -0,0 +1,116 @@
package ssh
import (
"context"
"errors"
"fmt"
"github.com/bramvdbogaerde/go-scp"
"github.com/bramvdbogaerde/go-scp/auth"
"github.com/jkaninda/pg-bkup/pkg/storage"
"github.com/jkaninda/pg-bkup/utils"
"golang.org/x/crypto/ssh"
"os"
"path/filepath"
)
// sshStorage implements storage.Storage over SCP to a remote host.
type sshStorage struct {
	*storage.Backend
	client scp.Client
}
// Config holds the SSH connection details
type Config struct {
	Host         string
	User         string
	Password     string // used only when IdentifyFile is absent
	Port         string
	IdentifyFile string // path to a private key file; preferred over password
	LocalPath    string
	RemotePath   string
}
// createClient builds an SCP client for conf, preferring key-based
// authentication and falling back to password auth when no usable
// identity file is configured.
//
// NOTE(review): host key verification is disabled via
// ssh.InsecureIgnoreHostKey, which permits man-in-the-middle attacks;
// consider a known-hosts callback.
func createClient(conf Config) (scp.Client, error) {
	addr := fmt.Sprintf("%s:%s", conf.Host, conf.Port)
	if conf.IdentifyFile != "" && utils.FileExists(conf.IdentifyFile) {
		cfg, err := auth.PrivateKey(conf.User, conf.IdentifyFile, ssh.InsecureIgnoreHostKey())
		return scp.NewClient(addr, &cfg), err
	}
	if conf.Password == "" {
		return scp.Client{}, errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty")
	}
	utils.Warn("Accessing the remote server using password, which is not recommended.")
	cfg, err := auth.PasswordKey(conf.User, conf.Password, ssh.InsecureIgnoreHostKey())
	return scp.NewClient(addr, &cfg), err
}
// NewStorage creates an SCP-backed storage from conf.
func NewStorage(conf Config) (storage.Storage, error) {
	scpClient, err := createClient(conf)
	if err != nil {
		return nil, err
	}
	backend := &storage.Backend{
		LocalPath:  conf.LocalPath,
		RemotePath: conf.RemotePath,
	}
	return &sshStorage{Backend: backend, client: scpClient}, nil
}
// Copy uploads fileName from LocalPath to RemotePath on the remote
// server via SCP, with file mode 0655.
func (s sshStorage) Copy(fileName string) error {
	client := s.client
	// Connect to the remote server.
	if err := client.Connect(); err != nil {
		// Bug fix: wrap the underlying error instead of discarding it.
		return fmt.Errorf("couldn't establish a connection to the remote server: %w", err)
	}
	defer client.Close()
	// Open the local file.
	filePath := filepath.Join(s.LocalPath, fileName)
	file, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("failed to open file %s: %w", filePath, err)
	}
	// Bug fix: the original never closed the local file (descriptor leak).
	defer file.Close()
	// Copy the file to the remote server.
	err = client.CopyFromFile(context.Background(), *file, filepath.Join(s.RemotePath, fileName), "0655")
	if err != nil {
		return fmt.Errorf("failed to copy file to remote server: %w", err)
	}
	return nil
}
// CopyFrom downloads fileName from RemotePath on the remote server into
// LocalPath via SCP.
func (s sshStorage) CopyFrom(fileName string) error {
	client := s.client
	// Connect to the remote server.
	if err := client.Connect(); err != nil {
		// Bug fix: wrap the underlying error instead of discarding it.
		return fmt.Errorf("couldn't establish a connection to the remote server: %w", err)
	}
	// Close client connection after the file has been copied.
	defer client.Close()
	// 0777 kept for backward compatibility; 0644 would be a safer default.
	file, err := os.OpenFile(filepath.Join(s.LocalPath, fileName), os.O_RDWR|os.O_CREATE, 0777)
	if err != nil {
		return fmt.Errorf("couldn't open the output file: %w", err)
	}
	defer file.Close()
	err = client.CopyFromRemote(context.Background(), file, filepath.Join(s.RemotePath, fileName))
	if err != nil {
		return err
	}
	return nil
}
// Prune is currently a no-op for the SSH backend: remote retention
// cleanup has not been implemented yet.
func (s sshStorage) Prune(retentionDays int) error {
	utils.Info("Deleting old backup from a remote server is not implemented yet")
	return nil
}
// Name returns the identifier of this storage backend.
func (s sshStorage) Name() string {
	const backendName = "ssh"
	return backendName
}

14
pkg/storage/storage.go Normal file
View File

@@ -0,0 +1,14 @@
package storage
// Storage abstracts a backup destination (local disk, SSH/SCP, FTP, S3).
type Storage interface {
	// Copy uploads fileName from the local path to the remote path.
	Copy(fileName string) error
	// CopyFrom downloads fileName from the remote path to the local path.
	CopyFrom(fileName string) error
	// Prune removes backups older than retentionDays days.
	Prune(retentionDays int) error
	// Name returns the backend identifier (e.g. "s3", "ssh").
	Name() string
}
// Backend holds the path pair shared by all storage implementations.
type Backend struct {
	//Local Path (source for Copy, destination for CopyFrom)
	LocalPath string
	//Remote path or Destination path
	RemotePath string
}

View File

@@ -28,6 +28,7 @@ var (
// dbHVars Required environment variables for database // dbHVars Required environment variables for database
var dbHVars = []string{ var dbHVars = []string{
"DB_HOST", "DB_HOST",
"DB_PORT",
"DB_PASSWORD", "DB_PASSWORD",
"DB_USERNAME", "DB_USERNAME",
"DB_NAME", "DB_NAME",

View File

@@ -99,7 +99,7 @@ func NotifySuccess(notificationData *NotificationData) {
//Email notification //Email notification
err := CheckEnvVars(mailVars) err := CheckEnvVars(mailVars)
if err == nil { if err == nil {
body, err := parseTemplate(*notificationData, "email.template") body, err := parseTemplate(*notificationData, "email.tmpl")
if err != nil { if err != nil {
Error("Could not parse email template: %v", err) Error("Could not parse email template: %v", err)
} }
@@ -111,7 +111,7 @@ func NotifySuccess(notificationData *NotificationData) {
//Telegram notification //Telegram notification
err = CheckEnvVars(vars) err = CheckEnvVars(vars)
if err == nil { if err == nil {
message, err := parseTemplate(*notificationData, "telegram.template") message, err := parseTemplate(*notificationData, "telegram.tmpl")
if err != nil { if err != nil {
Error("Could not parse telegram template: %v", err) Error("Could not parse telegram template: %v", err)
} }
@@ -143,7 +143,7 @@ func NotifyError(error string) {
Error: error, Error: error,
EndTime: time.Now().Format(TimeFormat()), EndTime: time.Now().Format(TimeFormat()),
BackupReference: os.Getenv("BACKUP_REFERENCE"), BackupReference: os.Getenv("BACKUP_REFERENCE"),
}, "email-error.template") }, "email-error.tmpl")
if err != nil { if err != nil {
Error("Could not parse error template: %v", err) Error("Could not parse error template: %v", err)
} }
@@ -159,7 +159,7 @@ func NotifyError(error string) {
Error: error, Error: error,
EndTime: time.Now().Format(TimeFormat()), EndTime: time.Now().Format(TimeFormat()),
BackupReference: os.Getenv("BACKUP_REFERENCE"), BackupReference: os.Getenv("BACKUP_REFERENCE"),
}, "telegram-error.template") }, "telegram-error.tmpl")
if err != nil { if err != nil {
Error("Could not parse error template: %v", err) Error("Could not parse error template: %v", err)