refactor: rework code to satisfy all golangci-lint checks
@@ -1,9 +1,3 @@
// Package internal /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package internal

import (
@@ -13,6 +7,7 @@ import (
"github.com/jkaninda/go-storage/pkg/local"
"github.com/jkaninda/go-storage/pkg/s3"
"github.com/jkaninda/go-storage/pkg/ssh"
"github.com/jkaninda/pg-bkup/pkg/logger"
"github.com/jkaninda/pg-bkup/utils"
"github.com/robfig/cron/v3"
"github.com/spf13/cobra"
@@ -26,9 +21,9 @@ import (

func StartBackup(cmd *cobra.Command) {
intro()
//Initialize backup configs
// Initialize backup configs
config := initBackupConfig(cmd)
//Load backup configuration file
// Load backup configuration file
configFile, err := loadConfigFile()
if err != nil {
dbConf = initDbConfig(cmd)
@@ -38,7 +33,7 @@ func StartBackup(cmd *cobra.Command) {
if utils.IsValidCronExpression(config.cronExpression) {
scheduledMode(dbConf, config)
} else {
utils.Fatal("Cron expression is not valid: %s", config.cronExpression)
logger.Fatal("Cron expression is not valid: %s", config.cronExpression)
}
}
} else {
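The branch above gates scheduled mode on utils.IsValidCronExpression. That helper is not part of this diff; as a rough, hypothetical sketch (not the project's actual implementation), such a check can be built on the parser bundled with the robfig/cron/v3 dependency that is already imported here:

package main

import (
	"fmt"

	"github.com/robfig/cron/v3"
)

// isValidCronExpression reports whether spec parses as a standard five-field
// cron expression. Hypothetical stand-in for utils.IsValidCronExpression.
func isValidCronExpression(spec string) bool {
	_, err := cron.ParseStandard(spec)
	return err == nil
}

func main() {
	fmt.Println(isValidCronExpression("*/15 * * * *")) // true
	fmt.Println(isValidCronExpression("not a cron"))   // false
}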
@@ -49,22 +44,22 @@ func StartBackup(cmd *cobra.Command) {

// scheduledMode Runs backup in scheduled mode
func scheduledMode(db *dbConfig, config *BackupConfig) {
utils.Info("Running in Scheduled mode")
utils.Info("Backup cron expression: %s", config.cronExpression)
utils.Info("The next scheduled time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
utils.Info("Storage type %s ", config.storage)
logger.Info("Running in Scheduled mode")
logger.Info("Backup cron expression: %s", config.cronExpression)
logger.Info("The next scheduled time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
logger.Info("Storage type %s ", config.storage)

//Test backup
utils.Info("Testing backup configurations...")
// Test backup
logger.Info("Testing backup configurations...")
BackupTask(db, config)
utils.Info("Testing backup configurations...done")
utils.Info("Creating backup job...")
logger.Info("Testing backup configurations...done")
logger.Info("Creating backup job...")
// Create a new cron instance
c := cron.New()

_, err := c.AddFunc(config.cronExpression, func() {
BackupTask(db, config)
utils.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
logger.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))

})
if err != nil {
@@ -72,8 +67,8 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
}
// Start the cron scheduler
c.Start()
utils.Info("Creating backup job...done")
utils.Info("Backup job started")
logger.Info("Creating backup job...done")
logger.Info("Backup job started")
defer c.Stop()
select {}
}
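scheduledMode is the standard robfig/cron/v3 pattern: register one job, start the scheduler, and block forever. A minimal, self-contained sketch of that pattern, with a placeholder task standing in for BackupTask:

package main

import (
	"log"

	"github.com/robfig/cron/v3"
)

func main() {
	spec := "*/5 * * * *" // placeholder expression: every five minutes

	c := cron.New()
	// AddFunc returns an entry ID plus an error if the expression does not parse.
	if _, err := c.AddFunc(spec, func() {
		log.Println("running backup task...") // stands in for BackupTask(db, config)
	}); err != nil {
		log.Fatalf("Cron expression is not valid: %v", err)
	}

	c.Start()
	defer c.Stop()
	log.Println("Backup job started")

	select {} // block forever, as the diff does
}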
@@ -81,7 +76,7 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
// multiBackupTask backup multi database
func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
for _, db := range databases {
//Check if path is defined in config file
// Check if path is defined in config file
if db.Path != "" {
bkConfig.remotePath = db.Path
}
@@ -91,8 +86,8 @@ func multiBackupTask(databases []Database, bkConfig *BackupConfig) {

// BackupTask backups database
func BackupTask(db *dbConfig, config *BackupConfig) {
utils.Info("Starting backup task...")
//Generate file name
logger.Info("Starting backup task...")
// Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
if config.disableCompression {
backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20060102_150405"))
@@ -107,18 +102,17 @@ func BackupTask(db *dbConfig, config *BackupConfig) {
sshBackup(db, config)
case "ftp", "FTP":
ftpBackup(db, config)
//utils.Fatal("Not supported storage type: %s", config.storage)
default:
localBackup(db, config)
}
}
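BackupTask dispatches on config.storage with a switch whose cases list both spellings of a backend (for example "ftp", "FTP"), and this hunk also drops a commented-out Fatal for unsupported types. As a hedged alternative sketch (not the project's code; every case label except "ftp" is an assumption), the same dispatch can normalize the value once with strings.ToLower:

package main

import (
	"fmt"
	"strings"
)

// dispatch picks a backup target from the storage name, case-insensitively.
// Illustrative only; the project's switch and case labels may differ.
func dispatch(storage string) string {
	switch strings.ToLower(storage) {
	case "s3":
		return "s3Backup"
	case "ssh":
		return "sshBackup"
	case "ftp":
		return "ftpBackup"
	default:
		return "localBackup"
	}
}

func main() {
	fmt.Println(dispatch("FTP")) // ftpBackup
	fmt.Println(dispatch(""))    // localBackup
}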
func startMultiBackup(bkConfig *BackupConfig, configFile string) {
utils.Info("Starting backup task...")
logger.Info("Starting backup task...")
conf, err := readConf(configFile)
if err != nil {
utils.Fatal("Error reading config file: %s", err)
logger.Fatal("Error reading config file: %s", err)
}
//Check if cronExpression is defined in config file
// Check if cronExpression is defined in config file
if conf.CronExpression != "" {
bkConfig.cronExpression = conf.CronExpression
}
@@ -128,22 +122,22 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
} else {
// Check if cronExpression is valid
if utils.IsValidCronExpression(bkConfig.cronExpression) {
utils.Info("Running backup in Scheduled mode")
utils.Info("Backup cron expression: %s", bkConfig.cronExpression)
utils.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
utils.Info("Storage type %s ", bkConfig.storage)
logger.Info("Running backup in Scheduled mode")
logger.Info("Backup cron expression: %s", bkConfig.cronExpression)
logger.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
logger.Info("Storage type %s ", bkConfig.storage)

//Test backup
utils.Info("Testing backup configurations...")
// Test backup
logger.Info("Testing backup configurations...")
multiBackupTask(conf.Databases, bkConfig)
utils.Info("Testing backup configurations...done")
utils.Info("Creating backup job...")
logger.Info("Testing backup configurations...done")
logger.Info("Creating backup job...")
// Create a new cron instance
c := cron.New()

_, err := c.AddFunc(bkConfig.cronExpression, func() {
multiBackupTask(conf.Databases, bkConfig)
utils.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
logger.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))

})
if err != nil {
@@ -151,13 +145,13 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
}
// Start the cron scheduler
c.Start()
utils.Info("Creating backup job...done")
utils.Info("Backup job started")
logger.Info("Creating backup job...done")
logger.Info("Backup job started")
defer c.Stop()
select {}

} else {
utils.Fatal("Cron expression is not valid: %s", bkConfig.cronExpression)
logger.Fatal("Cron expression is not valid: %s", bkConfig.cronExpression)
}
}

@@ -168,7 +162,7 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool

storagePath = os.Getenv("STORAGE_PATH")

utils.Info("Starting database backup...")
logger.Info("Starting database backup...")

err := os.Setenv("PGPASSWORD", db.dbPassword)
if err != nil {
@@ -176,7 +170,7 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
}
testDatabaseConnection(db)
// Backup Database database
utils.Info("Backing up database...")
logger.Info("Backing up database...")

// Verify is compression is disabled
if disableCompression {
@@ -196,7 +190,13 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
if err != nil {
log.Fatal(err)
}
defer file.Close()
defer func(file *os.File) {
err := file.Close()
if err != nil {
return

}
}(file)

_, err = file.Write(output)
if err != nil {
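The hunk above is one of the substantive golangci-lint fixes: a bare defer file.Close() is replaced by a closure so the error returned by Close is no longer silently discarded (errcheck). A small self-contained sketch of that pattern; the file path is a placeholder, and this sketch logs the Close error instead of just returning as the diff does:

package main

import (
	"log"
	"os"
)

func main() {
	file, err := os.Create("/tmp/example-backup.sql") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	// Handle the error returned by Close instead of discarding it.
	defer func(f *os.File) {
		if cerr := f.Close(); cerr != nil {
			log.Printf("closing backup file: %v", cerr)
		}
	}(file)

	if _, err := file.Write([]byte("-- dump output --\n")); err != nil {
		log.Fatal(err)
	}
}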
@@ -219,7 +219,10 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
gzipCmd.Stdin = stdout
// save output
gzipCmd.Stdout, err = os.Create(filepath.Join(tmpPath, backupFileName))
gzipCmd.Start()
err2 := gzipCmd.Start()
if err2 != nil {
return
}
if err != nil {
log.Fatal(err)
}
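This hunk checks the error from gzipCmd.Start() instead of ignoring it. The surrounding code pipes a dump command into gzip via os/exec; a hedged, self-contained sketch of that shape with every stage checked (command names, arguments, and paths are placeholders, not the project's exact invocation):

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	dumpCmd := exec.Command("pg_dump", "-d", "exampledb") // placeholder command
	gzipCmd := exec.Command("gzip")

	stdout, err := dumpCmd.StdoutPipe()
	if err != nil {
		log.Fatal(err)
	}
	gzipCmd.Stdin = stdout

	out, err := os.Create("/tmp/exampledb.sql.gz") // placeholder output path
	if err != nil {
		log.Fatal(err)
	}
	gzipCmd.Stdout = out

	// Check the error from every stage instead of discarding it.
	if err := gzipCmd.Start(); err != nil {
		log.Fatal(err)
	}
	if err := dumpCmd.Run(); err != nil {
		log.Fatal(err)
	}
	if err := gzipCmd.Wait(); err != nil {
		log.Fatal(err)
	}
	if err := out.Close(); err != nil {
		log.Fatal(err)
	}
}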
@@ -231,11 +234,11 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
}

}
utils.Info("Database has been backed up")
logger.Info("Database has been backed up")

}
func localBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to local storage")
logger.Info("Backup database to local storage")
startTime = time.Now().Format(utils.TimeFormat())
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
@@ -245,20 +248,20 @@ func localBackup(db *dbConfig, config *BackupConfig) {
}
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
logger.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
utils.Info("Backup name is %s", finalFileName)
logger.Info("Backup name is %s", finalFileName)
localStorage := local.NewStorage(local.Config{
LocalPath: tmpPath,
RemotePath: storagePath,
})
err = localStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
logger.Fatal("Error copying backup file: %s", err)
}
utils.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName))
//Send notification
logger.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName))
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
@@ -268,36 +271,36 @@ func localBackup(db *dbConfig, config *BackupConfig) {
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
//Delete old backup
// Delete old backup
if config.prune {
err = localStorage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}

}
//Delete temp
// Delete temp
deleteTemp()
utils.Info("Backup completed successfully")
logger.Info("Backup completed successfully")
}
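localBackup drives the local backend of jkaninda/go-storage: construct a storage from a LocalPath and RemotePath, Copy the finished file out of the temporary directory, and optionally Prune old backups. A hedged sketch of just that sequence, based only on the calls visible in this diff (paths, file name, and retention value are placeholders):

package main

import (
	"log"

	"github.com/jkaninda/go-storage/pkg/local"
)

func main() {
	localStorage := local.NewStorage(local.Config{
		LocalPath:  "/tmp/backup", // where the dump was written (placeholder)
		RemotePath: "/backup",     // final storage location (placeholder)
	})

	// Copy the finished backup out of the temporary directory.
	if err := localStorage.Copy("exampledb_20240101_000000.sql.gz"); err != nil {
		log.Fatalf("Error copying backup file: %s", err)
	}

	// Remove backups older than the configured retention (value is a placeholder).
	if err := localStorage.Prune(7); err != nil {
		log.Fatalf("Error deleting old backup from local storage: %s", err)
	}
}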
func s3Backup(db *dbConfig, config *BackupConfig) {

utils.Info("Backup database to s3 storage")
logger.Info("Backup database to s3 storage")
startTime = time.Now().Format(utils.TimeFormat())
//Backup database
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to remote storage S3 ... ")
logger.Info("Uploading backup archive to remote storage S3 ... ")
awsConfig := initAWSConfig()
if config.remotePath == "" {
config.remotePath = awsConfig.remotePath
}
utils.Info("Backup name is %s", finalFileName)
logger.Info("Backup name is %s", finalFileName)
s3Storage, err := s3.NewStorage(s3.Config{
Endpoint: awsConfig.endpoint,
Bucket: awsConfig.bucket,
@@ -310,20 +313,20 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating s3 storage: %s", err)
logger.Fatal("Error creating s3 storage: %s", err)
}
err = s3Storage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
logger.Fatal("Error copying backup file: %s", err)
}
//Get backup info
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
logger.Error("Error: %s", err)
}
backupSize = fileInfo.Size()

//Delete backup file from tmp folder
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
if err != nil {
fmt.Println("Error deleting file: ", err)
@@ -333,12 +336,12 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
if config.prune {
err := s3Storage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
utils.Info("Uploading backup archive to remote storage S3 ... done ")
//Send notification
logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
logger.Info("Uploading backup archive to remote storage S3 ... done ")
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
@@ -348,26 +351,26 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
//Delete temp
// Delete temp
deleteTemp()
utils.Info("Backup completed successfully")
logger.Info("Backup completed successfully")

}
func sshBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to Remote server")
logger.Info("Backup database to Remote server")
startTime = time.Now().Format(utils.TimeFormat())
//Backup database
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to remote storage ... ")
utils.Info("Backup name is %s", finalFileName)
logger.Info("Uploading backup archive to remote storage ... ")
logger.Info("Backup name is %s", finalFileName)
sshConfig, err := loadSSHConfig()
if err != nil {
utils.Fatal("Error loading ssh config: %s", err)
logger.Fatal("Error loading ssh config: %s", err)
}

sshStorage, err := ssh.NewStorage(ssh.Config{
@@ -379,35 +382,35 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
logger.Fatal("Error creating SSH storage: %s", err)
}
err = sshStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
logger.Fatal("Error copying backup file: %s", err)
}
//Get backup info
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
logger.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))

//Delete backup file from tmp folder
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error deleting file: %v", err)
logger.Error("Error deleting file: %v", err)

}
if config.prune {
err := sshStorage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}

}
utils.Info("Uploading backup archive to remote storage ... done ")
//Send notification
logger.Info("Uploading backup archive to remote storage ... done ")
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
@@ -417,24 +420,24 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
//Delete temp
// Delete temp
deleteTemp()
utils.Info("Backup completed successfully")
logger.Info("Backup completed successfully")

}
func ftpBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to the remote FTP server")
logger.Info("Backup database to the remote FTP server")
startTime = time.Now().Format(utils.TimeFormat())

//Backup database
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to the remote FTP server ... ")
utils.Info("Backup name is %s", finalFileName)
logger.Info("Uploading backup archive to the remote FTP server ... ")
logger.Info("Backup name is %s", finalFileName)
ftpConfig := loadFtpConfig()
ftpStorage, err := ftp.NewStorage(ftp.Config{
Host: ftpConfig.host,
@@ -445,36 +448,36 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
logger.Fatal("Error creating SSH storage: %s", err)
}
err = ftpStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
logger.Fatal("Error copying backup file: %s", err)
}
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
//Get backup info
logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
logger.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
//Delete backup file from tmp folder
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error deleting file: %v", err)
logger.Error("Error deleting file: %v", err)

}
if config.prune {
err := ftpStorage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}

}

utils.Info("Uploading backup archive to the remote FTP server ... done ")
logger.Info("Uploading backup archive to the remote FTP server ... done ")

//Send notification
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
@@ -484,36 +487,36 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
//Delete temp
// Delete temp
deleteTemp()
utils.Info("Backup completed successfully")
logger.Info("Backup completed successfully")
}
func encryptBackup(config *BackupConfig) {
backupFile, err := os.ReadFile(filepath.Join(tmpPath, config.backupFileName))
outputFile := fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension)
if err != nil {
utils.Fatal("Error reading backup file: %s ", err)
logger.Fatal("Error reading backup file: %s ", err)
}
if config.usingKey {
utils.Info("Encrypting backup using public key...")
logger.Info("Encrypting backup using public key...")
pubKey, err := os.ReadFile(config.publicKey)
if err != nil {
utils.Fatal("Error reading public key: %s ", err)
logger.Fatal("Error reading public key: %s ", err)
}
err = encryptor.EncryptWithPublicKey(backupFile, fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension), pubKey)
if err != nil {
utils.Fatal("Error encrypting backup file: %v ", err)
logger.Fatal("Error encrypting backup file: %v ", err)
}
utils.Info("Encrypting backup using public key...done")
logger.Info("Encrypting backup using public key...done")

} else if config.passphrase != "" {
utils.Info("Encrypting backup using passphrase...")
logger.Info("Encrypting backup using passphrase...")
err := encryptor.Encrypt(backupFile, outputFile, config.passphrase)
if err != nil {
utils.Fatal("error during encrypting backup %v", err)
logger.Fatal("error during encrypting backup %v", err)
}
utils.Info("Encrypting backup using passphrase...done")
logger.Info("Encrypting backup using passphrase...done")

}
@@ -1,5 +1,6 @@
// Package internal /
/*****
/*
****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
@@ -8,6 +9,7 @@ package internal

import (
"fmt"
"github.com/jkaninda/pg-bkup/pkg/logger"
"github.com/jkaninda/pg-bkup/utils"
"github.com/spf13/cobra"
"os"
@@ -86,7 +88,7 @@ type AWSConfig struct {
}

func initDbConfig(cmd *cobra.Command) *dbConfig {
//Set env
// Set env
utils.GetEnv(cmd, "dbname", "DB_NAME")
dConf := dbConfig{}
dConf.dbHost = os.Getenv("DB_HOST")
@@ -97,8 +99,8 @@ func initDbConfig(cmd *cobra.Command) *dbConfig {

err := utils.CheckEnvVars(dbHVars)
if err != nil {
utils.Error("Please make sure all required environment variables for database are set")
utils.Fatal("Error checking environment variables: %s", err)
logger.Error("Please make sure all required environment variables for database are set")
logger.Fatal("Error checking environment variables: %s", err)
}
return &dConf
}
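initDbConfig validates required settings with utils.CheckEnvVars(dbHVars), whose implementation is not part of this diff. As a rough, hypothetical illustration of what such a helper might look like (the function body and the variable list are assumptions, not the project's code):

package main

import (
	"fmt"
	"log"
	"os"
)

// checkEnvVars returns an error naming every variable that is unset or empty.
// Hypothetical stand-in for utils.CheckEnvVars.
func checkEnvVars(vars []string) error {
	var missing []string
	for _, v := range vars {
		if value, ok := os.LookupEnv(v); !ok || value == "" {
			missing = append(missing, v)
		}
	}
	if len(missing) > 0 {
		return fmt.Errorf("missing required environment variables: %v", missing)
	}
	return nil
}

func main() {
	dbHVars := []string{"DB_HOST", "DB_NAME", "DB_USERNAME", "DB_PASSWORD"} // assumed list
	if err := checkEnvVars(dbHVars); err != nil {
		log.Fatalf("Error checking environment variables: %s", err)
	}
}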
@@ -131,7 +133,7 @@ func loadSSHConfig() (*SSHConfig, error) {
}, nil
}
func loadFtpConfig() *FTPConfig {
//Initialize data configs
// Initialize data configs
fConfig := FTPConfig{}
fConfig.host = utils.GetEnvVariable("FTP_HOST", "FTP_HOST_NAME")
fConfig.user = os.Getenv("FTP_USER")
@@ -140,13 +142,13 @@ func loadFtpConfig() *FTPConfig {
fConfig.remotePath = os.Getenv("REMOTE_PATH")
err := utils.CheckEnvVars(ftpVars)
if err != nil {
utils.Error("Please make sure all required environment variables for FTP are set")
utils.Fatal("Error missing environment variables: %s", err)
logger.Error("Please make sure all required environment variables for FTP are set")
logger.Fatal("Error missing environment variables: %s", err)
}
return &fConfig
}
func initAWSConfig() *AWSConfig {
//Initialize AWS configs
// Initialize AWS configs
aConfig := AWSConfig{}
aConfig.endpoint = utils.GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
aConfig.accessKey = utils.GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
@@ -167,8 +169,8 @@ func initAWSConfig() *AWSConfig {
aConfig.forcePathStyle = forcePathStyle
err = utils.CheckEnvVars(awsVars)
if err != nil {
utils.Error("Please make sure all required environment variables for AWS S3 are set")
utils.Fatal("Error checking environment variables: %s", err)
logger.Error("Please make sure all required environment variables for AWS S3 are set")
logger.Fatal("Error checking environment variables: %s", err)
}
return &aConfig
}
@@ -176,7 +178,7 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig {
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
utils.GetEnv(cmd, "path", "REMOTE_PATH")
//Get flag value and set env
// Get flag value and set env
remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
prune := false
@@ -198,7 +200,7 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig {
encryption = true
usingKey = false
}
//Initialize backup configs
// Initialize backup configs
config := BackupConfig{}
config.backupRetention = backupRetention
config.disableCompression = disableCompression
@@ -228,7 +230,7 @@ func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "path", "REMOTE_PATH")

//Get flag value and set env
// Get flag value and set env
s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
@@ -242,7 +244,7 @@ func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
usingKey = false
}

//Initialize restore configs
// Initialize restore configs
rConfig := RestoreConfig{}
rConfig.s3Path = s3Path
rConfig.remotePath = remotePath
@@ -265,8 +267,8 @@ func initTargetDbConfig() *targetDbConfig {

err := utils.CheckEnvVars(tdbRVars)
if err != nil {
utils.Error("Please make sure all required environment variables for the target database are set")
utils.Fatal("Error checking target database environment variables: %s", err)
logger.Error("Please make sure all required environment variables for the target database are set")
logger.Fatal("Error checking target database environment variables: %s", err)
}
return &tdbConfig
}
@@ -1,5 +1,6 @@
// Package internal /
/*****
/*
****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
@@ -9,6 +10,7 @@ package internal
import (
"bytes"
"fmt"
"github.com/jkaninda/pg-bkup/pkg/logger"
"github.com/jkaninda/pg-bkup/utils"
"gopkg.in/yaml.v3"
"os"
@@ -18,13 +20,13 @@ import (
)

func intro() {
utils.Info("Starting PostgreSQL Backup...")
utils.Info("Copyright (c) 2024 Jonas Kaninda ")
logger.Info("Starting PostgreSQL Backup...")
logger.Info("Copyright (c) 2024 Jonas Kaninda ")
}

// copyToTmp copy file to temporary directory
func deleteTemp() {
utils.Info("Deleting %s ...", tmpPath)
logger.Info("Deleting %s ...", tmpPath)
err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
@@ -40,16 +42,16 @@ func deleteTemp() {
return nil
})
if err != nil {
utils.Error("Error deleting files: %v", err)
logger.Error("Error deleting files: %v", err)
} else {
utils.Info("Deleting %s ... done", tmpPath)
logger.Info("Deleting %s ... done", tmpPath)
}
}
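deleteTemp walks tmpPath with filepath.Walk and removes what it finds once a backup run is finished. A small self-contained sketch of that cleanup pattern (the directory is a placeholder and the skip/keep rules are assumptions; only the lines visible in this hunk come from the project):

package main

import (
	"log"
	"os"
	"path/filepath"
)

func main() {
	tmpPath := "/tmp/backup" // placeholder temporary directory

	err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Keep directories (including tmpPath itself); delete regular files.
		if info.IsDir() {
			return nil
		}
		return os.Remove(path)
	})
	if err != nil {
		log.Printf("Error deleting files: %v", err)
	} else {
		log.Printf("Deleting %s ... done", tmpPath)
	}
}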
// TestDatabaseConnection tests the database connection
func testDatabaseConnection(db *dbConfig) {

utils.Info("Connecting to %s database ...", db.dbName)
logger.Info("Connecting to %s database ...", db.dbName)
// Test database connection
query := "SELECT version();"

@@ -74,10 +76,10 @@ func testDatabaseConnection(db *dbConfig) {
// Run the command and capture any errors
err = cmd.Run()
if err != nil {
utils.Fatal("Error running psql command: %v\nOutput: %s\n", err, out.String())
logger.Fatal("Error running psql command: %v\nOutput: %s\n", err, out.String())
return
}
utils.Info("Successfully connected to %s database", db.dbName)
logger.Info("Successfully connected to %s database", db.dbName)

}
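testDatabaseConnection shells out to psql with a SELECT version(); probe and captures the combined output in a bytes.Buffer so it can be included in the fatal log line. A hedged sketch of that approach (host, port, user, database, and password are placeholders; not every psql flag the project passes is visible in this hunk):

package main

import (
	"bytes"
	"log"
	"os"
	"os/exec"
)

func main() {
	// Placeholder connection settings.
	dbHost, dbPort, dbUser, dbName := "localhost", "5432", "postgres", "exampledb"

	if err := os.Setenv("PGPASSWORD", "secret"); err != nil { // placeholder password
		log.Fatal(err)
	}

	query := "SELECT version();"
	cmd := exec.Command("psql", "-h", dbHost, "-p", dbPort, "-U", dbUser, "-d", dbName, "-c", query)

	var out bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &out

	// Run the command and capture any errors together with psql's output.
	if err := cmd.Run(); err != nil {
		log.Fatalf("Error running psql command: %v\nOutput: %s\n", err, out.String())
	}
	log.Printf("Successfully connected to %s database", dbName)
}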
@@ -1,5 +1,6 @@
// Package internal /
/*****
/*
****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
@@ -8,19 +9,19 @@ package internal

import (
"fmt"
"github.com/jkaninda/pg-bkup/utils"
"github.com/jkaninda/pg-bkup/pkg/logger"
"github.com/spf13/cobra"
"time"
)

func StartMigration(cmd *cobra.Command) {
intro()
utils.Info("Starting database migration...")
//Get DB config
logger.Info("Starting database migration...")
// Get DB config
dbConf = initDbConfig(cmd)
targetDbConf = initTargetDbConfig()

//Defining the target database variables
// Defining the target database variables
newDbConfig := dbConfig{}
newDbConfig.dbHost = targetDbConf.targetDbHost
newDbConfig.dbPort = targetDbConf.targetDbPort
@@ -28,15 +29,15 @@ func StartMigration(cmd *cobra.Command) {
newDbConfig.dbUserName = targetDbConf.targetDbUserName
newDbConfig.dbPassword = targetDbConf.targetDbPassword

//Generate file name
// Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
conf := &RestoreConfig{}
conf.file = backupFileName
//Backup source Database
// Backup source Database
BackupDatabase(dbConf, backupFileName, true)
//Restore source database into target database
utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
// Restore source database into target database
logger.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
RestoreDatabase(&newDbConfig, conf)
utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
utils.Info("Database migration completed.")
logger.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
logger.Info("Database migration completed.")
}
@@ -1,5 +1,6 @@
// Package internal /
/*****
/*
****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
@@ -7,6 +8,7 @@
package internal

import (
"github.com/jkaninda/pg-bkup/pkg/logger"
"os"
"os/exec"
"path/filepath"
@@ -39,20 +41,20 @@ func StartRestore(cmd *cobra.Command) {
}
}
func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) {
utils.Info("Restore database from local")
logger.Info("Restore database from local")
localStorage := local.NewStorage(local.Config{
RemotePath: storagePath,
LocalPath: tmpPath,
})
err := localStorage.CopyFrom(restoreConf.file)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
logger.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(dbConf, restoreConf)

}
func restoreFromS3(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from s3")
logger.Info("Restore database from s3")
awsConfig := initAWSConfig()
if conf.remotePath == "" {
conf.remotePath = awsConfig.remotePath
@@ -69,19 +71,19 @@ func restoreFromS3(db *dbConfig, conf *RestoreConfig) {
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating s3 storage: %s", err)
logger.Fatal("Error creating s3 storage: %s", err)
}
err = s3Storage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error download file from S3 storage: %s", err)
logger.Fatal("Error download file from S3 storage: %s", err)
}
RestoreDatabase(db, conf)
}
func restoreFromRemote(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from remote server")
logger.Info("Restore database from remote server")
sshConfig, err := loadSSHConfig()
if err != nil {
utils.Fatal("Error loading ssh config: %s", err)
logger.Fatal("Error loading ssh config: %s", err)
}

sshStorage, err := ssh.NewStorage(ssh.Config{
@@ -94,16 +96,16 @@ func restoreFromRemote(db *dbConfig, conf *RestoreConfig) {
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
logger.Fatal("Error creating SSH storage: %s", err)
}
err = sshStorage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
logger.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(db, conf)
}
func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from FTP server")
logger.Info("Restore database from FTP server")
ftpConfig := loadFtpConfig()
ftpStorage, err := ftp.NewStorage(ftp.Config{
Host: ftpConfig.host,
@@ -114,11 +116,11 @@ func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
logger.Fatal("Error creating SSH storage: %s", err)
}
err = ftpStorage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
logger.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(db, conf)
}
@@ -126,43 +128,43 @@ func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
// RestoreDatabase restore database
func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
if conf.file == "" {
utils.Fatal("Error, file required")
logger.Fatal("Error, file required")
}
extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
rFile, err := os.ReadFile(filepath.Join(tmpPath, conf.file))
outputFile := RemoveLastExtension(filepath.Join(tmpPath, conf.file))
if err != nil {
utils.Fatal("Error reading backup file: %s ", err)
logger.Fatal("Error reading backup file: %s ", err)
}

if extension == ".gpg" {

if conf.usingKey {
utils.Info("Decrypting backup using private key...")
utils.Warn("Backup decryption using a private key is not fully supported")
logger.Info("Decrypting backup using private key...")
logger.Warn("Backup decryption using a private key is not fully supported")
prKey, err := os.ReadFile(conf.privateKey)
if err != nil {
utils.Fatal("Error reading public key: %s ", err)
logger.Fatal("Error reading public key: %s ", err)
}
err = encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase)
if err != nil {
utils.Fatal("error during decrypting backup %v", err)
logger.Fatal("error during decrypting backup %v", err)
}
utils.Info("Decrypting backup using private key...done")
logger.Info("Decrypting backup using private key...done")

} else {
if conf.passphrase == "" {
utils.Error("Error, passphrase or private key required")
utils.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.")
logger.Error("Error, passphrase or private key required")
logger.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.")
} else {
utils.Info("Decrypting backup using passphrase...")
//decryptWithGPG file
logger.Info("Decrypting backup using passphrase...")
// decryptWithGPG file
err := encryptor.Decrypt(rFile, outputFile, conf.passphrase)
if err != nil {
utils.Fatal("Error decrypting file %s %v", file, err)
logger.Fatal("Error decrypting file %s %v", file, err)
}
utils.Info("Decrypting backup using passphrase...done")
//Update file name
logger.Info("Decrypting backup using passphrase...done")
// Update file name
conf.file = RemoveLastExtension(file)
}
}
@@ -176,7 +178,7 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
return
}
testDatabaseConnection(db)
utils.Info("Restoring database...")
logger.Info("Restoring database...")

extension := filepath.Ext(conf.file)
// Restore from compressed file / .sql.gz
@@ -184,29 +186,29 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
str := "zcat " + filepath.Join(tmpPath, conf.file) + " | psql -h " + db.dbHost + " -p " + db.dbPort + " -U " + db.dbUserName + " -v -d " + db.dbName
_, err := exec.Command("sh", "-c", str).Output()
if err != nil {
utils.Fatal("Error, in restoring the database %v", err)
logger.Fatal("Error, in restoring the database %v", err)
}
utils.Info("Restoring database... done")
utils.Info("Database has been restored")
//Delete temp
logger.Info("Restoring database... done")
logger.Info("Database has been restored")
// Delete temp
deleteTemp()

} else if extension == ".sql" {
//Restore from sql file
// Restore from sql file
str := "cat " + filepath.Join(tmpPath, conf.file) + " | psql -h " + db.dbHost + " -p " + db.dbPort + " -U " + db.dbUserName + " -v -d " + db.dbName
_, err := exec.Command("sh", "-c", str).Output()
if err != nil {
utils.Fatal("Error in restoring the database %v", err)
logger.Fatal("Error in restoring the database %v", err)
}
utils.Info("Restoring database... done")
utils.Info("Database has been restored")
//Delete temp
logger.Info("Restoring database... done")
logger.Info("Database has been restored")
// Delete temp
deleteTemp()
} else {
utils.Fatal("Unknown file extension: %s", extension)
logger.Fatal("Unknown file extension: %s", extension)
}

} else {
utils.Fatal("File not found in %s", filepath.Join(tmpPath, conf.file))
logger.Fatal("File not found in %s", filepath.Join(tmpPath, conf.file))
}
}
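For a .sql.gz archive, RestoreDatabase builds a shell pipeline that decompresses the dump with zcat and streams it into psql. A hedged, self-contained sketch of the same shape (connection values, password, and file name are placeholders; this sketch captures combined output for easier debugging, while the diff uses Output()):

package main

import (
	"log"
	"os"
	"os/exec"
	"path/filepath"
)

func main() {
	// Placeholder settings.
	tmpPath := "/tmp/backup"
	file := "exampledb_20240101_000000.sql.gz"
	dbHost, dbPort, dbUser, dbName := "localhost", "5432", "postgres", "exampledb"

	if err := os.Setenv("PGPASSWORD", "secret"); err != nil { // placeholder password
		log.Fatal(err)
	}

	// Decompress the dump and stream it into psql, mirroring the diff's pipeline.
	str := "zcat " + filepath.Join(tmpPath, file) +
		" | psql -h " + dbHost + " -p " + dbPort + " -U " + dbUser + " -v -d " + dbName
	if out, err := exec.Command("sh", "-c", str).CombinedOutput(); err != nil {
		log.Fatalf("Error in restoring the database %v\n%s", err, out)
	}
	log.Println("Database has been restored")
}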
@@ -1,14 +1,8 @@
// Package internal /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package internal

const tmpPath = "/tmp/backup"
const gpgHome = "/config/gnupg"
const algorithm = "aes256"
const gpgExtension = "gpg"
const timeFormat = "2006-01-02 at 15:04:05"

@@ -44,13 +38,6 @@ var tdbRVars = []string{
var dbConf *dbConfig
var targetDbConf *targetDbConfig

// sshVars Required environment variables for SSH remote server storage
var sshVars = []string{
"SSH_USER",
"SSH_HOST_NAME",
"SSH_PORT",
"REMOTE_PATH",
}
var ftpVars = []string{
"FTP_HOST_NAME",
"FTP_USER",