refactor: create storage interface, refactor local, s3, ftp and ssh storage

This commit is contained in:
Jonas Kaninda
2024-10-22 17:21:01 +02:00
parent 3a0137d6ea
commit 431be36210
13 changed files with 719 additions and 480 deletions

View File

@@ -9,6 +9,10 @@ package pkg
import ( import (
"fmt" "fmt"
"github.com/jkaninda/encryptor" "github.com/jkaninda/encryptor"
"github.com/jkaninda/mysql-bkup/pkg/storage/ftp"
"github.com/jkaninda/mysql-bkup/pkg/storage/local"
"github.com/jkaninda/mysql-bkup/pkg/storage/s3"
"github.com/jkaninda/mysql-bkup/pkg/storage/ssh"
"github.com/jkaninda/mysql-bkup/utils" "github.com/jkaninda/mysql-bkup/utils"
"github.com/robfig/cron/v3" "github.com/robfig/cron/v3"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@@ -42,7 +46,7 @@ func StartBackup(cmd *cobra.Command) {
} }
// Run in scheduled mode // scheduledMode Runs backup in scheduled mode
func scheduledMode(db *dbConfig, config *BackupConfig) { func scheduledMode(db *dbConfig, config *BackupConfig) {
utils.Info("Running in Scheduled mode") utils.Info("Running in Scheduled mode")
utils.Info("Backup cron expression: %s", config.cronExpression) utils.Info("Backup cron expression: %s", config.cronExpression)
@@ -72,6 +76,19 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
defer c.Stop() defer c.Stop()
select {} select {}
} }
// multiBackupTask backup multi database
func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
for _, db := range databases {
//Check if path is defined in config file
if db.Path != "" {
bkConfig.remotePath = db.Path
}
BackupTask(getDatabase(db), bkConfig)
}
}
// BackupTask backups database
func BackupTask(db *dbConfig, config *BackupConfig) { func BackupTask(db *dbConfig, config *BackupConfig) {
utils.Info("Starting backup task...") utils.Info("Starting backup task...")
//Generate file name //Generate file name
@@ -94,17 +111,8 @@ func BackupTask(db *dbConfig, config *BackupConfig) {
localBackup(db, config) localBackup(db, config)
} }
} }
func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
for _, db := range databases {
//Check if path is defined in config file
if db.Path != "" {
bkConfig.remotePath = db.Path
}
BackupTask(getDatabase(db), bkConfig)
}
}
func startMultiBackup(bkConfig *BackupConfig, configFile string) { func startMultiBackup(bkConfig *BackupConfig, configFile string) {
utils.Info("Starting multiple backup jobs...") utils.Info("Starting backup task...")
conf, err := readConf(configFile) conf, err := readConf(configFile)
if err != nil { if err != nil {
utils.Fatal("Error reading config file: %s", err) utils.Fatal("Error reading config file: %s", err)
@@ -119,7 +127,7 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
} else { } else {
// Check if cronExpression is valid // Check if cronExpression is valid
if utils.IsValidCronExpression(bkConfig.cronExpression) { if utils.IsValidCronExpression(bkConfig.cronExpression) {
utils.Info("Running MultiBackup in Scheduled mode") utils.Info("Running backup in Scheduled mode")
utils.Info("Backup cron expression: %s", bkConfig.cronExpression) utils.Info("Backup cron expression: %s", bkConfig.cronExpression)
utils.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat)) utils.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
utils.Info("Storage type %s ", bkConfig.storage) utils.Info("Storage type %s ", bkConfig.storage)
@@ -128,12 +136,11 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
utils.Info("Testing backup configurations...") utils.Info("Testing backup configurations...")
multiBackupTask(conf.Databases, bkConfig) multiBackupTask(conf.Databases, bkConfig)
utils.Info("Testing backup configurations...done") utils.Info("Testing backup configurations...done")
utils.Info("Creating multi backup job...") utils.Info("Creating backup job...")
// Create a new cron instance // Create a new cron instance
c := cron.New() c := cron.New()
_, err := c.AddFunc(bkConfig.cronExpression, func() { _, err := c.AddFunc(bkConfig.cronExpression, func() {
// Create a channel
multiBackupTask(conf.Databases, bkConfig) multiBackupTask(conf.Databases, bkConfig)
utils.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat)) utils.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
@@ -143,7 +150,7 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
} }
// Start the cron scheduler // Start the cron scheduler
c.Start() c.Start()
utils.Info("Creating multi backup job...done") utils.Info("Creating backup job...done")
utils.Info("Backup job started") utils.Info("Backup job started")
defer c.Stop() defer c.Stop()
select {} select {}
@@ -220,7 +227,6 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
utils.Info("Database has been backed up") utils.Info("Database has been backed up")
} }
} }
func localBackup(db *dbConfig, config *BackupConfig) { func localBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to local storage") utils.Info("Backup database to local storage")
@@ -235,23 +241,34 @@ func localBackup(db *dbConfig, config *BackupConfig) {
if err != nil { if err != nil {
utils.Error("Error:", err) utils.Error("Error:", err)
} }
//Get backup info
backupSize = fileInfo.Size() backupSize = fileInfo.Size()
utils.Info("Backup name is %s", finalFileName) utils.Info("Backup name is %s", finalFileName)
moveToBackup(finalFileName, storagePath) localStorage := local.NewStorage(local.Config{
LocalPath: tmpPath,
RemotePath: storagePath,
})
err = localStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
utils.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName))
//Send notification //Send notification
utils.NotifySuccess(&utils.NotificationData{ utils.NotifySuccess(&utils.NotificationData{
File: finalFileName, File: finalFileName,
BackupSize: backupSize, BackupSize: backupSize,
Database: db.dbName, Database: db.dbName,
Storage: config.storage, Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName), BackupLocation: filepath.Join(storagePath, finalFileName),
StartTime: startTime, StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()), EndTime: time.Now().Format(utils.TimeFormat()),
}) })
//Delete old backup //Delete old backup
if config.prune { if config.prune {
deleteOldBackup(config.backupRetention) err = localStorage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
} }
//Delete temp //Delete temp
deleteTemp() deleteTemp()
@@ -259,14 +276,9 @@ func localBackup(db *dbConfig, config *BackupConfig) {
} }
func s3Backup(db *dbConfig, config *BackupConfig) { func s3Backup(db *dbConfig, config *BackupConfig) {
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
if config.remotePath != "" {
s3Path = config.remotePath
}
utils.Info("Backup database to s3 storage") utils.Info("Backup database to s3 storage")
startTime = time.Now().Format(utils.TimeFormat()) startTime = time.Now().Format(utils.TimeFormat())
//Backup database //Backup database
BackupDatabase(db, config.backupFileName, disableCompression) BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName finalFileName := config.backupFileName
@@ -275,12 +287,28 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg") finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
} }
utils.Info("Uploading backup archive to remote storage S3 ... ") utils.Info("Uploading backup archive to remote storage S3 ... ")
awsConfig := initAWSConfig()
if config.remotePath == "" {
config.remotePath = awsConfig.remotePath
}
utils.Info("Backup name is %s", finalFileName) utils.Info("Backup name is %s", finalFileName)
err := UploadFileToS3(tmpPath, finalFileName, bucket, s3Path) s3Storage, err := s3.NewStorage(s3.Config{
Endpoint: awsConfig.endpoint,
Bucket: awsConfig.bucket,
AccessKey: awsConfig.accessKey,
SecretKey: awsConfig.secretKey,
Region: awsConfig.region,
DisableSsl: awsConfig.disableSsl,
ForcePathStyle: awsConfig.forcePathStyle,
RemotePath: awsConfig.remotePath,
LocalPath: tmpPath,
})
if err != nil { if err != nil {
utils.Fatal("Error uploading backup archive to S3: %s ", err) utils.Fatal("Error creating s3 storage: %s", err)
}
err = s3Storage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
} }
//Get backup info //Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName)) fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
@@ -288,6 +316,7 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
utils.Error("Error:", err) utils.Error("Error:", err)
} }
backupSize = fileInfo.Size() backupSize = fileInfo.Size()
//Delete backup file from tmp folder //Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName)) err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
if err != nil { if err != nil {
@@ -296,11 +325,12 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
} }
// Delete old backup // Delete old backup
if config.prune { if config.prune {
err := DeleteOldBackup(bucket, s3Path, config.backupRetention) err := s3Storage.Prune(config.backupRetention)
if err != nil { if err != nil {
utils.Fatal("Error deleting old backup from S3: %s ", err) utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
} }
} }
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
utils.Info("Uploading backup archive to remote storage S3 ... done ") utils.Info("Uploading backup archive to remote storage S3 ... done ")
//Send notification //Send notification
utils.NotifySuccess(&utils.NotificationData{ utils.NotifySuccess(&utils.NotificationData{
@@ -308,7 +338,7 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
BackupSize: backupSize, BackupSize: backupSize,
Database: db.dbName, Database: db.dbName,
Storage: config.storage, Storage: config.storage,
BackupLocation: filepath.Join(s3Path, finalFileName), BackupLocation: filepath.Join(config.remotePath, finalFileName),
StartTime: startTime, StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()), EndTime: time.Now().Format(utils.TimeFormat()),
}) })
@@ -320,7 +350,6 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
func sshBackup(db *dbConfig, config *BackupConfig) { func sshBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to Remote server") utils.Info("Backup database to Remote server")
startTime = time.Now().Format(utils.TimeFormat()) startTime = time.Now().Format(utils.TimeFormat())
//Backup database //Backup database
BackupDatabase(db, config.backupFileName, disableCompression) BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName finalFileName := config.backupFileName
@@ -330,10 +359,25 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
} }
utils.Info("Uploading backup archive to remote storage ... ") utils.Info("Uploading backup archive to remote storage ... ")
utils.Info("Backup name is %s", finalFileName) utils.Info("Backup name is %s", finalFileName)
err := CopyToRemote(finalFileName, config.remotePath) sshConfig, err := loadSSHConfig()
if err != nil { if err != nil {
utils.Fatal("Error uploading file to the remote server: %s ", err) utils.Fatal("Error loading ssh config: %s", err)
}
sshStorage, err := ssh.NewStorage(ssh.Config{
Host: sshConfig.hostName,
Port: sshConfig.port,
User: sshConfig.user,
Password: sshConfig.password,
RemotePath: config.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
}
err = sshStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
} }
//Get backup info //Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName)) fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
@@ -341,6 +385,8 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
utils.Error("Error:", err) utils.Error("Error:", err)
} }
backupSize = fileInfo.Size() backupSize = fileInfo.Size()
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
//Delete backup file from tmp folder //Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName)) err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil { if err != nil {
@@ -348,11 +394,12 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
} }
if config.prune { if config.prune {
//TODO: Delete old backup from remote server err := sshStorage.Prune(config.backupRetention)
utils.Info("Deleting old backup from a remote server is not implemented yet") if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
} }
}
utils.Info("Uploading backup archive to remote storage ... done ") utils.Info("Uploading backup archive to remote storage ... done ")
//Send notification //Send notification
utils.NotifySuccess(&utils.NotificationData{ utils.NotifySuccess(&utils.NotificationData{
@@ -382,11 +429,23 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
} }
utils.Info("Uploading backup archive to the remote FTP server ... ") utils.Info("Uploading backup archive to the remote FTP server ... ")
utils.Info("Backup name is %s", finalFileName) utils.Info("Backup name is %s", finalFileName)
err := CopyToFTP(finalFileName, config.remotePath) ftpConfig := loadFtpConfig()
ftpStorage, err := ftp.NewStorage(ftp.Config{
Host: ftpConfig.host,
Port: ftpConfig.port,
User: ftpConfig.user,
Password: ftpConfig.password,
RemotePath: config.remotePath,
LocalPath: tmpPath,
})
if err != nil { if err != nil {
utils.Fatal("Error uploading file to the remote FTP server: %s ", err) utils.Fatal("Error creating SSH storage: %s", err)
} }
err = ftpStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
//Get backup info //Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName)) fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil { if err != nil {
@@ -400,12 +459,15 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
} }
if config.prune { if config.prune {
//TODO: Delete old backup from remote server err := ftpStorage.Prune(config.backupRetention)
utils.Info("Deleting old backup from a remote server is not implemented yet") if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
} }
utils.Info("Uploading backup archive to the remote FTP server ... done ") utils.Info("Uploading backup archive to the remote FTP server ... done ")
//Send notification //Send notification
utils.NotifySuccess(&utils.NotificationData{ utils.NotifySuccess(&utils.NotificationData{
File: finalFileName, File: finalFileName,
@@ -419,7 +481,6 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
//Delete temp //Delete temp
deleteTemp() deleteTemp()
utils.Info("Backup completed successfully") utils.Info("Backup completed successfully")
} }
func encryptBackup(config *BackupConfig) { func encryptBackup(config *BackupConfig) {

View File

@@ -80,6 +80,7 @@ type AWSConfig struct {
accessKey string accessKey string
secretKey string secretKey string
region string region string
remotePath string
disableSsl bool disableSsl bool
forcePathStyle bool forcePathStyle bool
} }
@@ -129,7 +130,7 @@ func loadSSHConfig() (*SSHConfig, error) {
identifyFile: os.Getenv("SSH_IDENTIFY_FILE"), identifyFile: os.Getenv("SSH_IDENTIFY_FILE"),
}, nil }, nil
} }
func initFtpConfig() *FTPConfig { func loadFtpConfig() *FTPConfig {
//Initialize data configs //Initialize data configs
fConfig := FTPConfig{} fConfig := FTPConfig{}
fConfig.host = utils.GetEnvVariable("FTP_HOST", "FTP_HOST_NAME") fConfig.host = utils.GetEnvVariable("FTP_HOST", "FTP_HOST_NAME")
@@ -151,6 +152,8 @@ func initAWSConfig() *AWSConfig {
aConfig.accessKey = utils.GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY") aConfig.accessKey = utils.GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
aConfig.secretKey = utils.GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY") aConfig.secretKey = utils.GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
aConfig.bucket = utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME") aConfig.bucket = utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
aConfig.remotePath = utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
aConfig.region = os.Getenv("AWS_REGION") aConfig.region = os.Getenv("AWS_REGION")
disableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL")) disableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
if err != nil { if err != nil {
@@ -255,7 +258,7 @@ func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
func initTargetDbConfig() *targetDbConfig { func initTargetDbConfig() *targetDbConfig {
tdbConfig := targetDbConfig{} tdbConfig := targetDbConfig{}
tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST") tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
tdbConfig.targetDbPort = utils.EnvWithDefault("TARGET_DB_PORT", "3306") tdbConfig.targetDbPort = utils.EnvWithDefault("TARGET_DB_PORT", "5432")
tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME") tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME")
tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME") tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME")
tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD") tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD")

View File

@@ -1,81 +0,0 @@
package pkg
import (
"fmt"
"github.com/jlaffaye/ftp"
"io"
"os"
"path/filepath"
"time"
)
// initFtpClient initializes and authenticates an FTP client
func initFtpClient() (*ftp.ServerConn, error) {
ftpConfig := initFtpConfig()
ftpClient, err := ftp.Dial(fmt.Sprintf("%s:%s", ftpConfig.host, ftpConfig.port), ftp.DialWithTimeout(5*time.Second))
if err != nil {
return nil, fmt.Errorf("failed to connect to FTP: %w", err)
}
err = ftpClient.Login(ftpConfig.user, ftpConfig.password)
if err != nil {
return nil, fmt.Errorf("failed to log in to FTP: %w", err)
}
return ftpClient, nil
}
// CopyToFTP uploads a file to the remote FTP server
func CopyToFTP(fileName, remotePath string) (err error) {
ftpConfig := initFtpConfig()
ftpClient, err := initFtpClient()
if err != nil {
return err
}
defer ftpClient.Quit()
filePath := filepath.Join(tmpPath, fileName)
file, err := os.Open(filePath)
if err != nil {
return fmt.Errorf("failed to open file %s: %w", fileName, err)
}
defer file.Close()
remoteFilePath := filepath.Join(ftpConfig.remotePath, fileName)
err = ftpClient.Stor(remoteFilePath, file)
if err != nil {
return fmt.Errorf("failed to upload file %s: %w", fileName, err)
}
return nil
}
// CopyFromFTP downloads a file from the remote FTP server
func CopyFromFTP(fileName, remotePath string) (err error) {
ftpClient, err := initFtpClient()
if err != nil {
return err
}
defer ftpClient.Quit()
remoteFilePath := filepath.Join(remotePath, fileName)
r, err := ftpClient.Retr(remoteFilePath)
if err != nil {
return fmt.Errorf("failed to retrieve file %s: %w", fileName, err)
}
defer r.Close()
localFilePath := filepath.Join(tmpPath, fileName)
outFile, err := os.Create(localFilePath)
if err != nil {
return fmt.Errorf("failed to create local file %s: %w", fileName, err)
}
defer outFile.Close()
_, err = io.Copy(outFile, r)
if err != nil {
return fmt.Errorf("failed to copy data to local file %s: %w", fileName, err)
}
return nil
}

View File

@@ -15,75 +15,14 @@ import (
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"strings" "strings"
"time"
) )
func copyToTmp(sourcePath string, backupFileName string) { func intro() {
//Copy backup from storage to /tmp utils.Info("Starting MySQL Backup...")
err := utils.CopyFile(filepath.Join(sourcePath, backupFileName), filepath.Join(tmpPath, backupFileName)) utils.Info("Copyright (c) 2024 Jonas Kaninda ")
if err != nil {
utils.Fatal(fmt.Sprintf("Error copying file %s %s", backupFileName, err))
}
} }
func moveToBackup(backupFileName string, destinationPath string) {
//Copy backup from tmp folder to storage destination
err := utils.CopyFile(filepath.Join(tmpPath, backupFileName), filepath.Join(destinationPath, backupFileName))
if err != nil {
utils.Fatal(fmt.Sprintf("Error copying file %s %s", backupFileName, err))
} // copyToTmp copy file to temporary directory
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
if err != nil {
fmt.Println("Error deleting file:", err)
}
utils.Info("Database has been backed up and copied to %s", filepath.Join(destinationPath, backupFileName))
}
func deleteOldBackup(retentionDays int) {
utils.Info("Deleting old backups...")
storagePath = os.Getenv("STORAGE_PATH")
// Define the directory path
backupDir := storagePath + "/"
// Get current time
currentTime := time.Now()
// Delete file
deleteFile := func(filePath string) error {
err := os.Remove(filePath)
if err != nil {
utils.Fatal(fmt.Sprintf("Error: %s", err))
} else {
utils.Info("File %s has been deleted successfully", filePath)
}
return err
}
// Walk through the directory and delete files modified more than specified days ago
err := filepath.Walk(backupDir, func(filePath string, fileInfo os.FileInfo, err error) error {
if err != nil {
return err
}
// Check if it's a regular file and if it was modified more than specified days ago
if fileInfo.Mode().IsRegular() {
timeDiff := currentTime.Sub(fileInfo.ModTime())
if timeDiff.Hours() > 24*float64(retentionDays) {
err := deleteFile(filePath)
if err != nil {
return err
}
}
}
return nil
})
if err != nil {
utils.Fatal(fmt.Sprintf("Error: %s", err))
return
}
utils.Info("Deleting old backups...done")
}
func deleteTemp() { func deleteTemp() {
utils.Info("Deleting %s ...", tmpPath) utils.Info("Deleting %s ...", tmpPath)
err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error { err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
@@ -127,10 +66,8 @@ func testDatabaseConnection(db *dbConfig) {
utils.Info("Successfully connected to %s database", db.dbName) utils.Info("Successfully connected to %s database", db.dbName)
} }
func intro() {
utils.Info("Starting MySQL Backup...") // checkPubKeyFile checks gpg public key
utils.Info("Copyright (c) 2024 Jonas Kaninda ")
}
func checkPubKeyFile(pubKey string) (string, error) { func checkPubKeyFile(pubKey string) (string, error) {
// Define possible key file names // Define possible key file names
keyFiles := []string{filepath.Join(gpgHome, "public_key.asc"), filepath.Join(gpgHome, "public_key.gpg"), pubKey} keyFiles := []string{filepath.Join(gpgHome, "public_key.asc"), filepath.Join(gpgHome, "public_key.gpg"), pubKey}
@@ -152,6 +89,8 @@ func checkPubKeyFile(pubKey string) (string, error) {
// Return an error if neither file exists // Return an error if neither file exists
return "", fmt.Errorf("no public key file found") return "", fmt.Errorf("no public key file found")
} }
// checkPrKeyFile checks private key
func checkPrKeyFile(prKey string) (string, error) { func checkPrKeyFile(prKey string) (string, error) {
// Define possible key file names // Define possible key file names
keyFiles := []string{filepath.Join(gpgHome, "private_key.asc"), filepath.Join(gpgHome, "private_key.gpg"), prKey} keyFiles := []string{filepath.Join(gpgHome, "private_key.asc"), filepath.Join(gpgHome, "private_key.gpg"), prKey}
@@ -173,8 +112,9 @@ func checkPrKeyFile(prKey string) (string, error) {
// Return an error if neither file exists // Return an error if neither file exists
return "", fmt.Errorf("no public key file found") return "", fmt.Errorf("no public key file found")
} }
// readConf reads config file and returns Config
func readConf(configFile string) (*Config, error) { func readConf(configFile string) (*Config, error) {
//configFile := filepath.Join("./", filename)
if utils.FileExists(configFile) { if utils.FileExists(configFile) {
buf, err := os.ReadFile(configFile) buf, err := os.ReadFile(configFile)
if err != nil { if err != nil {
@@ -191,6 +131,8 @@ func readConf(configFile string) (*Config, error) {
} }
return nil, fmt.Errorf("config file %q not found", configFile) return nil, fmt.Errorf("config file %q not found", configFile)
} }
// checkConfigFile checks config files and returns one config file
func checkConfigFile(filePath string) (string, error) { func checkConfigFile(filePath string) (string, error) {
// Define possible config file names // Define possible config file names
configFiles := []string{filepath.Join(workingDir, "config.yaml"), filepath.Join(workingDir, "config.yml"), filePath} configFiles := []string{filepath.Join(workingDir, "config.yaml"), filepath.Join(workingDir, "config.yml"), filePath}

View File

@@ -8,6 +8,10 @@ package pkg
import ( import (
"github.com/jkaninda/encryptor" "github.com/jkaninda/encryptor"
"github.com/jkaninda/mysql-bkup/pkg/storage/ftp"
"github.com/jkaninda/mysql-bkup/pkg/storage/local"
"github.com/jkaninda/mysql-bkup/pkg/storage/s3"
"github.com/jkaninda/mysql-bkup/pkg/storage/ssh"
"github.com/jkaninda/mysql-bkup/utils" "github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"os" "os"
@@ -22,9 +26,7 @@ func StartRestore(cmd *cobra.Command) {
switch restoreConf.storage { switch restoreConf.storage {
case "local": case "local":
utils.Info("Restore database from local") localRestore(dbConf, restoreConf)
copyToTmp(storagePath, restoreConf.file)
RestoreDatabase(dbConf, restoreConf)
case "s3", "S3": case "s3", "S3":
restoreFromS3(dbConf, restoreConf) restoreFromS3(dbConf, restoreConf)
case "ssh", "SSH", "remote": case "ssh", "SSH", "remote":
@@ -32,33 +34,89 @@ func StartRestore(cmd *cobra.Command) {
case "ftp", "FTP": case "ftp", "FTP":
restoreFromFTP(dbConf, restoreConf) restoreFromFTP(dbConf, restoreConf)
default: default:
utils.Info("Restore database from local") localRestore(dbConf, restoreConf)
copyToTmp(storagePath, restoreConf.file)
RestoreDatabase(dbConf, restoreConf)
} }
} }
func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) {
utils.Info("Restore database from local")
localStorage := local.NewStorage(local.Config{
RemotePath: storagePath,
LocalPath: tmpPath,
})
err := localStorage.CopyFrom(restoreConf.file)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(dbConf, restoreConf)
}
func restoreFromS3(db *dbConfig, conf *RestoreConfig) { func restoreFromS3(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from s3") utils.Info("Restore database from s3")
err := DownloadFile(tmpPath, conf.file, conf.bucket, conf.s3Path) awsConfig := initAWSConfig()
if conf.remotePath == "" {
conf.remotePath = awsConfig.remotePath
}
s3Storage, err := s3.NewStorage(s3.Config{
Endpoint: awsConfig.endpoint,
Bucket: awsConfig.bucket,
AccessKey: awsConfig.accessKey,
SecretKey: awsConfig.secretKey,
Region: awsConfig.region,
DisableSsl: awsConfig.disableSsl,
ForcePathStyle: awsConfig.forcePathStyle,
RemotePath: awsConfig.remotePath,
LocalPath: tmpPath,
})
if err != nil { if err != nil {
utils.Fatal("Error download file from s3 %s %v ", conf.file, err) utils.Fatal("Error creating s3 storage: %s", err)
}
err = s3Storage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error download file from S3 storage: %s", err)
} }
RestoreDatabase(db, conf) RestoreDatabase(db, conf)
} }
func restoreFromRemote(db *dbConfig, conf *RestoreConfig) { func restoreFromRemote(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from remote server") utils.Info("Restore database from remote server")
err := CopyFromRemote(conf.file, conf.remotePath) sshConfig, err := loadSSHConfig()
if err != nil { if err != nil {
utils.Fatal("Error download file from remote server: %s %v", filepath.Join(conf.remotePath, conf.file), err) utils.Fatal("Error loading ssh config: %s", err)
}
sshStorage, err := ssh.NewStorage(ssh.Config{
Host: sshConfig.hostName,
Port: sshConfig.port,
User: sshConfig.user,
Password: sshConfig.password,
RemotePath: conf.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
}
err = sshStorage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error copying backup file: %w", err)
} }
RestoreDatabase(db, conf) RestoreDatabase(db, conf)
} }
func restoreFromFTP(db *dbConfig, conf *RestoreConfig) { func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from FTP server") utils.Info("Restore database from FTP server")
err := CopyFromFTP(conf.file, conf.remotePath) ftpConfig := loadFtpConfig()
ftpStorage, err := ftp.NewStorage(ftp.Config{
Host: ftpConfig.host,
Port: ftpConfig.port,
User: ftpConfig.user,
Password: ftpConfig.password,
RemotePath: conf.remotePath,
LocalPath: tmpPath,
})
if err != nil { if err != nil {
utils.Fatal("Error download file from FTP server: %s %v", filepath.Join(conf.remotePath, conf.file), err) utils.Fatal("Error creating SSH storage: %s", err)
}
err = ftpStorage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
} }
RestoreDatabase(db, conf) RestoreDatabase(db, conf)
} }

151
pkg/s3.go
View File

@@ -1,151 +0,0 @@
// Package pkg
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"bytes"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/jkaninda/mysql-bkup/utils"
"net/http"
"os"
"path/filepath"
"time"
)
// CreateSession creates a new AWS session
func CreateSession() (*session.Session, error) {
awsConfig := initAWSConfig()
// Configure to use MinIO Server
s3Config := &aws.Config{
Credentials: credentials.NewStaticCredentials(awsConfig.accessKey, awsConfig.secretKey, ""),
Endpoint: aws.String(awsConfig.endpoint),
Region: aws.String(awsConfig.region),
DisableSSL: aws.Bool(awsConfig.disableSsl),
S3ForcePathStyle: aws.Bool(awsConfig.forcePathStyle),
}
return session.NewSession(s3Config)
}
// UploadFileToS3 uploads a file to S3 with a given prefix
func UploadFileToS3(filePath, key, bucket, prefix string) error {
sess, err := CreateSession()
if err != nil {
return err
}
svc := s3.New(sess)
file, err := os.Open(filepath.Join(filePath, key))
if err != nil {
return err
}
defer file.Close()
fileInfo, err := file.Stat()
if err != nil {
return err
}
objectKey := filepath.Join(prefix, key)
buffer := make([]byte, fileInfo.Size())
file.Read(buffer)
fileBytes := bytes.NewReader(buffer)
fileType := http.DetectContentType(buffer)
_, err = svc.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(objectKey),
Body: fileBytes,
ContentLength: aws.Int64(fileInfo.Size()),
ContentType: aws.String(fileType),
})
if err != nil {
return err
}
return nil
}
func DownloadFile(destinationPath, key, bucket, prefix string) error {
sess, err := CreateSession()
if err != nil {
return err
}
utils.Info("Download data from S3 storage...")
file, err := os.Create(filepath.Join(destinationPath, key))
if err != nil {
utils.Error("Failed to create file", err)
return err
}
defer file.Close()
objectKey := filepath.Join(prefix, key)
downloader := s3manager.NewDownloader(sess)
numBytes, err := downloader.Download(file,
&s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(objectKey),
})
if err != nil {
utils.Error("Failed to download file %s", key)
return err
}
utils.Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)
return nil
}
// DeleteOldBackup removes objects under prefix in bucket whose last
// modification time is older than the retention window (in days).
// Per-object delete failures are logged and skipped; a listing failure
// is returned to the caller.
func DeleteOldBackup(bucket, prefix string, retention int) error {
	utils.Info("Deleting old backups...")
	utils.Info("Bucket %s Prefix: %s Retention: %d", bucket, prefix, retention)
	sess, err := CreateSession()
	if err != nil {
		return err
	}
	svc := s3.New(sess)
	// Objects modified before this cutoff are eligible for deletion.
	cutoff := time.Now().AddDate(0, 0, -retention)
	listObjectsInput := &s3.ListObjectsV2Input{
		Bucket: aws.String(bucket),
		Prefix: aws.String(prefix),
	}
	err = svc.ListObjectsV2Pages(listObjectsInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, object := range page.Contents {
			if object.LastModified.Before(cutoff) {
				utils.Info("Deleting old backup: %s", *object.Key)
				// Object is older than retention days, delete it
				_, err := svc.DeleteObject(&s3.DeleteObjectInput{
					Bucket: aws.String(bucket),
					Key:    object.Key,
				})
				if err != nil {
					utils.Info("Failed to delete object %s: %v", *object.Key, err)
				} else {
					utils.Info("Deleted object %s", *object.Key)
				}
			}
		}
		return !lastPage
	})
	if err != nil {
		utils.Error("Failed to list objects: %v", err)
		// Previously this error was swallowed and the function returned
		// nil; propagate it so callers can react to a failed prune.
		return err
	}
	utils.Info("Deleting old backups...done")
	return nil
}

View File

@@ -1,111 +0,0 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"context"
"errors"
"fmt"
"github.com/bramvdbogaerde/go-scp"
"github.com/bramvdbogaerde/go-scp/auth"
"github.com/jkaninda/mysql-bkup/utils"
"golang.org/x/crypto/ssh"
"os"
"path/filepath"
)
// createSSHClientConfig sets up the SSH client configuration based on the
// provided SSHConfig. A private key is preferred when present; otherwise
// password authentication is used as a fallback.
func createSSHClientConfig(sshConfig *SSHConfig) (ssh.ClientConfig, error) {
	if sshConfig.identifyFile != "" && utils.FileExists(sshConfig.identifyFile) {
		return auth.PrivateKey(sshConfig.user, sshConfig.identifyFile, ssh.InsecureIgnoreHostKey())
	}
	if sshConfig.password == "" {
		return ssh.ClientConfig{}, errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty")
	}
	utils.Warn("Accessing the remote server using password, which is not recommended.")
	return auth.PasswordKey(sshConfig.user, sshConfig.password, ssh.InsecureIgnoreHostKey())
}
// CopyToRemote copies a file from tmpPath to a remote server via SCP.
// The SSH configuration is loaded from the environment.
func CopyToRemote(fileName, remotePath string) error {
	// Load environment variables
	sshConfig, err := loadSSHConfig()
	if err != nil {
		return fmt.Errorf("failed to load SSH configuration: %w", err)
	}
	// Initialize SSH client config
	clientConfig, err := createSSHClientConfig(sshConfig)
	if err != nil {
		return fmt.Errorf("failed to create SSH client config: %w", err)
	}
	// Create a new SCP client
	client := scp.NewClient(fmt.Sprintf("%s:%s", sshConfig.hostName, sshConfig.port), &clientConfig)
	// Connect to the remote server
	err = client.Connect()
	if err != nil {
		return errors.New("Couldn't establish a connection to the remote server\n")
	}
	// Register the close immediately: previously it was deferred only after
	// os.Open succeeded, leaking the connection when the open failed.
	defer client.Close()
	// Open the local file
	filePath := filepath.Join(tmpPath, fileName)
	file, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("failed to open file %s: %w", filePath, err)
	}
	// The local file descriptor was never closed before.
	defer file.Close()
	// Copy file to the remote server
	err = client.CopyFromFile(context.Background(), *file, filepath.Join(remotePath, fileName), "0655")
	if err != nil {
		return fmt.Errorf("failed to copy file to remote server: %w", err)
	}
	return nil
}
// CopyFromRemote downloads a file from remotePath on the remote server
// into tmpPath via SCP. The SSH configuration is loaded from the environment.
func CopyFromRemote(fileName, remotePath string) error {
	// Load environment variables
	sshConfig, err := loadSSHConfig()
	if err != nil {
		return fmt.Errorf("failed to load SSH configuration: %w", err)
	}
	// Initialize SSH client config
	clientConfig, err := createSSHClientConfig(sshConfig)
	if err != nil {
		return fmt.Errorf("failed to create SSH client config: %w", err)
	}
	// Create a new SCP client
	client := scp.NewClient(fmt.Sprintf("%s:%s", sshConfig.hostName, sshConfig.port), &clientConfig)
	// Connect to the remote server
	err = client.Connect()
	if err != nil {
		return errors.New("Couldn't establish a connection to the remote server\n")
	}
	// Close client connection after the file has been copied
	defer client.Close()
	file, err := os.OpenFile(filepath.Join(tmpPath, fileName), os.O_RDWR|os.O_CREATE, 0777)
	if err != nil {
		// Previously this only printed a message and then carried on with a
		// nil *os.File, which made the subsequent copy fail obscurely.
		return fmt.Errorf("couldn't open the output file: %w", err)
	}
	defer file.Close()
	// The context can be adjusted to provide time-outs or inherit from other
	// contexts if this is embedded in a larger application.
	err = client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName))
	if err != nil {
		utils.Error("Error while copying file %s ", err)
		return err
	}
	return nil
}

118
pkg/storage/ftp/ftp.go Normal file
View File

@@ -0,0 +1,118 @@
package ftp
import (
"fmt"
"github.com/jkaninda/mysql-bkup/pkg/storage"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/jlaffaye/ftp"
"io"
"os"
"path/filepath"
"time"
)
// ftpStorage implements storage.Storage over an FTP connection.
type ftpStorage struct {
	*storage.Backend
	// client is the authenticated FTP connection created by createClient.
	client *ftp.ServerConn
}

// Config holds the FTP connection details and the local/remote
// directories used for transfers.
type Config struct {
	Host     string
	User     string
	Password string
	Port     string
	// LocalPath is the local directory backups are read from / written to.
	LocalPath string
	// RemotePath is the destination directory on the FTP server.
	RemotePath string
}
// createClient dials the FTP server with a 5s timeout and authenticates
// with the configured credentials. The caller owns the returned
// connection and is responsible for calling Quit on it.
func createClient(conf Config) (*ftp.ServerConn, error) {
	addr := fmt.Sprintf("%s:%s", conf.Host, conf.Port)
	client, err := ftp.Dial(addr, ftp.DialWithTimeout(5*time.Second))
	if err != nil {
		return nil, fmt.Errorf("failed to connect to FTP: %w", err)
	}
	if err := client.Login(conf.User, conf.Password); err != nil {
		return nil, fmt.Errorf("failed to log in to FTP: %w", err)
	}
	return client, nil
}
// NewStorage connects to the configured FTP server and returns a Storage
// backed by that connection.
func NewStorage(conf Config) (storage.Storage, error) {
	ftpClient, err := createClient(conf)
	if err != nil {
		return nil, err
	}
	backend := &storage.Backend{
		LocalPath:  conf.LocalPath,
		RemotePath: conf.RemotePath,
	}
	return &ftpStorage{Backend: backend, client: ftpClient}, nil
}
// Copy uploads fileName from LocalPath to RemotePath on the FTP server.
// NOTE(review): the deferred Quit closes the shared connection, so this
// storage instance is effectively single-use — confirm callers create a
// fresh Storage per operation.
func (s ftpStorage) Copy(fileName string) error {
	conn := s.client
	defer conn.Quit()

	localFile := filepath.Join(s.LocalPath, fileName)
	src, err := os.Open(localFile)
	if err != nil {
		return fmt.Errorf("failed to open file %s: %w", fileName, err)
	}
	defer src.Close()

	if err := conn.Stor(filepath.Join(s.RemotePath, fileName), src); err != nil {
		return fmt.Errorf("failed to upload file %s: %w", localFile, err)
	}
	return nil
}
// CopyFrom downloads fileName from RemotePath on the FTP server into
// LocalPath, streaming the response body into a freshly created file.
func (s ftpStorage) CopyFrom(fileName string) error {
	conn := s.client
	defer conn.Quit()

	resp, err := conn.Retr(filepath.Join(s.RemotePath, fileName))
	if err != nil {
		return fmt.Errorf("failed to retrieve file %s: %w", fileName, err)
	}
	defer resp.Close()

	dst, err := os.Create(filepath.Join(s.LocalPath, fileName))
	if err != nil {
		return fmt.Errorf("failed to create local file %s: %w", fileName, err)
	}
	defer dst.Close()

	if _, err := io.Copy(dst, resp); err != nil {
		return fmt.Errorf("failed to copy data to local file %s: %w", fileName, err)
	}
	return nil
}
// Prune deletes old backup created more than specified days.
// NOTE(review): not implemented for FTP yet — retentionDays is ignored
// and the call always succeeds after logging.
func (s ftpStorage) Prune(retentionDays int) error {
	utils.Info("Deleting old backup from a remote server is not implemented yet")
	return nil
}
// Name returns the storage name ("ftp"), identifying this backend.
func (s ftpStorage) Name() string {
	return "ftp"
}

108
pkg/storage/local/local.go Normal file
View File

@@ -0,0 +1,108 @@
package local
import (
"github.com/jkaninda/mysql-bkup/pkg/storage"
"github.com/jkaninda/mysql-bkup/utils"
"io"
"os"
"path/filepath"
"time"
)
// localStorage implements storage.Storage by copying files between two
// local directories.
type localStorage struct {
	*storage.Backend
}

// Config holds the source (LocalPath) and destination (RemotePath)
// directories for local copies.
type Config struct {
	LocalPath  string
	RemotePath string
}
// NewStorage returns a Storage that copies files between two local
// directories; it performs no I/O itself and cannot fail.
func NewStorage(conf Config) storage.Storage {
	backend := &storage.Backend{
		LocalPath:  conf.LocalPath,
		RemotePath: conf.RemotePath,
	}
	return &localStorage{Backend: backend}
}
// Copy copies the named backup file from LocalPath to RemotePath.
func (l localStorage) Copy(file string) error {
	src := filepath.Join(l.LocalPath, file)
	// Return any stat failure, not only "does not exist": other errors
	// (e.g. permission denied) were previously silently ignored.
	if _, err := os.Stat(src); err != nil {
		return err
	}
	return copyFile(src, filepath.Join(l.RemotePath, file))
}
// CopyFrom copies the named backup file from RemotePath back to LocalPath.
func (l localStorage) CopyFrom(file string) error {
	src := filepath.Join(l.RemotePath, file)
	// Return any stat failure, not only "does not exist": other errors
	// (e.g. permission denied) were previously silently ignored.
	if _, err := os.Stat(src); err != nil {
		return err
	}
	return copyFile(src, filepath.Join(l.LocalPath, file))
}
// Prune deletes regular files under RemotePath whose modification time is
// older than retentionDays days. The first deletion failure aborts the
// walk and is returned to the caller.
func (l localStorage) Prune(retentionDays int) error {
	currentTime := time.Now()
	// Delete file
	deleteFile := func(filePath string) error {
		err := os.Remove(filePath)
		if err != nil {
			// Previously utils.Fatal was called here, which aborted the whole
			// process (including scheduled mode) over a single stale file.
			utils.Error("Failed to delete file %s: %v", filePath, err)
			return err
		}
		utils.Info("File %s deleted successfully", filePath)
		return nil
	}
	// Walk through the directory and delete files modified more than specified days ago
	err := filepath.Walk(l.RemotePath, func(filePath string, fileInfo os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Check if it's a regular file and if it was modified more than specified days ago
		if fileInfo.Mode().IsRegular() {
			timeDiff := currentTime.Sub(fileInfo.ModTime())
			if timeDiff.Hours() > 24*float64(retentionDays) {
				if err := deleteFile(filePath); err != nil {
					return err
				}
			}
		}
		return nil
	})
	return err
}
// Name returns the storage name ("local"), identifying this backend.
func (l localStorage) Name() string {
	return "local"
}
func copyFile(src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
_, err = io.Copy(out, in)
if err != nil {
out.Close()
return err
}
return out.Close()
}

162
pkg/storage/s3/s3.go Normal file
View File

@@ -0,0 +1,162 @@
package s3
import (
"bytes"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/jkaninda/mysql-bkup/pkg/storage"
"github.com/jkaninda/mysql-bkup/utils"
"net/http"
"os"
"path/filepath"
"time"
)
// s3Storage implements storage.Storage backed by an S3-compatible bucket.
type s3Storage struct {
	*storage.Backend
	// client is the AWS session used to build service clients per call.
	client *session.Session
	// bucket is the target S3 bucket name.
	bucket string
}

// Config holds the S3 connection settings and transfer paths.
type Config struct {
	Endpoint  string
	Bucket    string
	AccessKey string
	SecretKey string
	Region    string
	// DisableSsl disables TLS; ForcePathStyle enables path-style
	// addressing (needed for MinIO and some S3-compatible servers).
	DisableSsl     bool
	ForcePathStyle bool
	LocalPath      string
	RemotePath     string
}
// createSession builds an AWS session from the given configuration.
// An explicit endpoint plus path-style addressing allows S3-compatible
// servers such as MinIO to be targeted.
func createSession(conf Config) (*session.Session, error) {
	return session.NewSession(&aws.Config{
		Credentials:      credentials.NewStaticCredentials(conf.AccessKey, conf.SecretKey, ""),
		Endpoint:         aws.String(conf.Endpoint),
		Region:           aws.String(conf.Region),
		DisableSSL:       aws.Bool(conf.DisableSsl),
		S3ForcePathStyle: aws.Bool(conf.ForcePathStyle),
	})
}
// NewStorage creates an AWS session for the given configuration and
// returns an S3-backed Storage.
func NewStorage(conf Config) (storage.Storage, error) {
	sess, err := createSession(conf)
	if err != nil {
		return nil, err
	}
	backend := &storage.Backend{
		LocalPath:  conf.LocalPath,
		RemotePath: conf.RemotePath,
	}
	return &s3Storage{Backend: backend, client: sess, bucket: conf.Bucket}, nil
}
// Copy uploads the local backup file LocalPath/fileName to the bucket
// under RemotePath/fileName, detecting content type from the file bytes.
func (s s3Storage) Copy(fileName string) error {
	svc := s3.New(s.client)
	// Read the whole file up front: the previous single file.Read call
	// ignored its error and could short-read, silently uploading a
	// truncated object.
	data, err := os.ReadFile(filepath.Join(s.LocalPath, fileName))
	if err != nil {
		return err
	}
	// NOTE(review): filepath.Join uses the OS separator; S3 keys are
	// slash-separated, which matches on Linux where this runs.
	objectKey := filepath.Join(s.RemotePath, fileName)
	_, err = svc.PutObject(&s3.PutObjectInput{
		Bucket:        aws.String(s.bucket),
		Key:           aws.String(objectKey),
		Body:          bytes.NewReader(data),
		ContentLength: aws.Int64(int64(len(data))),
		ContentType:   aws.String(http.DetectContentType(data)),
	})
	return err
}
// CopyFrom downloads the object RemotePath/fileName from the bucket into
// LocalPath/fileName using the s3manager downloader.
func (s s3Storage) CopyFrom(fileName string) error {
	file, err := os.Create(filepath.Join(s.LocalPath, fileName))
	if err != nil {
		return err
	}
	defer file.Close()
	objectKey := filepath.Join(s.RemotePath, fileName)
	downloader := s3manager.NewDownloader(s.client)
	numBytes, err := downloader.Download(file,
		&s3.GetObjectInput{
			Bucket: aws.String(s.bucket),
			Key:    aws.String(objectKey),
		})
	if err != nil {
		// Include the underlying error, which was previously dropped from the log.
		utils.Error("Failed to download file %s: %v", fileName, err)
		return err
	}
	utils.Info("Backup downloaded: %s , bytes size: %d ", file.Name(), uint64(numBytes))
	return nil
}
// Prune removes objects under RemotePath whose last modification time is
// older than retentionDays days. Per-object delete failures are logged
// and skipped; a listing failure is returned to the caller.
func (s s3Storage) Prune(retentionDays int) error {
	svc := s3.New(s.client)
	// Objects modified before this cutoff are eligible for deletion.
	backupRetentionDays := time.Now().AddDate(0, 0, -retentionDays)
	// List objects in the bucket
	listObjectsInput := &s3.ListObjectsV2Input{
		Bucket: aws.String(s.bucket),
		Prefix: aws.String(s.RemotePath),
	}
	err := svc.ListObjectsV2Pages(listObjectsInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, object := range page.Contents {
			if object.LastModified.Before(backupRetentionDays) {
				utils.Info("Deleting old backup: %s", *object.Key)
				// Object is older than retention days, delete it
				_, err := svc.DeleteObject(&s3.DeleteObjectInput{
					Bucket: aws.String(s.bucket),
					Key:    object.Key,
				})
				if err != nil {
					utils.Info("Failed to delete object %s: %v", *object.Key, err)
				} else {
					utils.Info("Deleted object %s", *object.Key)
				}
			}
		}
		return !lastPage
	})
	if err != nil {
		utils.Error("Failed to list objects: %v", err)
		// Previously this error was swallowed and the function returned
		// nil; propagate it so callers can react to a failed prune.
		return err
	}
	utils.Info("Deleting old backups...done")
	return nil
}
// Name returns the storage name ("s3"), identifying this backend.
func (s s3Storage) Name() string {
	return "s3"
}

116
pkg/storage/ssh/ssh.go Normal file
View File

@@ -0,0 +1,116 @@
package ssh
import (
"context"
"errors"
"fmt"
"github.com/bramvdbogaerde/go-scp"
"github.com/bramvdbogaerde/go-scp/auth"
"github.com/jkaninda/mysql-bkup/pkg/storage"
"github.com/jkaninda/mysql-bkup/utils"
"golang.org/x/crypto/ssh"
"os"
"path/filepath"
)
// sshStorage implements storage.Storage over SCP.
type sshStorage struct {
	*storage.Backend
	// client is the configured (but not yet connected) SCP client; each
	// Copy/CopyFrom call connects and closes it.
	client scp.Client
}

// Config holds the SSH connection details
type Config struct {
	Host     string
	User     string
	Password string
	Port     string
	// IdentifyFile is the path to a private key; when set and the file
	// exists it takes precedence over Password.
	IdentifyFile string
	LocalPath    string
	RemotePath   string
}
// createClient builds an SCP client for the configured host, preferring
// key-based authentication and falling back to password auth.
// NOTE(review): host-key verification is disabled (InsecureIgnoreHostKey),
// which permits MITM — consider a known_hosts callback.
func createClient(conf Config) (scp.Client, error) {
	addr := fmt.Sprintf("%s:%s", conf.Host, conf.Port)
	if conf.IdentifyFile != "" && utils.FileExists(conf.IdentifyFile) {
		cfg, err := auth.PrivateKey(conf.User, conf.IdentifyFile, ssh.InsecureIgnoreHostKey())
		return scp.NewClient(addr, &cfg), err
	}
	if conf.Password == "" {
		return scp.Client{}, errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty")
	}
	utils.Warn("Accessing the remote server using password, which is not recommended.")
	cfg, err := auth.PasswordKey(conf.User, conf.Password, ssh.InsecureIgnoreHostKey())
	return scp.NewClient(addr, &cfg), err
}
// NewStorage builds an SCP client from the configuration and returns an
// SSH-backed Storage. No connection is made until Copy/CopyFrom is called.
func NewStorage(conf Config) (storage.Storage, error) {
	scpClient, err := createClient(conf)
	if err != nil {
		return nil, err
	}
	backend := &storage.Backend{
		LocalPath:  conf.LocalPath,
		RemotePath: conf.RemotePath,
	}
	return &sshStorage{Backend: backend, client: scpClient}, nil
}
// Copy uploads the local backup file LocalPath/fileName to RemotePath on
// the remote host via SCP.
func (s sshStorage) Copy(fileName string) error {
	client := s.client
	// Connect to the remote server
	if err := client.Connect(); err != nil {
		return errors.New("couldn't establish a connection to the remote server")
	}
	// Register the close immediately: previously it was deferred only after
	// os.Open succeeded, leaking the connection when the open failed.
	defer client.Close()
	// Open the local file
	filePath := filepath.Join(s.LocalPath, fileName)
	file, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("failed to open file %s: %w", filePath, err)
	}
	// The local file descriptor was never closed before.
	defer file.Close()
	// Copy file to the remote server
	if err := client.CopyFromFile(context.Background(), *file, filepath.Join(s.RemotePath, fileName), "0655"); err != nil {
		return fmt.Errorf("failed to copy file to remote server: %w", err)
	}
	return nil
}
// CopyFrom downloads RemotePath/fileName from the remote host into
// LocalPath/fileName via SCP.
func (s sshStorage) CopyFrom(fileName string) error {
	// Create a new SCP client
	client := s.client
	// Connect to the remote server
	err := client.Connect()
	if err != nil {
		return errors.New("couldn't establish a connection to the remote server")
	}
	// Close client connection after the file has been copied
	defer client.Close()
	// 0644 instead of the previous 0777: backups may contain sensitive
	// database dumps and need not be executable or world-writable.
	file, err := os.OpenFile(filepath.Join(s.LocalPath, fileName), os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return errors.New("couldn't open the output file")
	}
	defer file.Close()
	err = client.CopyFromRemote(context.Background(), file, filepath.Join(s.RemotePath, fileName))
	if err != nil {
		return err
	}
	return nil
}
// Prune deletes old backup created more than specified days.
// NOTE(review): not implemented for SSH yet — retentionDays is ignored
// and the call always succeeds after logging.
func (s sshStorage) Prune(retentionDays int) error {
	utils.Info("Deleting old backup from a remote server is not implemented yet")
	return nil
}
// Name returns the storage name ("ssh"), identifying this backend.
func (s sshStorage) Name() string {
	return "ssh"
}

14
pkg/storage/storage.go Normal file
View File

@@ -0,0 +1,14 @@
package storage
// Storage is the common interface implemented by every backup backend
// (local, s3, ssh, ftp).
type Storage interface {
	// Copy uploads fileName from the local path to the remote/destination path.
	Copy(fileName string) error
	// CopyFrom downloads fileName from the remote/destination path locally.
	CopyFrom(fileName string) error
	// Prune removes backups older than retentionDays days, where supported.
	Prune(retentionDays int) error
	// Name returns a short identifier for the backend (e.g. "s3").
	Name() string
}

// Backend holds the paths shared by all storage implementations.
type Backend struct {
	//Local Path
	LocalPath string
	//Remote path or Destination path
	RemotePath string
}

View File

@@ -6,18 +6,17 @@
**/ **/
package pkg package pkg
const cronLogFile = "/var/log/mysql-bkup.log"
const tmpPath = "/tmp/backup" const tmpPath = "/tmp/backup"
const algorithm = "aes256"
const gpgHome = "/config/gnupg" const gpgHome = "/config/gnupg"
const gpgExtension = "gpg" const gpgExtension = "gpg"
const workingDir = "/config"
const timeFormat = "2006-01-02 at 15:04:05" const timeFormat = "2006-01-02 at 15:04:05"
var ( var (
storage = "local" storage = "local"
file = "" file = ""
storagePath = "/backup" storagePath = "/backup"
workingDir = "/config"
disableCompression = false disableCompression = false
encryption = false encryption = false
usingKey = false usingKey = false
@@ -28,6 +27,7 @@ var (
// dbHVars Required environment variables for database // dbHVars Required environment variables for database
var dbHVars = []string{ var dbHVars = []string{
"DB_HOST", "DB_HOST",
"DB_PORT",
"DB_PASSWORD", "DB_PASSWORD",
"DB_USERNAME", "DB_USERNAME",
"DB_NAME", "DB_NAME",
@@ -43,12 +43,12 @@ var tdbRVars = []string{
var dbConf *dbConfig var dbConf *dbConfig
var targetDbConf *targetDbConfig var targetDbConf *targetDbConfig
// sshHVars Required environment variables for SSH remote server storage // sshVars Required environment variables for SSH remote server storage
var sshHVars = []string{ var sshVars = []string{
"SSH_USER", "SSH_USER",
"REMOTE_PATH",
"SSH_HOST_NAME", "SSH_HOST_NAME",
"SSH_PORT", "SSH_PORT",
"REMOTE_PATH",
} }
var ftpVars = []string{ var ftpVars = []string{
"FTP_HOST_NAME", "FTP_HOST_NAME",