Compare commits

...

12 Commits

SHA1        Message (Author, Date)
fd444293b4  Merge pull request #153 from jkaninda/refactor: fix: S3 remote path when backing up multiple databases (2024-12-07 02:34:42 +01:00)
1940ceba9a  fix: S3 remote path when backing up multiple databases (Jonas Kaninda, 2024-12-07 02:25:22 +01:00)
07d580a8a9  refactoring of code (Jonas Kaninda, 2024-12-07 02:23:38 +01:00)
9a261b22ec  Merge pull request #152 from jkaninda/refactor: docs: update features (2024-12-06 22:09:12 +01:00)
e7a58f0569  docs: update features (Jonas Kaninda, 2024-12-06 22:08:51 +01:00)
1b529725d7  Merge pull request #151 from jkaninda/refactor: fix: fatal logger notification (2024-12-06 21:03:47 +01:00)
d8c73560b8  fix: fatal logger notification (Jonas Kaninda, 2024-12-06 21:00:26 +01:00)
d5a0adc981  refactoring of code (Jonas Kaninda, 2024-12-06 20:53:46 +01:00)
6df3bae9e2  Merge pull request #150 from jkaninda/feature/azure-blob: chore: update base image tag version (2024-12-06 20:23:46 +01:00)
f7d624fd15  chore: update base image tag version (Jonas Kaninda, 2024-12-06 20:23:08 +01:00)
1e9e1ed951  Merge pull request #149 from jkaninda/feature/azure-blob: chore: update app package (2024-12-06 20:17:57 +01:00)
917ba8947f  chore: update app package (Jonas Kaninda, 2024-12-06 20:16:56 +01:00)
24 changed files with 241 additions and 267 deletions

View File

@@ -8,9 +8,9 @@ COPY . .
 RUN go mod download
 # Build
-RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-X 'github.com/jkaninda/pg-bkup/utils.Version=${appVersion}'" -o /app/mysql-bkup
+RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-X 'github.com/jkaninda/mysql-bkup/utils.Version=${appVersion}'" -o /app/mysql-bkup
-FROM alpine:3.20.3
+FROM alpine:3.21.0
 ENV TZ=UTC
 ARG WORKDIR="/config"
 ARG BACKUPDIR="/backup"
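
For context, the `-ldflags "-X ..."` build flag above injects the release version into the binary at link time. That only works against a package-level string variable; a minimal sketch of what the target variable presumably looks like (the "dev" default is an assumption, not taken from the repository):

```go
// Illustrative sketch only: the linker flag in the Dockerfile above
// (-ldflags "-X 'github.com/jkaninda/mysql-bkup/utils.Version=${appVersion}'")
// overwrites a package-level string at build time.
package utils

// Version is replaced by the linker when the image is built; cmd/version.go
// prints it via fmt.Printf("Version: %s \n", utils.Version). The default value
// below is assumed for local builds without ldflags.
var Version = "dev"
```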

View File

@@ -24,7 +24,7 @@ It supports a variety of storage options and ensures data security through GPG e
 - **Deployment Flexibility:**
   - Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image.
   - Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**.
-  - Supports recurring backups of PostgreSQL databases when deployed:
+  - Supports recurring backups of MySQL databases when deployed:
     - On Docker for automated backup schedules.
     - As a **Job** or **CronJob** on Kubernetes.
@@ -35,9 +35,9 @@ It supports a variety of storage options and ensures data security through GPG e
 ## Use Cases
-- **Automated Recurring Backups:** Schedule regular backups for PostgreSQL databases.
+- **Automated Recurring Backups:** Schedule regular backups for MySQL databases.
-- **Cross-Environment Migration:** Easily migrate your PostgreSQL databases across different environments using supported storage options.
+- **Cross-Environment Migration:** Easily migrate your MySQL databases across different environments using supported storage options.
-- **Secure Backup Management:** Protect your data with Gmysql encryption.
+- **Secure Backup Management:** Protect your data with GPG encryption.
 Successfully tested on:

View File

@@ -1,4 +1,3 @@
-// Package cmd /
 /*
 MIT License
@@ -22,11 +21,11 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 */
 package cmd
 import (
-    "github.com/jkaninda/mysql-bkup/internal"
+    "github.com/jkaninda/mysql-bkup/pkg"
-    "github.com/jkaninda/mysql-bkup/pkg/logger"
     "github.com/jkaninda/mysql-bkup/utils"
     "github.com/spf13/cobra"
 )
@@ -37,9 +36,9 @@ var BackupCmd = &cobra.Command{
     Example: utils.BackupExample,
     Run: func(cmd *cobra.Command, args []string) {
         if len(args) == 0 {
-            internal.StartBackup(cmd)
+            pkg.StartBackup(cmd)
         } else {
-            logger.Fatal(`"backup" accepts no argument %q`, args)
+            utils.Fatal(`"backup" accepts no argument %q`, args)
         }
     },
 }

View File

@@ -1,4 +1,3 @@
-// Package cmd /
 /*
 MIT License
@@ -22,11 +21,12 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 */
 package cmd
 import (
-    "github.com/jkaninda/mysql-bkup/internal"
+    "github.com/jkaninda/mysql-bkup/pkg"
-    "github.com/jkaninda/mysql-bkup/pkg/logger"
+    "github.com/jkaninda/mysql-bkup/utils"
     "github.com/spf13/cobra"
 )
@@ -35,9 +35,9 @@ var MigrateCmd = &cobra.Command{
     Short: "Migrate database from a source database to a target database",
     Run: func(cmd *cobra.Command, args []string) {
         if len(args) == 0 {
-            internal.StartMigration(cmd)
+            pkg.StartMigration(cmd)
         } else {
-            logger.Fatal(`"migrate" accepts no argument %q`, args)
+            utils.Fatal(`"migrate" accepts no argument %q`, args)
         }

View File

@@ -1,5 +1,3 @@
-package cmd
 /*
 MIT License
@@ -23,9 +21,11 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 */
+package cmd
 import (
-    "github.com/jkaninda/mysql-bkup/internal"
+    "github.com/jkaninda/mysql-bkup/pkg"
-    "github.com/jkaninda/mysql-bkup/pkg/logger"
     "github.com/jkaninda/mysql-bkup/utils"
     "github.com/spf13/cobra"
 )
@@ -36,9 +36,9 @@ var RestoreCmd = &cobra.Command{
     Example: utils.RestoreExample,
     Run: func(cmd *cobra.Command, args []string) {
         if len(args) == 0 {
-            internal.StartRestore(cmd)
+            pkg.StartRestore(cmd)
         } else {
-            logger.Fatal(`"restore" accepts no argument %q`, args)
+            utils.Fatal(`"restore" accepts no argument %q`, args)
         }

View File

@@ -1,4 +1,3 @@
-// Package cmd /
 /*
 MIT License
@@ -22,6 +21,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 */
 package cmd
 import (

View File

@@ -1,4 +1,3 @@
-// Package cmd /
 /*
 MIT License
@@ -22,10 +21,12 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 */
 package cmd
 import (
     "fmt"
+    "github.com/jkaninda/mysql-bkup/utils"
     "github.com/spf13/cobra"
     "os"
 )
@@ -41,6 +42,6 @@ var VersionCmd = &cobra.Command{
 }
 func Version() {
-    fmt.Printf("Version: %s \n", appVersion)
+    fmt.Printf("Version: %s \n", utils.Version)
     fmt.Println()
 }

View File

@@ -25,7 +25,7 @@ It supports a variety of storage options and ensures data security through GPG e
 - **Deployment Flexibility:**
   - Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image.
   - Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**.
-  - Supports recurring backups of PostgreSQL databases when deployed:
+  - Supports recurring backups of MySQL databases when deployed:
     - On Docker for automated backup schedules.
     - As a **Job** or **CronJob** on Kubernetes.
@@ -36,9 +36,9 @@ It supports a variety of storage options and ensures data security through GPG e
 ## Use Cases
-- **Automated Recurring Backups:** Schedule regular backups for PostgreSQL databases.
+- **Automated Recurring Backups:** Schedule regular backups for MySQL databases.
-- **Cross-Environment Migration:** Easily migrate your PostgreSQL databases across different environments using supported storage options.
+- **Cross-Environment Migration:** Easily migrate your MySQL databases across different environments using supported storage options.
-- **Secure Backup Management:** Protect your data with Gmysql encryption.
+- **Secure Backup Management:** Protect your data with GPG encryption.

View File

@@ -1,4 +1,3 @@
-// Package main /
 /*
 MIT License
@@ -22,6 +21,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 */
 package main
 import "github.com/jkaninda/mysql-bkup/cmd"

View File

@@ -22,12 +22,11 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 */
-package internal
+package pkg
 import (
     "fmt"
     "github.com/jkaninda/go-storage/pkg/azure"
-    "github.com/jkaninda/mysql-bkup/pkg/logger"
     "github.com/jkaninda/mysql-bkup/utils"
     "os"
@@ -36,7 +35,7 @@ import (
 )
 func azureBackup(db *dbConfig, config *BackupConfig) {
-    logger.Info("Backup database to the remote FTP server")
+    utils.Info("Backup database to the remote FTP server")
     startTime = time.Now().Format(utils.TimeFormat())
     // Backup database
@@ -46,8 +45,8 @@ func azureBackup(db *dbConfig, config *BackupConfig) {
         encryptBackup(config)
         finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
     }
-    logger.Info("Uploading backup archive to Azure Blob storage ...")
+    utils.Info("Uploading backup archive to Azure Blob storage ...")
-    logger.Info("Backup name is %s", finalFileName)
+    utils.Info("Backup name is %s", finalFileName)
     azureConfig := loadAzureConfig()
     azureStorage, err := azure.NewStorage(azure.Config{
         ContainerName: azureConfig.containerName,
@@ -57,34 +56,34 @@ func azureBackup(db *dbConfig, config *BackupConfig) {
         LocalPath: tmpPath,
     })
     if err != nil {
-        logger.Fatal("Error creating SSH storage: %s", err)
+        utils.Fatal("Error creating Azure storage: %s", err)
     }
     err = azureStorage.Copy(finalFileName)
     if err != nil {
-        logger.Fatal("Error copying backup file: %s", err)
+        utils.Fatal("Error copying backup file: %s", err)
     }
-    logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
+    utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
     // Get backup info
     fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
     if err != nil {
-        logger.Error("Error: %s", err)
+        utils.Error("Error: %s", err)
     }
     backupSize = fileInfo.Size()
     // Delete backup file from tmp folder
     err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
     if err != nil {
-        logger.Error("Error deleting file: %v", err)
+        utils.Error("Error deleting file: %v", err)
     }
     if config.prune {
         err := azureStorage.Prune(config.backupRetention)
         if err != nil {
-            logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
+            utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
         }
     }
-    logger.Info("Uploading backup archive to Azure Blob storage ... done ")
+    utils.Info("Uploading backup archive to Azure Blob storage ... done ")
     // Send notification
     utils.NotifySuccess(&utils.NotificationData{
@@ -98,10 +97,10 @@ func azureBackup(db *dbConfig, config *BackupConfig) {
     })
     // Delete temp
     deleteTemp()
-    logger.Info("Backup completed successfully")
+    utils.Info("Backup completed successfully")
 }
 func azureRestore(db *dbConfig, conf *RestoreConfig) {
-    logger.Info("Restore database from Azure Blob storage")
+    utils.Info("Restore database from Azure Blob storage")
     azureConfig := loadAzureConfig()
     azureStorage, err := azure.NewStorage(azure.Config{
         ContainerName: azureConfig.containerName,
@@ -111,12 +110,12 @@ func azureRestore(db *dbConfig, conf *RestoreConfig) {
         LocalPath: tmpPath,
     })
     if err != nil {
-        logger.Fatal("Error creating SSH storage: %s", err)
+        utils.Fatal("Error creating SSH storage: %s", err)
     }
     err = azureStorage.CopyFrom(conf.file)
     if err != nil {
-        logger.Fatal("Error downloading backup file: %s", err)
+        utils.Fatal("Error downloading backup file: %s", err)
     }
     RestoreDatabase(db, conf)
 }

View File

@@ -22,13 +22,13 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 */
-package internal
+package pkg
 import (
     "fmt"
     "github.com/jkaninda/encryptor"
     "github.com/jkaninda/go-storage/pkg/local"
-    "github.com/jkaninda/mysql-bkup/pkg/logger"
     "github.com/jkaninda/mysql-bkup/utils"
     "github.com/robfig/cron/v3"
     "github.com/spf13/cobra"
@@ -53,7 +53,7 @@ func StartBackup(cmd *cobra.Command) {
         if utils.IsValidCronExpression(config.cronExpression) {
             scheduledMode(dbConf, config)
         } else {
-            logger.Fatal("Cron expression is not valid: %s", config.cronExpression)
+            utils.Fatal("Cron expression is not valid: %s", config.cronExpression)
         }
     }
 } else {
@@ -64,22 +64,22 @@
 // scheduledMode Runs backup in scheduled mode
 func scheduledMode(db *dbConfig, config *BackupConfig) {
-    logger.Info("Running in Scheduled mode")
+    utils.Info("Running in Scheduled mode")
-    logger.Info("Backup cron expression: %s", config.cronExpression)
+    utils.Info("Backup cron expression: %s", config.cronExpression)
-    logger.Info("The next scheduled time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
+    utils.Info("The next scheduled time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
-    logger.Info("Storage type %s ", config.storage)
+    utils.Info("Storage type %s ", config.storage)
     // Test backup
-    logger.Info("Testing backup configurations...")
+    utils.Info("Testing backup configurations...")
     testDatabaseConnection(db)
-    logger.Info("Testing backup configurations...done")
+    utils.Info("Testing backup configurations...done")
-    logger.Info("Creating backup job...")
+    utils.Info("Creating backup job...")
     // Create a new cron instance
     c := cron.New()
     _, err := c.AddFunc(config.cronExpression, func() {
         BackupTask(db, config)
-        logger.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
+        utils.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
     })
     if err != nil {
@@ -87,8 +87,8 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
     }
     // Start the cron scheduler
     c.Start()
-    logger.Info("Creating backup job...done")
+    utils.Info("Creating backup job...done")
-    logger.Info("Backup job started")
+    utils.Info("Backup job started")
     defer c.Stop()
     select {}
 }
@@ -106,7 +106,7 @@ func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
 // BackupTask backups database
 func BackupTask(db *dbConfig, config *BackupConfig) {
-    logger.Info("Starting backup task...")
+    utils.Info("Starting backup task...")
     // Generate file name
     backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
     if config.disableCompression {
@@ -129,17 +129,17 @@ func BackupTask(db *dbConfig, config *BackupConfig) {
     }
 }
 func startMultiBackup(bkConfig *BackupConfig, configFile string) {
-    logger.Info("Starting backup task...")
+    utils.Info("Starting backup task...")
     conf, err := readConf(configFile)
     if err != nil {
-        logger.Fatal("Error reading config file: %s", err)
+        utils.Fatal("Error reading config file: %s", err)
     }
     // Check if cronExpression is defined in config file
     if conf.CronExpression != "" {
         bkConfig.cronExpression = conf.CronExpression
     }
     if len(conf.Databases) == 0 {
-        logger.Fatal("No databases found")
+        utils.Fatal("No databases found")
     }
     // Check if cronExpression is defined
     if bkConfig.cronExpression == "" {
@@ -147,24 +147,24 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
     } else {
         // Check if cronExpression is valid
         if utils.IsValidCronExpression(bkConfig.cronExpression) {
-            logger.Info("Running backup in Scheduled mode")
+            utils.Info("Running backup in Scheduled mode")
-            logger.Info("Backup cron expression: %s", bkConfig.cronExpression)
+            utils.Info("Backup cron expression: %s", bkConfig.cronExpression)
-            logger.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
+            utils.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
-            logger.Info("Storage type %s ", bkConfig.storage)
+            utils.Info("Storage type %s ", bkConfig.storage)
             // Test backup
-            logger.Info("Testing backup configurations...")
+            utils.Info("Testing backup configurations...")
             for _, db := range conf.Databases {
                 testDatabaseConnection(getDatabase(db))
             }
-            logger.Info("Testing backup configurations...done")
+            utils.Info("Testing backup configurations...done")
-            logger.Info("Creating backup job...")
+            utils.Info("Creating backup job...")
             // Create a new cron instance
             c := cron.New()
             _, err := c.AddFunc(bkConfig.cronExpression, func() {
                 multiBackupTask(conf.Databases, bkConfig)
-                logger.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
+                utils.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
             })
             if err != nil {
@@ -172,13 +172,13 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
             }
             // Start the cron scheduler
             c.Start()
-            logger.Info("Creating backup job...done")
+            utils.Info("Creating backup job...done")
-            logger.Info("Backup job started")
+            utils.Info("Backup job started")
             defer c.Stop()
             select {}
         } else {
-            logger.Fatal("Cron expression is not valid: %s", bkConfig.cronExpression)
+            utils.Fatal("Cron expression is not valid: %s", bkConfig.cronExpression)
         }
     }
@@ -188,7 +188,7 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
 func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
     storagePath = os.Getenv("STORAGE_PATH")
-    logger.Info("Starting database backup...")
+    utils.Info("Starting database backup...")
     err := os.Setenv("MYSQL_PWD", db.dbPassword)
     if err != nil {
@@ -196,7 +196,7 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
     }
     testDatabaseConnection(db)
     // Backup Database database
-    logger.Info("Backing up database...")
+    utils.Info("Backing up database...")
     // Verify is compression is disabled
     if disableCompression {
@@ -209,26 +209,26 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
     )
     output, err := cmd.Output()
     if err != nil {
-        logger.Fatal(err.Error())
+        utils.Fatal(err.Error())
     }
     // save output
     file, err := os.Create(filepath.Join(tmpPath, backupFileName))
     if err != nil {
-        logger.Fatal(err.Error())
+        utils.Fatal(err.Error())
     }
     defer func(file *os.File) {
         err := file.Close()
         if err != nil {
-            logger.Fatal(err.Error())
+            utils.Fatal(err.Error())
         }
     }(file)
     _, err = file.Write(output)
     if err != nil {
-        logger.Fatal(err.Error())
+        utils.Fatal(err.Error())
     }
-    logger.Info("Database has been backed up")
+    utils.Info("Database has been backed up")
     } else {
     // Execute mysqldump
@@ -250,12 +250,12 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
     if err := gzipCmd.Wait(); err != nil {
         log.Fatal(err)
     }
-    logger.Info("Database has been backed up")
+    utils.Info("Database has been backed up")
     }
 }
 func localBackup(db *dbConfig, config *BackupConfig) {
-    logger.Info("Backup database to local storage")
+    utils.Info("Backup database to local storage")
     startTime = time.Now().Format(utils.TimeFormat())
     BackupDatabase(db, config.backupFileName, disableCompression)
     finalFileName := config.backupFileName
@@ -265,19 +265,19 @@ func localBackup(db *dbConfig, config *BackupConfig) {
     }
     fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
     if err != nil {
-        logger.Error("Error: %s", err)
+        utils.Error("Error: %s", err)
     }
     backupSize = fileInfo.Size()
-    logger.Info("Backup name is %s", finalFileName)
+    utils.Info("Backup name is %s", finalFileName)
     localStorage := local.NewStorage(local.Config{
         LocalPath: tmpPath,
         RemotePath: storagePath,
     })
     err = localStorage.Copy(finalFileName)
     if err != nil {
-        logger.Fatal("Error copying backup file: %s", err)
+        utils.Fatal("Error copying backup file: %s", err)
     }
-    logger.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName))
+    utils.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName))
     // Send notification
     utils.NotifySuccess(&utils.NotificationData{
         File: finalFileName,
@@ -292,40 +292,40 @@ func localBackup(db *dbConfig, config *BackupConfig) {
     if config.prune {
         err = localStorage.Prune(config.backupRetention)
         if err != nil {
-            logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
+            utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
         }
     }
     // Delete temp
     deleteTemp()
-    logger.Info("Backup completed successfully")
+    utils.Info("Backup completed successfully")
 }
 func encryptBackup(config *BackupConfig) {
     backupFile, err := os.ReadFile(filepath.Join(tmpPath, config.backupFileName))
     outputFile := fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension)
     if err != nil {
-        logger.Fatal("Error reading backup file: %s ", err)
+        utils.Fatal("Error reading backup file: %s ", err)
     }
     if config.usingKey {
-        logger.Info("Encrypting backup using public key...")
+        utils.Info("Encrypting backup using public key...")
         pubKey, err := os.ReadFile(config.publicKey)
         if err != nil {
-            logger.Fatal("Error reading public key: %s ", err)
+            utils.Fatal("Error reading public key: %s ", err)
         }
         err = encryptor.EncryptWithPublicKey(backupFile, fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension), pubKey)
         if err != nil {
-            logger.Fatal("Error encrypting backup file: %v ", err)
+            utils.Fatal("Error encrypting backup file: %v ", err)
         }
-        logger.Info("Encrypting backup using public key...done")
+        utils.Info("Encrypting backup using public key...done")
     } else if config.passphrase != "" {
-        logger.Info("Encrypting backup using passphrase...")
+        utils.Info("Encrypting backup using passphrase...")
         err := encryptor.Encrypt(backupFile, outputFile, config.passphrase)
         if err != nil {
-            logger.Fatal("error during encrypting backup %v", err)
+            utils.Fatal("error during encrypting backup %v", err)
         }
-        logger.Info("Encrypting backup using passphrase...done")
+        utils.Info("Encrypting backup using passphrase...done")
     }
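
For readers unfamiliar with the scheduling pattern used in scheduledMode and startMultiBackup above, here is a minimal, self-contained sketch of the same robfig/cron v3 flow; the cron expression and the task body are placeholders, not the project's defaults:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// Same pattern as scheduledMode above: register the task for a cron spec,
	// start the scheduler, and keep the process alive. "@every 1m" is a placeholder.
	c := cron.New()
	_, err := c.AddFunc("@every 1m", func() {
		fmt.Println("backup task would run here:", time.Now().Format(time.RFC3339))
	})
	if err != nil {
		fmt.Println("invalid cron expression:", err)
		return
	}
	c.Start()
	defer c.Stop()
	select {} // block forever, as the backup command does in scheduled mode
}
```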

View File

@@ -1,4 +1,3 @@
-// Package internal /
 /*
 MIT License
@@ -22,11 +21,11 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 */
-package internal
+package pkg
 import (
     "fmt"
-    "github.com/jkaninda/mysql-bkup/pkg/logger"
     "github.com/jkaninda/mysql-bkup/utils"
     "github.com/spf13/cobra"
     "os"
@@ -121,8 +120,8 @@ func initDbConfig(cmd *cobra.Command) *dbConfig {
     err := utils.CheckEnvVars(dbHVars)
     if err != nil {
-        logger.Error("Please make sure all required environment variables for database are set")
+        utils.Error("Please make sure all required environment variables for database are set")
-        logger.Fatal("Error checking environment variables: %s", err)
+        utils.Fatal("Error checking environment variables: %s", err)
     }
     return &dConf
 }
@@ -164,8 +163,8 @@ func loadFtpConfig() *FTPConfig {
     fConfig.remotePath = os.Getenv("REMOTE_PATH")
     err := utils.CheckEnvVars(ftpVars)
     if err != nil {
-        logger.Error("Please make sure all required environment variables for FTP are set")
+        utils.Error("Please make sure all required environment variables for FTP are set")
-        logger.Fatal("Error missing environment variables: %s", err)
+        utils.Fatal("Error missing environment variables: %s", err)
     }
     return &fConfig
 }
@@ -178,8 +177,8 @@ func loadAzureConfig() *AzureConfig {
     err := utils.CheckEnvVars(azureVars)
     if err != nil {
-        logger.Error("Please make sure all required environment variables for Azure Blob storage are set")
+        utils.Error("Please make sure all required environment variables for Azure Blob storage are set")
-        logger.Fatal("Error missing environment variables: %s", err)
+        utils.Fatal("Error missing environment variables: %s", err)
     }
     return &aConfig
 }
@@ -206,8 +205,8 @@ func initAWSConfig() *AWSConfig {
     aConfig.forcePathStyle = forcePathStyle
     err = utils.CheckEnvVars(awsVars)
     if err != nil {
-        logger.Error("Please make sure all required environment variables for AWS S3 are set")
+        utils.Error("Please make sure all required environment variables for AWS S3 are set")
-        logger.Fatal("Error checking environment variables: %s", err)
+        utils.Fatal("Error checking environment variables: %s", err)
     }
     return &aConfig
 }
@@ -304,8 +303,8 @@ func initTargetDbConfig() *targetDbConfig {
     err := utils.CheckEnvVars(tdbRVars)
     if err != nil {
-        logger.Error("Please make sure all required environment variables for the target database are set")
+        utils.Error("Please make sure all required environment variables for the target database are set")
-        logger.Fatal("Error checking target database environment variables: %s", err)
+        utils.Fatal("Error checking target database environment variables: %s", err)
     }
     return &tdbConfig
 }

View File

@@ -1,4 +1,3 @@
-// Package internal /
 /*
 MIT License
@@ -22,12 +21,12 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 */
-package internal
+package pkg
 import (
     "bytes"
     "fmt"
-    "github.com/jkaninda/mysql-bkup/pkg/logger"
     "github.com/jkaninda/mysql-bkup/utils"
     "gopkg.in/yaml.v3"
     "os"
@@ -44,7 +43,7 @@ func intro() {
 // copyToTmp copy file to temporary directory
 func deleteTemp() {
-    logger.Info("Deleting %s ...", tmpPath)
+    utils.Info("Deleting %s ...", tmpPath)
     err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
         if err != nil {
             return err
@@ -60,9 +59,9 @@ func deleteTemp() {
         return nil
     })
     if err != nil {
-        logger.Error("Error deleting files: %v", err)
+        utils.Error("Error deleting files: %v", err)
     } else {
-        logger.Info("Deleting %s ... done", tmpPath)
+        utils.Info("Deleting %s ... done", tmpPath)
     }
 }
@@ -72,7 +71,7 @@ func testDatabaseConnection(db *dbConfig) {
     if err != nil {
         return
     }
-    logger.Info("Connecting to %s database ...", db.dbName)
+    utils.Info("Connecting to %s database ...", db.dbName)
     cmd := exec.Command("mysql", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName, "-e", "quit")
     // Capture the output
     var out bytes.Buffer
@@ -80,10 +79,10 @@ func testDatabaseConnection(db *dbConfig) {
     cmd.Stderr = &out
     err = cmd.Run()
     if err != nil {
-        logger.Fatal("Error testing database connection: %v\nOutput: %s", err, out.String())
+        utils.Fatal("Error testing database connection: %v\nOutput: %s", err, out.String())
     }
-    logger.Info("Successfully connected to %s database", db.dbName)
+    utils.Info("Successfully connected to %s database", db.dbName)
 }

View File

@@ -1,26 +0,0 @@
-package logger
-/*
-MIT License
-# Copyright (c) 2023 Jonas Kaninda
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-*/
-const traceLog = "trace"

View File

@@ -1,4 +1,3 @@
-// Package internal /
 /*
 MIT License
@@ -22,18 +21,19 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 */
-package internal
+package pkg
 import (
     "fmt"
-    "github.com/jkaninda/mysql-bkup/pkg/logger"
+    "github.com/jkaninda/mysql-bkup/utils"
     "github.com/spf13/cobra"
     "time"
 )
 func StartMigration(cmd *cobra.Command) {
     intro()
-    logger.Info("Starting database migration...")
+    utils.Info("Starting database migration...")
     // Get DB config
     dbConf = initDbConfig(cmd)
     targetDbConf = initTargetDbConfig()
@@ -53,8 +53,8 @@ func StartMigration(cmd *cobra.Command) {
     // Backup source Database
     BackupDatabase(dbConf, backupFileName, true)
     // Restore source database into target database
-    logger.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
+    utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
     RestoreDatabase(&newDbConfig, conf)
-    logger.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
+    utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
-    logger.Info("Database migration completed.")
+    utils.Info("Database migration completed.")
 }

View File

@@ -22,13 +22,12 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 */
-package internal
+package pkg
 import (
     "fmt"
     "github.com/jkaninda/go-storage/pkg/ftp"
     "github.com/jkaninda/go-storage/pkg/ssh"
-    "github.com/jkaninda/mysql-bkup/pkg/logger"
     "github.com/jkaninda/mysql-bkup/utils"
     "os"
@@ -37,7 +36,7 @@ import (
 )
 func sshBackup(db *dbConfig, config *BackupConfig) {
-    logger.Info("Backup database to Remote server")
+    utils.Info("Backup database to Remote server")
     startTime = time.Now().Format(utils.TimeFormat())
     // Backup database
     BackupDatabase(db, config.backupFileName, disableCompression)
@@ -46,11 +45,11 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
         encryptBackup(config)
         finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
     }
-    logger.Info("Uploading backup archive to remote storage ... ")
+    utils.Info("Uploading backup archive to remote storage ... ")
-    logger.Info("Backup name is %s", finalFileName)
+    utils.Info("Backup name is %s", finalFileName)
     sshConfig, err := loadSSHConfig()
     if err != nil {
-        logger.Fatal("Error loading ssh config: %s", err)
+        utils.Fatal("Error loading ssh config: %s", err)
     }
     sshStorage, err := ssh.NewStorage(ssh.Config{
@@ -62,34 +61,34 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
         LocalPath: tmpPath,
     })
     if err != nil {
-        logger.Fatal("Error creating SSH storage: %s", err)
+        utils.Fatal("Error creating SSH storage: %s", err)
     }
     err = sshStorage.Copy(finalFileName)
     if err != nil {
-        logger.Fatal("Error copying backup file: %s", err)
+        utils.Fatal("Error copying backup file: %s", err)
     }
     // Get backup info
     fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
     if err != nil {
-        logger.Error("Error: %s", err)
+        utils.Error("Error: %s", err)
     }
     backupSize = fileInfo.Size()
-    logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
+    utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
     // Delete backup file from tmp folder
     err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
     if err != nil {
-        logger.Error("Error deleting file: %v", err)
+        utils.Error("Error deleting file: %v", err)
     }
     if config.prune {
         err := sshStorage.Prune(config.backupRetention)
         if err != nil {
-            logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
+            utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
         }
     }
-    logger.Info("Uploading backup archive to remote storage ... done ")
+    utils.Info("Uploading backup archive to remote storage ... done ")
     // Send notification
     utils.NotifySuccess(&utils.NotificationData{
         File: finalFileName,
@@ -102,11 +101,11 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
     })
     // Delete temp
     deleteTemp()
-    logger.Info("Backup completed successfully")
+    utils.Info("Backup completed successfully")
 }
 func ftpBackup(db *dbConfig, config *BackupConfig) {
-    logger.Info("Backup database to the remote FTP server")
+    utils.Info("Backup database to the remote FTP server")
     startTime = time.Now().Format(utils.TimeFormat())
     // Backup database
@@ -116,8 +115,8 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
         encryptBackup(config)
         finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
     }
-    logger.Info("Uploading backup archive to the remote FTP server ... ")
+    utils.Info("Uploading backup archive to the remote FTP server ... ")
-    logger.Info("Backup name is %s", finalFileName)
+    utils.Info("Backup name is %s", finalFileName)
     ftpConfig := loadFtpConfig()
     ftpStorage, err := ftp.NewStorage(ftp.Config{
         Host: ftpConfig.host,
@@ -128,34 +127,34 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
         LocalPath: tmpPath,
     })
     if err != nil {
-        logger.Fatal("Error creating SSH storage: %s", err)
+        utils.Fatal("Error creating SSH storage: %s", err)
     }
     err = ftpStorage.Copy(finalFileName)
     if err != nil {
-        logger.Fatal("Error copying backup file: %s", err)
+        utils.Fatal("Error copying backup file: %s", err)
     }
-    logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
+    utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
     // Get backup info
     fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
     if err != nil {
-        logger.Error("Error: %s", err)
+        utils.Error("Error: %s", err)
     }
     backupSize = fileInfo.Size()
     // Delete backup file from tmp folder
     err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
     if err != nil {
-        logger.Error("Error deleting file: %v", err)
+        utils.Error("Error deleting file: %v", err)
     }
     if config.prune {
         err := ftpStorage.Prune(config.backupRetention)
         if err != nil {
-            logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
+            utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
         }
     }
-    logger.Info("Uploading backup archive to the remote FTP server ... done ")
+    utils.Info("Uploading backup archive to the remote FTP server ... done ")
     // Send notification
     utils.NotifySuccess(&utils.NotificationData{
@@ -169,13 +168,13 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
     })
     // Delete temp
     deleteTemp()
-    logger.Info("Backup completed successfully")
+    utils.Info("Backup completed successfully")
 }
 func remoteRestore(db *dbConfig, conf *RestoreConfig) {
-    logger.Info("Restore database from remote server")
+    utils.Info("Restore database from remote server")
     sshConfig, err := loadSSHConfig()
     if err != nil {
-        logger.Fatal("Error loading ssh config: %s", err)
+        utils.Fatal("Error loading ssh config: %s", err)
     }
     sshStorage, err := ssh.NewStorage(ssh.Config{
@@ -188,16 +187,16 @@ func remoteRestore(db *dbConfig, conf *RestoreConfig) {
         LocalPath: tmpPath,
     })
     if err != nil {
-        logger.Fatal("Error creating SSH storage: %s", err)
+        utils.Fatal("Error creating SSH storage: %s", err)
     }
     err = sshStorage.CopyFrom(conf.file)
     if err != nil {
-        logger.Fatal("Error copying backup file: %s", err)
+        utils.Fatal("Error copying backup file: %s", err)
     }
     RestoreDatabase(db, conf)
 }
 func ftpRestore(db *dbConfig, conf *RestoreConfig) {
-    logger.Info("Restore database from FTP server")
+    utils.Info("Restore database from FTP server")
     ftpConfig := loadFtpConfig()
     ftpStorage, err := ftp.NewStorage(ftp.Config{
         Host: ftpConfig.host,
@@ -208,11 +207,11 @@ func ftpRestore(db *dbConfig, conf *RestoreConfig) {
         LocalPath: tmpPath,
     })
     if err != nil {
-        logger.Fatal("Error creating SSH storage: %s", err)
+        utils.Fatal("Error creating SSH storage: %s", err)
     }
     err = ftpStorage.CopyFrom(conf.file)
     if err != nil {
-        logger.Fatal("Error copying backup file: %s", err)
+        utils.Fatal("Error copying backup file: %s", err)
     }
     RestoreDatabase(db, conf)
 }

View File

@@ -1,6 +1,3 @@
-// Package internal /
-package internal
 /*
 MIT License
@@ -24,10 +21,12 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 */
+package pkg
 import (
     "github.com/jkaninda/encryptor"
     "github.com/jkaninda/go-storage/pkg/local"
-    "github.com/jkaninda/mysql-bkup/pkg/logger"
     "github.com/jkaninda/mysql-bkup/utils"
     "github.com/spf13/cobra"
     "os"
@@ -56,14 +55,14 @@ func StartRestore(cmd *cobra.Command) {
     }
 }
 func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) {
-    logger.Info("Restore database from local")
+    utils.Info("Restore database from local")
     localStorage := local.NewStorage(local.Config{
         RemotePath: storagePath,
         LocalPath: tmpPath,
     })
     err := localStorage.CopyFrom(restoreConf.file)
     if err != nil {
-        logger.Fatal("Error copying backup file: %s", err)
+        utils.Fatal("Error copying backup file: %s", err)
     }
     RestoreDatabase(dbConf, restoreConf)
@@ -72,41 +71,41 @@ func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) {
 // RestoreDatabase restore database
 func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
     if conf.file == "" {
-        logger.Fatal("Error, file required")
+        utils.Fatal("Error, file required")
     }
     extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
     rFile, err := os.ReadFile(filepath.Join(tmpPath, conf.file))
     outputFile := RemoveLastExtension(filepath.Join(tmpPath, conf.file))
     if err != nil {
-        logger.Fatal("Error reading backup file: %s ", err)
+        utils.Fatal("Error reading backup file: %s ", err)
     }
     if extension == ".gpg" {
         if conf.usingKey {
-            logger.Info("Decrypting backup using private key...")
+            utils.Info("Decrypting backup using private key...")
-            logger.Warn("Backup decryption using a private key is not fully supported")
+            utils.Warn("Backup decryption using a private key is not fully supported")
             prKey, err := os.ReadFile(conf.privateKey)
             if err != nil {
-                logger.Fatal("Error reading public key: %s ", err)
+                utils.Fatal("Error reading public key: %s ", err)
             }
             err = encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase)
             if err != nil {
-                logger.Fatal("error during decrypting backup %v", err)
+                utils.Fatal("error during decrypting backup %v", err)
             }
-            logger.Info("Decrypting backup using private key...done")
+            utils.Info("Decrypting backup using private key...done")
         } else {
             if conf.passphrase == "" {
-                logger.Error("Error, passphrase or private key required")
+                utils.Error("Error, passphrase or private key required")
-                logger.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.")
+                utils.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.")
             } else {
-                logger.Info("Decrypting backup using passphrase...")
+                utils.Info("Decrypting backup using passphrase...")
                 // decryptWithGPG file
                 err := encryptor.Decrypt(rFile, outputFile, conf.passphrase)
                 if err != nil {
-                    logger.Fatal("Error decrypting file %s %v", file, err)
+                    utils.Fatal("Error decrypting file %s %v", file, err)
                 }
-                logger.Info("Decrypting backup using passphrase...done")
+                utils.Info("Decrypting backup using passphrase...done")
                 // Update file name
                 conf.file = RemoveLastExtension(file)
             }
@@ -120,7 +119,7 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
         return
     }
     testDatabaseConnection(db)
-    logger.Info("Restoring database...")
+    utils.Info("Restoring database...")
     extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
     // Restore from compressed file / .sql.gz
@@ -128,10 +127,10 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
     str := "zcat " + filepath.Join(tmpPath, conf.file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
     _, err := exec.Command("sh", "-c", str).Output()
     if err != nil {
-        logger.Fatal("Error, in restoring the database %v", err)
+        utils.Fatal("Error, in restoring the database %v", err)
     }
-    logger.Info("Restoring database... done")
+    utils.Info("Restoring database... done")
-    logger.Info("Database has been restored")
+    utils.Info("Database has been restored")
     // Delete temp
     deleteTemp()
@@ -140,17 +139,17 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
     str := "cat " + filepath.Join(tmpPath, conf.file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
     _, err := exec.Command("sh", "-c", str).Output()
     if err != nil {
-        logger.Fatal("Error in restoring the database %v", err)
+        utils.Fatal("Error in restoring the database %v", err)
     }
-    logger.Info("Restoring database... done")
+    utils.Info("Restoring database... done")
-    logger.Info("Database has been restored")
+    utils.Info("Database has been restored")
     // Delete temp
     deleteTemp()
     } else {
-        logger.Fatal("Unknown file extension %s", extension)
+        utils.Fatal("Unknown file extension %s", extension)
     }
     } else {
-        logger.Fatal("File not found in %s", filepath.Join(tmpPath, conf.file))
+        utils.Fatal("File not found in %s", filepath.Join(tmpPath, conf.file))
     }
 }

View File

@@ -22,12 +22,11 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. SOFTWARE.
*/ */
package internal package pkg
import ( import (
"fmt" "fmt"
"github.com/jkaninda/go-storage/pkg/s3" "github.com/jkaninda/go-storage/pkg/s3"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/jkaninda/mysql-bkup/utils" "github.com/jkaninda/mysql-bkup/utils"
"os" "os"
@@ -37,7 +36,7 @@ import (
func s3Backup(db *dbConfig, config *BackupConfig) { func s3Backup(db *dbConfig, config *BackupConfig) {
logger.Info("Backup database to s3 storage") utils.Info("Backup database to s3 storage")
startTime = time.Now().Format(utils.TimeFormat()) startTime = time.Now().Format(utils.TimeFormat())
// Backup database // Backup database
BackupDatabase(db, config.backupFileName, disableCompression) BackupDatabase(db, config.backupFileName, disableCompression)
@@ -46,12 +45,12 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
encryptBackup(config) encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg") finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
} }
logger.Info("Uploading backup archive to remote storage S3 ... ") utils.Info("Uploading backup archive to remote storage S3 ... ")
awsConfig := initAWSConfig() awsConfig := initAWSConfig()
if config.remotePath == "" { if config.remotePath == "" {
config.remotePath = awsConfig.remotePath config.remotePath = awsConfig.remotePath
} }
logger.Info("Backup name is %s", finalFileName) utils.Info("Backup name is %s", finalFileName)
s3Storage, err := s3.NewStorage(s3.Config{ s3Storage, err := s3.NewStorage(s3.Config{
Endpoint: awsConfig.endpoint, Endpoint: awsConfig.endpoint,
Bucket: awsConfig.bucket, Bucket: awsConfig.bucket,
@@ -60,20 +59,20 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
Region: awsConfig.region, Region: awsConfig.region,
DisableSsl: awsConfig.disableSsl, DisableSsl: awsConfig.disableSsl,
ForcePathStyle: awsConfig.forcePathStyle, ForcePathStyle: awsConfig.forcePathStyle,
RemotePath: awsConfig.remotePath, RemotePath: config.remotePath,
LocalPath: tmpPath, LocalPath: tmpPath,
}) })
if err != nil { if err != nil {
logger.Fatal("Error creating s3 storage: %s", err) utils.Fatal("Error creating s3 storage: %s", err)
} }
err = s3Storage.Copy(finalFileName) err = s3Storage.Copy(finalFileName)
if err != nil { if err != nil {
logger.Fatal("Error copying backup file: %s", err) utils.Fatal("Error copying backup file: %s", err)
} }
// Get backup info // Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName)) fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil { if err != nil {
logger.Error("Error: %s", err) utils.Error("Error: %s", err)
} }
backupSize = fileInfo.Size() backupSize = fileInfo.Size()
@@ -87,11 +86,11 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
if config.prune { if config.prune {
err := s3Storage.Prune(config.backupRetention) err := s3Storage.Prune(config.backupRetention)
if err != nil { if err != nil {
logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err) utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
} }
} }
logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName)) utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
logger.Info("Uploading backup archive to remote storage S3 ... done ") utils.Info("Uploading backup archive to remote storage S3 ... done ")
// Send notification // Send notification
utils.NotifySuccess(&utils.NotificationData{ utils.NotifySuccess(&utils.NotificationData{
File: finalFileName, File: finalFileName,
@@ -104,11 +103,11 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
}) })
// Delete temp // Delete temp
deleteTemp() deleteTemp()
logger.Info("Backup completed successfully") utils.Info("Backup completed successfully")
} }
func s3Restore(db *dbConfig, conf *RestoreConfig) { func s3Restore(db *dbConfig, conf *RestoreConfig) {
logger.Info("Restore database from s3") utils.Info("Restore database from s3")
awsConfig := initAWSConfig() awsConfig := initAWSConfig()
if conf.remotePath == "" { if conf.remotePath == "" {
conf.remotePath = awsConfig.remotePath conf.remotePath = awsConfig.remotePath
@@ -121,15 +120,15 @@ func s3Restore(db *dbConfig, conf *RestoreConfig) {
Region: awsConfig.region, Region: awsConfig.region,
DisableSsl: awsConfig.disableSsl, DisableSsl: awsConfig.disableSsl,
ForcePathStyle: awsConfig.forcePathStyle, ForcePathStyle: awsConfig.forcePathStyle,
RemotePath: awsConfig.remotePath, RemotePath: conf.remotePath,
LocalPath: tmpPath, LocalPath: tmpPath,
}) })
if err != nil { if err != nil {
logger.Fatal("Error creating s3 storage: %s", err) utils.Fatal("Error creating s3 storage: %s", err)
} }
err = s3Storage.CopyFrom(conf.file) err = s3Storage.CopyFrom(conf.file)
if err != nil { if err != nil {
logger.Fatal("Error download file from S3 storage: %s", err) utils.Fatal("Error download file from S3 storage: %s", err)
} }
RestoreDatabase(db, conf) RestoreDatabase(db, conf)
} }
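In both s3Backup and s3Restore above, the S3 storage client is now built with the per-run remote path (config.remotePath / conf.remotePath, which falls back to the AWS default only when it is empty) instead of always passing awsConfig.remotePath, so the path resolved for a given backup is the one actually used for upload, download, and pruning. A minimal sketch of that fallback pattern; resolveRemotePath and the sample values are illustrative, not part of the repository.

package main

import "fmt"

// resolveRemotePath prefers the per-backup path and only falls back to the
// storage-level default when nothing was set for this run.
func resolveRemotePath(perBackup, storageDefault string) string {
	if perBackup == "" {
		return storageDefault
	}
	return perBackup
}

func main() {
	storageDefault := "backups"
	runs := []struct{ dbName, remotePath string }{
		{"app_db", ""},                       // inherits the default path
		{"analytics_db", "custom/analytics"}, // keeps its own path
	}
	for _, r := range runs {
		remote := resolveRemotePath(r.remotePath, storageDefault)
		fmt.Printf("%s/%s_20241207.sql.gz\n", remote, r.dbName)
	}
}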

View File

@@ -1,4 +1,3 @@
// Package internal /
/* /*
MIT License MIT License
@@ -22,7 +21,8 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. SOFTWARE.
*/ */
package internal
package pkg
const tmpPath = "/tmp/backup" const tmpPath = "/tmp/backup"
const gpgHome = "/config/gnupg" const gpgHome = "/config/gnupg"

View File

@@ -1,5 +1,3 @@
package utils
/* /*
MIT License MIT License
@@ -23,6 +21,9 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. SOFTWARE.
*/ */
package utils
import "os" import "os"
type MailConfig struct { type MailConfig struct {

View File

@@ -1,4 +1,3 @@
// Package utils /
/* /*
MIT License MIT License
@@ -22,6 +21,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. SOFTWARE.
*/ */
package utils package utils
const RestoreExample = "restore --dbname database --file db_20231219_022941.sql.gz\n" + const RestoreExample = "restore --dbname database --file db_20231219_022941.sql.gz\n" +
@@ -32,3 +32,4 @@ const BackupExample = "backup --dbname database --disable-compression\n" +
const MainExample = "mysql-bkup backup --dbname database --disable-compression\n" + const MainExample = "mysql-bkup backup --dbname database --disable-compression\n" +
"backup --dbname database --storage s3 --path /custom-path\n" + "backup --dbname database --storage s3 --path /custom-path\n" +
"restore --dbname database --file db_20231219_022941.sql.gz" "restore --dbname database --file db_20231219_022941.sql.gz"
const traceLog = "trace"
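RestoreExample, BackupExample, and MainExample are plain help-text constants; since the project already depends on spf13/cobra (see the utils imports further down), they are presumably attached to commands through cobra's Example field. A hedged sketch of that wiring — the command definition below is illustrative, not copied from the repository.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

const restoreExample = "restore --dbname database --file db_20231219_022941.sql.gz"

func main() {
	restoreCmd := &cobra.Command{
		Use:     "restore",
		Short:   "Restore a database from a backup file",
		Example: restoreExample, // rendered under "Examples:" in --help output
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("restore invoked")
		},
	}
	_ = restoreCmd.Execute()
}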

View File

@@ -1,13 +1,3 @@
package logger
import (
"fmt"
"log"
"os"
"runtime"
"strings"
)
/* /*
MIT License MIT License
@@ -32,6 +22,16 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. SOFTWARE.
*/ */
package utils
import (
"fmt"
"log"
"os"
"runtime"
"strings"
)
// Info returns info log // Info returns info log
func Info(msg string, args ...interface{}) { func Info(msg string, args ...interface{}) {
log.SetOutput(getStd("/dev/stdout")) log.SetOutput(getStd("/dev/stdout"))
@@ -54,7 +54,13 @@ func Error(msg string, args ...interface{}) {
func Fatal(msg string, args ...interface{}) { func Fatal(msg string, args ...interface{}) {
log.SetOutput(os.Stdout) log.SetOutput(os.Stdout)
// Format message if there are additional arguments
formattedMessage := msg
if len(args) > 0 {
formattedMessage = fmt.Sprintf(msg, args...)
}
logWithCaller("ERROR", msg, args...) logWithCaller("ERROR", msg, args...)
NotifyError(formattedMessage)
os.Exit(1) os.Exit(1)
} }
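The reworked Fatal formats its arguments once, logs the result, hands the same formatted string to NotifyError, and only then exits, so a fatal error and its notification always carry an identical message. A stand-alone sketch of that flow, with notify standing in for the real notification path; the names here are illustrative, not the project's API.

package main

import (
	"fmt"
	"log"
	"os"
)

// notify is a stand-in for the email/Telegram notification path.
func notify(message string) {
	log.Printf("NOTIFY: %s", message)
}

// fatal formats the message once, logs it, notifies, then exits non-zero.
func fatal(msg string, args ...interface{}) {
	formatted := msg
	if len(args) > 0 {
		formatted = fmt.Sprintf(msg, args...)
	}
	log.Printf("ERROR %s", formatted)
	notify(formatted)
	os.Exit(1)
}

func main() {
	fatal("Error creating s3 storage: %s", fmt.Errorf("connection refused"))
}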

View File

@@ -1,5 +1,3 @@
package utils
/* /*
MIT License MIT License
@@ -24,13 +22,14 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. SOFTWARE.
*/ */
package utils
import ( import (
"bytes" "bytes"
"crypto/tls" "crypto/tls"
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/go-mail/mail" "github.com/go-mail/mail"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"html/template" "html/template"
"io" "io"
"net/http" "net/http"
@@ -56,7 +55,7 @@ func parseTemplate[T any](data T, fileName string) (string, error) {
} }
func SendEmail(subject, body string) error { func SendEmail(subject, body string) error {
logger.Info("Start sending email notification....") Info("Start sending email notification....")
config := loadMailConfig() config := loadMailConfig()
emails := strings.Split(config.MailTo, ",") emails := strings.Split(config.MailTo, ",")
m := mail.NewMessage() m := mail.NewMessage()
@@ -68,16 +67,16 @@ func SendEmail(subject, body string) error {
d.TLSConfig = &tls.Config{InsecureSkipVerify: config.SkipTls} d.TLSConfig = &tls.Config{InsecureSkipVerify: config.SkipTls}
if err := d.DialAndSend(m); err != nil { if err := d.DialAndSend(m); err != nil {
logger.Error("Error could not send email : %v", err) Error("Error could not send email : %v", err)
return err return err
} }
logger.Info("Email notification has been sent") Info("Email notification has been sent")
return nil return nil
} }
func sendMessage(msg string) error { func sendMessage(msg string) error {
logger.Info("Sending Telegram notification... ") Info("Sending Telegram notification... ")
chatId := os.Getenv("TG_CHAT_ID") chatId := os.Getenv("TG_CHAT_ID")
body, _ := json.Marshal(map[string]string{ body, _ := json.Marshal(map[string]string{
"chat_id": chatId, "chat_id": chatId,
@@ -97,11 +96,11 @@ func sendMessage(msg string) error {
} }
code := response.StatusCode code := response.StatusCode
if code == 200 { if code == 200 {
logger.Info("Telegram notification has been sent") Info("Telegram notification has been sent")
return nil return nil
} else { } else {
body, _ := io.ReadAll(response.Body) body, _ := io.ReadAll(response.Body)
logger.Error("Error could not send message, error: %s", string(body)) Error("Error could not send message, error: %s", string(body))
return fmt.Errorf("error could not send message %s", string(body)) return fmt.Errorf("error could not send message %s", string(body))
} }
@@ -126,11 +125,11 @@ func NotifySuccess(notificationData *NotificationData) {
if err == nil { if err == nil {
body, err := parseTemplate(*notificationData, "email.tmpl") body, err := parseTemplate(*notificationData, "email.tmpl")
if err != nil { if err != nil {
logger.Error("Could not parse email template: %v", err) Error("Could not parse email template: %v", err)
} }
err = SendEmail(fmt.Sprintf("✅ Database Backup Notification %s", notificationData.Database), body) err = SendEmail(fmt.Sprintf("✅ Database Backup Notification %s", notificationData.Database), body)
if err != nil { if err != nil {
logger.Error("Could not send email: %v", err) Error("Could not send email: %v", err)
} }
} }
// Telegram notification // Telegram notification
@@ -138,12 +137,12 @@ func NotifySuccess(notificationData *NotificationData) {
if err == nil { if err == nil {
message, err := parseTemplate(*notificationData, "telegram.tmpl") message, err := parseTemplate(*notificationData, "telegram.tmpl")
if err != nil { if err != nil {
logger.Error("Could not parse telegram template: %v", err) Error("Could not parse telegram template: %v", err)
} }
err = sendMessage(message) err = sendMessage(message)
if err != nil { if err != nil {
logger.Error("Could not send Telegram message: %v", err) Error("Could not send Telegram message: %v", err)
} }
} }
} }
@@ -170,11 +169,11 @@ func NotifyError(error string) {
BackupReference: os.Getenv("BACKUP_REFERENCE"), BackupReference: os.Getenv("BACKUP_REFERENCE"),
}, "email-error.tmpl") }, "email-error.tmpl")
if err != nil { if err != nil {
logger.Error("Could not parse error template: %v", err) Error("Could not parse error template: %v", err)
} }
err = SendEmail("🔴 Urgent: Database Backup Failure Notification", body) err = SendEmail("🔴 Urgent: Database Backup Failure Notification", body)
if err != nil { if err != nil {
logger.Error("Could not send email: %v", err) Error("Could not send email: %v", err)
} }
} }
// Telegram notification // Telegram notification
@@ -186,13 +185,13 @@ func NotifyError(error string) {
BackupReference: os.Getenv("BACKUP_REFERENCE"), BackupReference: os.Getenv("BACKUP_REFERENCE"),
}, "telegram-error.tmpl") }, "telegram-error.tmpl")
if err != nil { if err != nil {
logger.Error("Could not parse error template: %v", err) Error("Could not parse error template: %v", err)
} }
err = sendMessage(message) err = sendMessage(message)
if err != nil { if err != nil {
logger.Error("Could not send telegram message: %v", err) Error("Could not send telegram message: %v", err)
} }
} }
} }
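Taken together, these hunks fold the former logger package into utils: the same Info/Warn/Error/Fatal helpers now sit beside the notification code they call, and every logger.X call site in the notification and helper files becomes a plain package-local call. A likely motivation, not stated in the diff itself, is Go's prohibition on import cycles: once Fatal needs to invoke NotifyError, a separate logger package importing utils while utils keeps importing logger would no longer build, so merging the two packages is the simplest way to let fatal errors trigger the failure notification.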

View File

@@ -1,4 +1,3 @@
// Package utils /
/* /*
MIT License MIT License
@@ -22,11 +21,11 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. SOFTWARE.
*/ */
package utils package utils
import ( import (
"fmt" "fmt"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/robfig/cron/v3" "github.com/robfig/cron/v3"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"io" "io"
@@ -36,7 +35,7 @@ import (
"time" "time"
) )
var Version = "development" var Version = ""
// FileExists checks if the file does exist // FileExists checks if the file does exist
func FileExists(filename string) bool { func FileExists(filename string) bool {
@@ -112,7 +111,7 @@ func CopyFile(src, dst string) error {
} }
func ChangePermission(filePath string, mod int) { func ChangePermission(filePath string, mod int) {
if err := os.Chmod(filePath, fs.FileMode(mod)); err != nil { if err := os.Chmod(filePath, fs.FileMode(mod)); err != nil {
logger.Fatal("Error changing permissions of %s: %v\n", filePath, err) Fatal("Error changing permissions of %s: %v\n", filePath, err)
} }
} }
@@ -174,7 +173,7 @@ func GetEnvVariable(envName, oldEnvName string) string {
if err != nil { if err != nil {
return value return value
} }
logger.Warn("%s is deprecated, please use %s instead! ", oldEnvName, envName) Warn("%s is deprecated, please use %s instead! ", oldEnvName, envName)
} }
} }
return value return value
@@ -221,7 +220,7 @@ func GetIntEnv(envName string) int {
} }
ret, err := strconv.Atoi(val) ret, err := strconv.Atoi(val)
if err != nil { if err != nil {
logger.Error("Error: %v", err) Error("Error: %v", err)
} }
return ret return ret
} }
@@ -246,7 +245,7 @@ func CronNextTime(cronExpr string) time.Time {
// Parse the cron expression // Parse the cron expression
schedule, err := cron.ParseStandard(cronExpr) schedule, err := cron.ParseStandard(cronExpr)
if err != nil { if err != nil {
logger.Error("Error parsing cron expression: %s", err) Error("Error parsing cron expression: %s", err)
return time.Time{} return time.Time{}
} }
// Get the current time // Get the current time