refactor: replace utils logging with a dedicated pkg/logger package, add golangci-lint config, and inject the build version via ldflags

Author: Jonas Kaninda
Date:   2024-12-06 14:21:55 +01:00
Parent: 9016a9ec7a
Commit: afd4afc83b
19 changed files with 483 additions and 340 deletions

.golangci.yml (new file)

@@ -0,0 +1,43 @@
run:
timeout: 5m
allow-parallel-runners: true
issues:
# don't skip warning about doc comments
# don't exclude the default set of lint
exclude-use-default: false
# restore some of the defaults
# (fill in the rest as needed)
exclude-rules:
- path: "internal/*"
linters:
- dupl
- lll
- goimports
linters:
disable-all: true
enable:
- dupl
- errcheck
- copyloopvar
- ginkgolinter
- goconst
- gocyclo
- gofmt
- gosimple
- govet
- ineffassign
- misspell
- nakedret
- prealloc
- revive
- staticcheck
- typecheck
- unconvert
- unparam
- unused
linters-settings:
revive:
rules:
- name: comment-spacings


@@ -1,5 +1,6 @@
FROM golang:1.23.2 AS build FROM golang:1.23.2 AS build
WORKDIR /app WORKDIR /app
ARG appVersion=""
# Copy the source code. # Copy the source code.
COPY . . COPY . .
@@ -7,7 +8,7 @@ COPY . .
RUN go mod download RUN go mod download
# Build # Build
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-X 'github.com/jkaninda/mysql-bkup/utils.Version=${appVersion}'" -o /app/mysql-bkup
FROM alpine:3.20.3 FROM alpine:3.20.3
ENV TZ=UTC ENV TZ=UTC
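
The new `-ldflags` flag stamps the `appVersion` build argument into a package-level string at link time (the `utils.Version` variable added later in this diff, which defaults to `"development"`). A minimal, self-contained sketch of the same pattern; the `main.Version` name and the version value are placeholders for illustration:

```go
// Sketch of injecting a version string at link time with -ldflags -X.
package main

import "fmt"

// Version defaults to "development" for a plain `go build`; a release build
// overrides it with, for example:
//   go build -ldflags="-X 'main.Version=v1.2.3'" .
var Version = "development"

func main() {
	fmt.Println("Version:", Version)
}
```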


@@ -91,7 +91,6 @@ services:
networks: networks:
web: web:
``` ```
### Docker recurring backup ### Docker recurring backup
```shell ```shell


@@ -26,6 +26,7 @@ package cmd
import ( import (
"github.com/jkaninda/mysql-bkup/internal" "github.com/jkaninda/mysql-bkup/internal"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/jkaninda/mysql-bkup/utils" "github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@@ -38,7 +39,7 @@ var BackupCmd = &cobra.Command{
if len(args) == 0 { if len(args) == 0 {
internal.StartBackup(cmd) internal.StartBackup(cmd)
} else { } else {
utils.Fatal(`"backup" accepts no argument %q`, args) logger.Fatal(`"backup" accepts no argument %q`, args)
} }
}, },
} }


@@ -26,7 +26,7 @@ package cmd
import ( import (
"github.com/jkaninda/mysql-bkup/internal" "github.com/jkaninda/mysql-bkup/internal"
"github.com/jkaninda/mysql-bkup/utils" "github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@@ -37,7 +37,7 @@ var MigrateCmd = &cobra.Command{
if len(args) == 0 { if len(args) == 0 {
internal.StartMigration(cmd) internal.StartMigration(cmd)
} else { } else {
utils.Fatal(`"migrate" accepts no argument %q`, args) logger.Fatal(`"migrate" accepts no argument %q`, args)
} }


@@ -25,6 +25,7 @@ SOFTWARE.
*/ */
import ( import (
"github.com/jkaninda/mysql-bkup/internal" "github.com/jkaninda/mysql-bkup/internal"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/jkaninda/mysql-bkup/utils" "github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@@ -37,7 +38,7 @@ var RestoreCmd = &cobra.Command{
if len(args) == 0 { if len(args) == 0 {
internal.StartRestore(cmd) internal.StartRestore(cmd)
} else { } else {
utils.Fatal(`"restore" accepts no argument %q`, args) logger.Fatal(`"restore" accepts no argument %q`, args)
} }


@@ -6,23 +6,39 @@ nav_order: 1
# About mysql-bkup
{:.no_toc}

**MYSQL-BKUP** is a Docker container image designed to **backup, restore, and migrate MySQL databases**.
It supports a variety of storage options and ensures data security through GPG encryption.
{: .fs-6 .fw-300 }

---

## Features

- **Storage Options:**
  - Local storage
  - AWS S3 or any S3-compatible object storage
  - FTP
  - SSH-compatible storage
- **Data Security:**
  - Backups can be encrypted using **GPG** to ensure confidentiality.
- **Deployment Flexibility:**
  - Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image.
  - Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**.
  - Supports recurring backups of MySQL databases when deployed:
    - On Docker for automated backup schedules.
    - As a **Job** or **CronJob** on Kubernetes.
- **Notifications:**
  - Get real-time updates on backup success or failure via:
    - **Telegram**
    - **Email**

## Use Cases

- **Automated Recurring Backups:** Schedule regular backups for MySQL databases.
- **Cross-Environment Migration:** Easily migrate your MySQL databases across different environments using supported storage options.
- **Secure Backup Management:** Protect your data with GPG encryption.

We are open to receiving stars, PRs, and issues!
{: .note }

go.mod

@@ -1,6 +1,6 @@
module github.com/jkaninda/mysql-bkup module github.com/jkaninda/mysql-bkup
go 1.22.5 go 1.23.2
require github.com/spf13/pflag v1.0.5 // indirect require github.com/spf13/pflag v1.0.5 // indirect

go.sum

@@ -11,6 +11,8 @@ github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9Hu
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ=
github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=


@@ -31,6 +31,7 @@ import (
"github.com/jkaninda/go-storage/pkg/local" "github.com/jkaninda/go-storage/pkg/local"
"github.com/jkaninda/go-storage/pkg/s3" "github.com/jkaninda/go-storage/pkg/s3"
"github.com/jkaninda/go-storage/pkg/ssh" "github.com/jkaninda/go-storage/pkg/ssh"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/jkaninda/mysql-bkup/utils" "github.com/jkaninda/mysql-bkup/utils"
"github.com/robfig/cron/v3" "github.com/robfig/cron/v3"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@@ -55,7 +56,7 @@ func StartBackup(cmd *cobra.Command) {
if utils.IsValidCronExpression(config.cronExpression) { if utils.IsValidCronExpression(config.cronExpression) {
scheduledMode(dbConf, config) scheduledMode(dbConf, config)
} else { } else {
utils.Fatal("Cron expression is not valid: %s", config.cronExpression) logger.Fatal("Cron expression is not valid: %s", config.cronExpression)
} }
} }
} else { } else {
@@ -66,22 +67,22 @@ func StartBackup(cmd *cobra.Command) {
// scheduledMode Runs backup in scheduled mode // scheduledMode Runs backup in scheduled mode
func scheduledMode(db *dbConfig, config *BackupConfig) { func scheduledMode(db *dbConfig, config *BackupConfig) {
utils.Info("Running in Scheduled mode") logger.Info("Running in Scheduled mode")
utils.Info("Backup cron expression: %s", config.cronExpression) logger.Info("Backup cron expression: %s", config.cronExpression)
utils.Info("The next scheduled time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat)) logger.Info("The next scheduled time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
utils.Info("Storage type %s ", config.storage) logger.Info("Storage type %s ", config.storage)
// Test backup // Test backup
utils.Info("Testing backup configurations...") logger.Info("Testing backup configurations...")
BackupTask(db, config) testDatabaseConnection(db)
utils.Info("Testing backup configurations...done") logger.Info("Testing backup configurations...done")
utils.Info("Creating backup job...") logger.Info("Creating backup job...")
// Create a new cron instance // Create a new cron instance
c := cron.New() c := cron.New()
_, err := c.AddFunc(config.cronExpression, func() { _, err := c.AddFunc(config.cronExpression, func() {
BackupTask(db, config) BackupTask(db, config)
utils.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat)) logger.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
}) })
if err != nil { if err != nil {
@@ -89,8 +90,8 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
} }
// Start the cron scheduler // Start the cron scheduler
c.Start() c.Start()
utils.Info("Creating backup job...done") logger.Info("Creating backup job...done")
utils.Info("Backup job started") logger.Info("Backup job started")
defer c.Stop() defer c.Stop()
select {} select {}
} }
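
`scheduledMode` now validates the cron expression, runs `testDatabaseConnection` once up front, then registers `BackupTask` with `robfig/cron` and blocks. A minimal sketch of that scheduling pattern with `github.com/robfig/cron/v3`; the expression and the job body are placeholders:

```go
// Sketch of the validate–register–start–block scheduling pattern used above.
package main

import (
	"log"

	"github.com/robfig/cron/v3"
)

func main() {
	expr := "*/5 * * * *" // placeholder: every 5 minutes

	// Reject invalid expressions before creating the job.
	if _, err := cron.ParseStandard(expr); err != nil {
		log.Fatalf("cron expression is not valid: %v", err)
	}

	c := cron.New()
	if _, err := c.AddFunc(expr, func() {
		log.Println("running backup task...") // stand-in for BackupTask(db, config)
	}); err != nil {
		log.Fatalf("could not create backup job: %v", err)
	}

	c.Start()
	defer c.Stop()
	select {} // keep the process alive; the cron goroutine fires the job
}
```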
@@ -108,7 +109,7 @@ func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
// BackupTask backups database // BackupTask backups database
func BackupTask(db *dbConfig, config *BackupConfig) { func BackupTask(db *dbConfig, config *BackupConfig) {
utils.Info("Starting backup task...") logger.Info("Starting backup task...")
// Generate file name // Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405")) backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
if config.disableCompression { if config.disableCompression {
@@ -129,37 +130,42 @@ func BackupTask(db *dbConfig, config *BackupConfig) {
} }
} }
func startMultiBackup(bkConfig *BackupConfig, configFile string) { func startMultiBackup(bkConfig *BackupConfig, configFile string) {
utils.Info("Starting backup task...") logger.Info("Starting backup task...")
conf, err := readConf(configFile) conf, err := readConf(configFile)
if err != nil { if err != nil {
utils.Fatal("Error reading config file: %s", err) logger.Fatal("Error reading config file: %s", err)
} }
// Check if cronExpression is defined in config file // Check if cronExpression is defined in config file
if conf.CronExpression != "" { if conf.CronExpression != "" {
bkConfig.cronExpression = conf.CronExpression bkConfig.cronExpression = conf.CronExpression
} }
if len(conf.Databases) == 0 {
logger.Fatal("No databases found")
}
// Check if cronExpression is defined // Check if cronExpression is defined
if bkConfig.cronExpression == "" { if bkConfig.cronExpression == "" {
multiBackupTask(conf.Databases, bkConfig) multiBackupTask(conf.Databases, bkConfig)
} else { } else {
// Check if cronExpression is valid // Check if cronExpression is valid
if utils.IsValidCronExpression(bkConfig.cronExpression) { if utils.IsValidCronExpression(bkConfig.cronExpression) {
utils.Info("Running backup in Scheduled mode") logger.Info("Running backup in Scheduled mode")
utils.Info("Backup cron expression: %s", bkConfig.cronExpression) logger.Info("Backup cron expression: %s", bkConfig.cronExpression)
utils.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat)) logger.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
utils.Info("Storage type %s ", bkConfig.storage) logger.Info("Storage type %s ", bkConfig.storage)
// Test backup // Test backup
utils.Info("Testing backup configurations...") logger.Info("Testing backup configurations...")
multiBackupTask(conf.Databases, bkConfig) for _, db := range conf.Databases {
utils.Info("Testing backup configurations...done") testDatabaseConnection(getDatabase(db))
utils.Info("Creating backup job...") }
logger.Info("Testing backup configurations...done")
logger.Info("Creating backup job...")
// Create a new cron instance // Create a new cron instance
c := cron.New() c := cron.New()
_, err := c.AddFunc(bkConfig.cronExpression, func() { _, err := c.AddFunc(bkConfig.cronExpression, func() {
multiBackupTask(conf.Databases, bkConfig) multiBackupTask(conf.Databases, bkConfig)
utils.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat)) logger.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
}) })
if err != nil { if err != nil {
@@ -167,13 +173,13 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
} }
// Start the cron scheduler // Start the cron scheduler
c.Start() c.Start()
utils.Info("Creating backup job...done") logger.Info("Creating backup job...done")
utils.Info("Backup job started") logger.Info("Backup job started")
defer c.Stop() defer c.Stop()
select {} select {}
} else { } else {
utils.Fatal("Cron expression is not valid: %s", bkConfig.cronExpression) logger.Fatal("Cron expression is not valid: %s", bkConfig.cronExpression)
} }
} }
@@ -181,10 +187,9 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
// BackupDatabase backup database // BackupDatabase backup database
func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) { func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
storagePath = os.Getenv("STORAGE_PATH") storagePath = os.Getenv("STORAGE_PATH")
utils.Info("Starting database backup...") logger.Info("Starting database backup...")
err := os.Setenv("MYSQL_PWD", db.dbPassword) err := os.Setenv("MYSQL_PWD", db.dbPassword)
if err != nil { if err != nil {
@@ -192,7 +197,7 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
} }
testDatabaseConnection(db) testDatabaseConnection(db)
// Backup Database database // Backup Database database
utils.Info("Backing up database...") logger.Info("Backing up database...")
// Verify is compression is disabled // Verify is compression is disabled
if disableCompression { if disableCompression {
@@ -205,21 +210,26 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
) )
output, err := cmd.Output() output, err := cmd.Output()
if err != nil { if err != nil {
log.Fatal(err) logger.Fatal(err.Error())
} }
// save output // save output
file, err := os.Create(filepath.Join(tmpPath, backupFileName)) file, err := os.Create(filepath.Join(tmpPath, backupFileName))
if err != nil { if err != nil {
log.Fatal(err) logger.Fatal(err.Error())
} }
defer file.Close() defer func(file *os.File) {
err := file.Close()
if err != nil {
logger.Fatal(err.Error())
}
}(file)
_, err = file.Write(output) _, err = file.Write(output)
if err != nil { if err != nil {
log.Fatal(err) logger.Fatal(err.Error())
} }
utils.Info("Database has been backed up") logger.Info("Database has been backed up")
} else { } else {
// Execute mysqldump // Execute mysqldump
@@ -231,9 +241,9 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
gzipCmd := exec.Command("gzip") gzipCmd := exec.Command("gzip")
gzipCmd.Stdin = stdout gzipCmd.Stdin = stdout
gzipCmd.Stdout, err = os.Create(filepath.Join(tmpPath, backupFileName)) gzipCmd.Stdout, err = os.Create(filepath.Join(tmpPath, backupFileName))
gzipCmd.Start() err = gzipCmd.Start()
if err != nil { if err != nil {
log.Fatal(err) return
} }
if err := cmd.Run(); err != nil { if err := cmd.Run(); err != nil {
log.Fatal(err) log.Fatal(err)
@@ -241,12 +251,12 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
if err := gzipCmd.Wait(); err != nil { if err := gzipCmd.Wait(); err != nil {
log.Fatal(err) log.Fatal(err)
} }
utils.Info("Database has been backed up") logger.Info("Database has been backed up")
} }
} }
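
When compression is enabled, `BackupDatabase` pipes `mysqldump` output through `gzip` into the target file. A hedged sketch of that pipeline with every error checked explicitly; host, credentials, and paths are placeholders, and it assumes `mysqldump` and `gzip` are on `PATH` (the real code also sets `MYSQL_PWD` for authentication):

```go
// Sketch: stream mysqldump through gzip into a compressed backup file.
package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	dump := exec.Command("mysqldump", "-h", "127.0.0.1", "-P", "3306", "-u", "root", "mydb")
	gz := exec.Command("gzip")

	// Wire mysqldump's stdout into gzip's stdin.
	stdout, err := dump.StdoutPipe()
	if err != nil {
		log.Fatalf("stdout pipe: %v", err)
	}
	gz.Stdin = stdout

	out, err := os.Create("/tmp/mydb.sql.gz")
	if err != nil {
		log.Fatalf("create output file: %v", err)
	}
	defer out.Close()
	gz.Stdout = out

	// Start the consumer, run the producer to completion, then drain gzip.
	if err := gz.Start(); err != nil {
		log.Fatalf("start gzip: %v", err)
	}
	if err := dump.Run(); err != nil {
		log.Fatalf("mysqldump failed: %v", err)
	}
	if err := gz.Wait(); err != nil {
		log.Fatalf("gzip failed: %v", err)
	}
	log.Println("Database has been backed up")
}
```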
func localBackup(db *dbConfig, config *BackupConfig) { func localBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to local storage") logger.Info("Backup database to local storage")
startTime = time.Now().Format(utils.TimeFormat()) startTime = time.Now().Format(utils.TimeFormat())
BackupDatabase(db, config.backupFileName, disableCompression) BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName finalFileName := config.backupFileName
@@ -256,19 +266,19 @@ func localBackup(db *dbConfig, config *BackupConfig) {
} }
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName)) fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil { if err != nil {
utils.Error("Error: %s", err) logger.Error("Error: %s", err)
} }
backupSize = fileInfo.Size() backupSize = fileInfo.Size()
utils.Info("Backup name is %s", finalFileName) logger.Info("Backup name is %s", finalFileName)
localStorage := local.NewStorage(local.Config{ localStorage := local.NewStorage(local.Config{
LocalPath: tmpPath, LocalPath: tmpPath,
RemotePath: storagePath, RemotePath: storagePath,
}) })
err = localStorage.Copy(finalFileName) err = localStorage.Copy(finalFileName)
if err != nil { if err != nil {
utils.Fatal("Error copying backup file: %s", err) logger.Fatal("Error copying backup file: %s", err)
} }
utils.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName)) logger.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName))
// Send notification // Send notification
utils.NotifySuccess(&utils.NotificationData{ utils.NotifySuccess(&utils.NotificationData{
File: finalFileName, File: finalFileName,
@@ -283,18 +293,18 @@ func localBackup(db *dbConfig, config *BackupConfig) {
if config.prune { if config.prune {
err = localStorage.Prune(config.backupRetention) err = localStorage.Prune(config.backupRetention)
if err != nil { if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err) logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
} }
} }
// Delete temp // Delete temp
deleteTemp() deleteTemp()
utils.Info("Backup completed successfully") logger.Info("Backup completed successfully")
} }
func s3Backup(db *dbConfig, config *BackupConfig) { func s3Backup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to s3 storage") logger.Info("Backup database to s3 storage")
startTime = time.Now().Format(utils.TimeFormat()) startTime = time.Now().Format(utils.TimeFormat())
// Backup database // Backup database
BackupDatabase(db, config.backupFileName, disableCompression) BackupDatabase(db, config.backupFileName, disableCompression)
@@ -303,12 +313,12 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
encryptBackup(config) encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg") finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
} }
utils.Info("Uploading backup archive to remote storage S3 ... ") logger.Info("Uploading backup archive to remote storage S3 ... ")
awsConfig := initAWSConfig() awsConfig := initAWSConfig()
if config.remotePath == "" { if config.remotePath == "" {
config.remotePath = awsConfig.remotePath config.remotePath = awsConfig.remotePath
} }
utils.Info("Backup name is %s", finalFileName) logger.Info("Backup name is %s", finalFileName)
s3Storage, err := s3.NewStorage(s3.Config{ s3Storage, err := s3.NewStorage(s3.Config{
Endpoint: awsConfig.endpoint, Endpoint: awsConfig.endpoint,
Bucket: awsConfig.bucket, Bucket: awsConfig.bucket,
@@ -321,16 +331,16 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
LocalPath: tmpPath, LocalPath: tmpPath,
}) })
if err != nil { if err != nil {
utils.Fatal("Error creating s3 storage: %s", err) logger.Fatal("Error creating s3 storage: %s", err)
} }
err = s3Storage.Copy(finalFileName) err = s3Storage.Copy(finalFileName)
if err != nil { if err != nil {
utils.Fatal("Error copying backup file: %s", err) logger.Fatal("Error copying backup file: %s", err)
} }
// Get backup info // Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName)) fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil { if err != nil {
utils.Error("Error: %s", err) logger.Error("Error: %s", err)
} }
backupSize = fileInfo.Size() backupSize = fileInfo.Size()
@@ -344,11 +354,11 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
if config.prune { if config.prune {
err := s3Storage.Prune(config.backupRetention) err := s3Storage.Prune(config.backupRetention)
if err != nil { if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err) logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
} }
} }
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName)) logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
utils.Info("Uploading backup archive to remote storage S3 ... done ") logger.Info("Uploading backup archive to remote storage S3 ... done ")
// Send notification // Send notification
utils.NotifySuccess(&utils.NotificationData{ utils.NotifySuccess(&utils.NotificationData{
File: finalFileName, File: finalFileName,
@@ -361,11 +371,11 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
}) })
// Delete temp // Delete temp
deleteTemp() deleteTemp()
utils.Info("Backup completed successfully") logger.Info("Backup completed successfully")
} }
func sshBackup(db *dbConfig, config *BackupConfig) { func sshBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to Remote server") logger.Info("Backup database to Remote server")
startTime = time.Now().Format(utils.TimeFormat()) startTime = time.Now().Format(utils.TimeFormat())
// Backup database // Backup database
BackupDatabase(db, config.backupFileName, disableCompression) BackupDatabase(db, config.backupFileName, disableCompression)
@@ -374,11 +384,11 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
encryptBackup(config) encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg") finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
} }
utils.Info("Uploading backup archive to remote storage ... ") logger.Info("Uploading backup archive to remote storage ... ")
utils.Info("Backup name is %s", finalFileName) logger.Info("Backup name is %s", finalFileName)
sshConfig, err := loadSSHConfig() sshConfig, err := loadSSHConfig()
if err != nil { if err != nil {
utils.Fatal("Error loading ssh config: %s", err) logger.Fatal("Error loading ssh config: %s", err)
} }
sshStorage, err := ssh.NewStorage(ssh.Config{ sshStorage, err := ssh.NewStorage(ssh.Config{
@@ -386,39 +396,38 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
Port: sshConfig.port, Port: sshConfig.port,
User: sshConfig.user, User: sshConfig.user,
Password: sshConfig.password, Password: sshConfig.password,
IdentifyFile: sshConfig.identifyFile,
RemotePath: config.remotePath, RemotePath: config.remotePath,
LocalPath: tmpPath, LocalPath: tmpPath,
}) })
if err != nil { if err != nil {
utils.Fatal("Error creating SSH storage: %s", err) logger.Fatal("Error creating SSH storage: %s", err)
} }
err = sshStorage.Copy(finalFileName) err = sshStorage.Copy(finalFileName)
if err != nil { if err != nil {
utils.Fatal("Error copying backup file: %s", err) logger.Fatal("Error copying backup file: %s", err)
} }
// Get backup info // Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName)) fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil { if err != nil {
utils.Error("Error: %s", err) logger.Error("Error: %s", err)
} }
backupSize = fileInfo.Size() backupSize = fileInfo.Size()
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName)) logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
// Delete backup file from tmp folder // Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName)) err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil { if err != nil {
utils.Error("Error deleting file: %v", err) logger.Error("Error deleting file: %v", err)
} }
if config.prune { if config.prune {
err := sshStorage.Prune(config.backupRetention) err := sshStorage.Prune(config.backupRetention)
if err != nil { if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err) logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
} }
} }
utils.Info("Uploading backup archive to remote storage ... done ") logger.Info("Uploading backup archive to remote storage ... done ")
// Send notification // Send notification
utils.NotifySuccess(&utils.NotificationData{ utils.NotifySuccess(&utils.NotificationData{
File: finalFileName, File: finalFileName,
@@ -431,11 +440,11 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
}) })
// Delete temp // Delete temp
deleteTemp() deleteTemp()
utils.Info("Backup completed successfully") logger.Info("Backup completed successfully")
} }
func ftpBackup(db *dbConfig, config *BackupConfig) { func ftpBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to the remote FTP server") logger.Info("Backup database to the remote FTP server")
startTime = time.Now().Format(utils.TimeFormat()) startTime = time.Now().Format(utils.TimeFormat())
// Backup database // Backup database
@@ -445,8 +454,8 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
encryptBackup(config) encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg") finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
} }
utils.Info("Uploading backup archive to the remote FTP server ... ") logger.Info("Uploading backup archive to the remote FTP server ... ")
utils.Info("Backup name is %s", finalFileName) logger.Info("Backup name is %s", finalFileName)
ftpConfig := loadFtpConfig() ftpConfig := loadFtpConfig()
ftpStorage, err := ftp.NewStorage(ftp.Config{ ftpStorage, err := ftp.NewStorage(ftp.Config{
Host: ftpConfig.host, Host: ftpConfig.host,
@@ -457,34 +466,34 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
LocalPath: tmpPath, LocalPath: tmpPath,
}) })
if err != nil { if err != nil {
utils.Fatal("Error creating SSH storage: %s", err) logger.Fatal("Error creating SSH storage: %s", err)
} }
err = ftpStorage.Copy(finalFileName) err = ftpStorage.Copy(finalFileName)
if err != nil { if err != nil {
utils.Fatal("Error copying backup file: %s", err) logger.Fatal("Error copying backup file: %s", err)
} }
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName)) logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
// Get backup info // Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName)) fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil { if err != nil {
utils.Error("Error: %s", err) logger.Error("Error: %s", err)
} }
backupSize = fileInfo.Size() backupSize = fileInfo.Size()
// Delete backup file from tmp folder // Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName)) err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil { if err != nil {
utils.Error("Error deleting file: %v", err) logger.Error("Error deleting file: %v", err)
} }
if config.prune { if config.prune {
err := ftpStorage.Prune(config.backupRetention) err := ftpStorage.Prune(config.backupRetention)
if err != nil { if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err) logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
} }
} }
utils.Info("Uploading backup archive to the remote FTP server ... done ") logger.Info("Uploading backup archive to the remote FTP server ... done ")
// Send notification // Send notification
utils.NotifySuccess(&utils.NotificationData{ utils.NotifySuccess(&utils.NotificationData{
@@ -498,34 +507,34 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
}) })
// Delete temp // Delete temp
deleteTemp() deleteTemp()
utils.Info("Backup completed successfully") logger.Info("Backup completed successfully")
} }
func encryptBackup(config *BackupConfig) { func encryptBackup(config *BackupConfig) {
backupFile, err := os.ReadFile(filepath.Join(tmpPath, config.backupFileName)) backupFile, err := os.ReadFile(filepath.Join(tmpPath, config.backupFileName))
outputFile := fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension) outputFile := fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension)
if err != nil { if err != nil {
utils.Fatal("Error reading backup file: %s ", err) logger.Fatal("Error reading backup file: %s ", err)
} }
if config.usingKey { if config.usingKey {
utils.Info("Encrypting backup using public key...") logger.Info("Encrypting backup using public key...")
pubKey, err := os.ReadFile(config.publicKey) pubKey, err := os.ReadFile(config.publicKey)
if err != nil { if err != nil {
utils.Fatal("Error reading public key: %s ", err) logger.Fatal("Error reading public key: %s ", err)
} }
err = encryptor.EncryptWithPublicKey(backupFile, fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension), pubKey) err = encryptor.EncryptWithPublicKey(backupFile, fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension), pubKey)
if err != nil { if err != nil {
utils.Fatal("Error encrypting backup file: %v ", err) logger.Fatal("Error encrypting backup file: %v ", err)
} }
utils.Info("Encrypting backup using public key...done") logger.Info("Encrypting backup using public key...done")
} else if config.passphrase != "" { } else if config.passphrase != "" {
utils.Info("Encrypting backup using passphrase...") logger.Info("Encrypting backup using passphrase...")
err := encryptor.Encrypt(backupFile, outputFile, config.passphrase) err := encryptor.Encrypt(backupFile, outputFile, config.passphrase)
if err != nil { if err != nil {
utils.Fatal("error during encrypting backup %v", err) logger.Fatal("error during encrypting backup %v", err)
} }
utils.Info("Encrypting backup using passphrase...done") logger.Info("Encrypting backup using passphrase...done")
} }


@@ -26,6 +26,7 @@ package internal
import ( import (
"fmt" "fmt"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/jkaninda/mysql-bkup/utils" "github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"os" "os"
@@ -115,8 +116,8 @@ func initDbConfig(cmd *cobra.Command) *dbConfig {
err := utils.CheckEnvVars(dbHVars) err := utils.CheckEnvVars(dbHVars)
if err != nil { if err != nil {
utils.Error("Please make sure all required environment variables for database are set") logger.Error("Please make sure all required environment variables for database are set")
utils.Fatal("Error checking environment variables: %s", err) logger.Fatal("Error checking environment variables: %s", err)
} }
return &dConf return &dConf
} }
@@ -158,8 +159,8 @@ func loadFtpConfig() *FTPConfig {
fConfig.remotePath = os.Getenv("REMOTE_PATH") fConfig.remotePath = os.Getenv("REMOTE_PATH")
err := utils.CheckEnvVars(ftpVars) err := utils.CheckEnvVars(ftpVars)
if err != nil { if err != nil {
utils.Error("Please make sure all required environment variables for FTP are set") logger.Error("Please make sure all required environment variables for FTP are set")
utils.Fatal("Error missing environment variables: %s", err) logger.Fatal("Error missing environment variables: %s", err)
} }
return &fConfig return &fConfig
} }
@@ -185,8 +186,8 @@ func initAWSConfig() *AWSConfig {
aConfig.forcePathStyle = forcePathStyle aConfig.forcePathStyle = forcePathStyle
err = utils.CheckEnvVars(awsVars) err = utils.CheckEnvVars(awsVars)
if err != nil { if err != nil {
utils.Error("Please make sure all required environment variables for AWS S3 are set") logger.Error("Please make sure all required environment variables for AWS S3 are set")
utils.Fatal("Error checking environment variables: %s", err) logger.Fatal("Error checking environment variables: %s", err)
} }
return &aConfig return &aConfig
} }
@@ -276,15 +277,15 @@ func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
func initTargetDbConfig() *targetDbConfig { func initTargetDbConfig() *targetDbConfig {
tdbConfig := targetDbConfig{} tdbConfig := targetDbConfig{}
tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST") tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
tdbConfig.targetDbPort = utils.EnvWithDefault("TARGET_DB_PORT", "5432") tdbConfig.targetDbPort = utils.EnvWithDefault("TARGET_DB_PORT", "3306")
tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME") tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME")
tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME") tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME")
tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD") tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD")
err := utils.CheckEnvVars(tdbRVars) err := utils.CheckEnvVars(tdbRVars)
if err != nil { if err != nil {
utils.Error("Please make sure all required environment variables for the target database are set") logger.Error("Please make sure all required environment variables for the target database are set")
utils.Fatal("Error checking target database environment variables: %s", err) logger.Fatal("Error checking target database environment variables: %s", err)
} }
return &tdbConfig return &tdbConfig
} }


@@ -27,6 +27,7 @@ package internal
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/jkaninda/mysql-bkup/utils" "github.com/jkaninda/mysql-bkup/utils"
"gopkg.in/yaml.v3" "gopkg.in/yaml.v3"
"os" "os"
@@ -36,13 +37,14 @@ import (
) )
func intro() { func intro() {
utils.Info("Starting MySQL Backup...") fmt.Println("Starting MySQL Backup...")
utils.Info("Copyright (c) 2024 Jonas Kaninda ") fmt.Printf("Version: %s\n", utils.Version)
fmt.Println("Copyright (c) 2024 Jonas Kaninda")
} }
// copyToTmp copy file to temporary directory // copyToTmp copy file to temporary directory
func deleteTemp() { func deleteTemp() {
utils.Info("Deleting %s ...", tmpPath) logger.Info("Deleting %s ...", tmpPath)
err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error { err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
if err != nil { if err != nil {
return err return err
@@ -58,9 +60,9 @@ func deleteTemp() {
return nil return nil
}) })
if err != nil { if err != nil {
utils.Error("Error deleting files: %v", err) logger.Error("Error deleting files: %v", err)
} else { } else {
utils.Info("Deleting %s ... done", tmpPath) logger.Info("Deleting %s ... done", tmpPath)
} }
} }
@@ -70,7 +72,7 @@ func testDatabaseConnection(db *dbConfig) {
if err != nil { if err != nil {
return return
} }
utils.Info("Connecting to %s database ...", db.dbName) logger.Info("Connecting to %s database ...", db.dbName)
cmd := exec.Command("mysql", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName, "-e", "quit") cmd := exec.Command("mysql", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName, "-e", "quit")
// Capture the output // Capture the output
var out bytes.Buffer var out bytes.Buffer
@@ -78,10 +80,10 @@ func testDatabaseConnection(db *dbConfig) {
cmd.Stderr = &out cmd.Stderr = &out
err = cmd.Run() err = cmd.Run()
if err != nil { if err != nil {
utils.Fatal("Error testing database connection: %v\nOutput: %s", err, out.String()) logger.Fatal("Error testing database connection: %v\nOutput: %s", err, out.String())
} }
utils.Info("Successfully connected to %s database", db.dbName) logger.Info("Successfully connected to %s database", db.dbName)
} }


@@ -26,14 +26,14 @@ package internal
import ( import (
"fmt" "fmt"
"github.com/jkaninda/mysql-bkup/utils" "github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"time" "time"
) )
func StartMigration(cmd *cobra.Command) { func StartMigration(cmd *cobra.Command) {
intro() intro()
utils.Info("Starting database migration...") logger.Info("Starting database migration...")
// Get DB config // Get DB config
dbConf = initDbConfig(cmd) dbConf = initDbConfig(cmd)
targetDbConf = initTargetDbConfig() targetDbConf = initTargetDbConfig()
@@ -53,8 +53,8 @@ func StartMigration(cmd *cobra.Command) {
// Backup source Database // Backup source Database
BackupDatabase(dbConf, backupFileName, true) BackupDatabase(dbConf, backupFileName, true)
// Restore source database into target database // Restore source database into target database
utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName) logger.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
RestoreDatabase(&newDbConfig, conf) RestoreDatabase(&newDbConfig, conf)
utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName) logger.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
utils.Info("Database migration completed.") logger.Info("Database migration completed.")
} }


@@ -1,9 +1,4 @@
// Package internal / // Package internal /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package internal package internal
/* /*
@@ -35,6 +30,7 @@ import (
"github.com/jkaninda/go-storage/pkg/local" "github.com/jkaninda/go-storage/pkg/local"
"github.com/jkaninda/go-storage/pkg/s3" "github.com/jkaninda/go-storage/pkg/s3"
"github.com/jkaninda/go-storage/pkg/ssh" "github.com/jkaninda/go-storage/pkg/ssh"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/jkaninda/mysql-bkup/utils" "github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"os" "os"
@@ -61,20 +57,20 @@ func StartRestore(cmd *cobra.Command) {
} }
} }
func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) { func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) {
utils.Info("Restore database from local") logger.Info("Restore database from local")
localStorage := local.NewStorage(local.Config{ localStorage := local.NewStorage(local.Config{
RemotePath: storagePath, RemotePath: storagePath,
LocalPath: tmpPath, LocalPath: tmpPath,
}) })
err := localStorage.CopyFrom(restoreConf.file) err := localStorage.CopyFrom(restoreConf.file)
if err != nil { if err != nil {
utils.Fatal("Error copying backup file: %s", err) logger.Fatal("Error copying backup file: %s", err)
} }
RestoreDatabase(dbConf, restoreConf) RestoreDatabase(dbConf, restoreConf)
} }
func restoreFromS3(db *dbConfig, conf *RestoreConfig) { func restoreFromS3(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from s3") logger.Info("Restore database from s3")
awsConfig := initAWSConfig() awsConfig := initAWSConfig()
if conf.remotePath == "" { if conf.remotePath == "" {
conf.remotePath = awsConfig.remotePath conf.remotePath = awsConfig.remotePath
@@ -91,19 +87,19 @@ func restoreFromS3(db *dbConfig, conf *RestoreConfig) {
LocalPath: tmpPath, LocalPath: tmpPath,
}) })
if err != nil { if err != nil {
utils.Fatal("Error creating s3 storage: %s", err) logger.Fatal("Error creating s3 storage: %s", err)
} }
err = s3Storage.CopyFrom(conf.file) err = s3Storage.CopyFrom(conf.file)
if err != nil { if err != nil {
utils.Fatal("Error download file from S3 storage: %s", err) logger.Fatal("Error download file from S3 storage: %s", err)
} }
RestoreDatabase(db, conf) RestoreDatabase(db, conf)
} }
func restoreFromRemote(db *dbConfig, conf *RestoreConfig) { func restoreFromRemote(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from remote server") logger.Info("Restore database from remote server")
sshConfig, err := loadSSHConfig() sshConfig, err := loadSSHConfig()
if err != nil { if err != nil {
utils.Fatal("Error loading ssh config: %s", err) logger.Fatal("Error loading ssh config: %s", err)
} }
sshStorage, err := ssh.NewStorage(ssh.Config{ sshStorage, err := ssh.NewStorage(ssh.Config{
@@ -111,20 +107,21 @@ func restoreFromRemote(db *dbConfig, conf *RestoreConfig) {
Port: sshConfig.port, Port: sshConfig.port,
User: sshConfig.user, User: sshConfig.user,
Password: sshConfig.password, Password: sshConfig.password,
IdentifyFile: sshConfig.identifyFile,
RemotePath: conf.remotePath, RemotePath: conf.remotePath,
LocalPath: tmpPath, LocalPath: tmpPath,
}) })
if err != nil { if err != nil {
utils.Fatal("Error creating SSH storage: %s", err) logger.Fatal("Error creating SSH storage: %s", err)
} }
err = sshStorage.CopyFrom(conf.file) err = sshStorage.CopyFrom(conf.file)
if err != nil { if err != nil {
utils.Fatal("Error copying backup file: %s", err) logger.Fatal("Error copying backup file: %s", err)
} }
RestoreDatabase(db, conf) RestoreDatabase(db, conf)
} }
func restoreFromFTP(db *dbConfig, conf *RestoreConfig) { func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from FTP server") logger.Info("Restore database from FTP server")
ftpConfig := loadFtpConfig() ftpConfig := loadFtpConfig()
ftpStorage, err := ftp.NewStorage(ftp.Config{ ftpStorage, err := ftp.NewStorage(ftp.Config{
Host: ftpConfig.host, Host: ftpConfig.host,
@@ -135,11 +132,11 @@ func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
LocalPath: tmpPath, LocalPath: tmpPath,
}) })
if err != nil { if err != nil {
utils.Fatal("Error creating SSH storage: %s", err) logger.Fatal("Error creating SSH storage: %s", err)
} }
err = ftpStorage.CopyFrom(conf.file) err = ftpStorage.CopyFrom(conf.file)
if err != nil { if err != nil {
utils.Fatal("Error copying backup file: %s", err) logger.Fatal("Error copying backup file: %s", err)
} }
RestoreDatabase(db, conf) RestoreDatabase(db, conf)
} }
@@ -147,41 +144,41 @@ func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
// RestoreDatabase restore database // RestoreDatabase restore database
func RestoreDatabase(db *dbConfig, conf *RestoreConfig) { func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
if conf.file == "" { if conf.file == "" {
utils.Fatal("Error, file required") logger.Fatal("Error, file required")
} }
extension := filepath.Ext(filepath.Join(tmpPath, conf.file)) extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
rFile, err := os.ReadFile(filepath.Join(tmpPath, conf.file)) rFile, err := os.ReadFile(filepath.Join(tmpPath, conf.file))
outputFile := RemoveLastExtension(filepath.Join(tmpPath, conf.file)) outputFile := RemoveLastExtension(filepath.Join(tmpPath, conf.file))
if err != nil { if err != nil {
utils.Fatal("Error reading backup file: %s ", err) logger.Fatal("Error reading backup file: %s ", err)
} }
if extension == ".gpg" { if extension == ".gpg" {
if conf.usingKey { if conf.usingKey {
utils.Info("Decrypting backup using private key...") logger.Info("Decrypting backup using private key...")
utils.Warn("Backup decryption using a private key is not fully supported") logger.Warn("Backup decryption using a private key is not fully supported")
prKey, err := os.ReadFile(conf.privateKey) prKey, err := os.ReadFile(conf.privateKey)
if err != nil { if err != nil {
utils.Fatal("Error reading public key: %s ", err) logger.Fatal("Error reading public key: %s ", err)
} }
err = encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase) err = encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase)
if err != nil { if err != nil {
utils.Fatal("error during decrypting backup %v", err) logger.Fatal("error during decrypting backup %v", err)
} }
utils.Info("Decrypting backup using private key...done") logger.Info("Decrypting backup using private key...done")
} else { } else {
if conf.passphrase == "" { if conf.passphrase == "" {
utils.Error("Error, passphrase or private key required") logger.Error("Error, passphrase or private key required")
utils.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.") logger.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.")
} else { } else {
utils.Info("Decrypting backup using passphrase...") logger.Info("Decrypting backup using passphrase...")
// decryptWithGPG file // decryptWithGPG file
err := encryptor.Decrypt(rFile, outputFile, conf.passphrase) err := encryptor.Decrypt(rFile, outputFile, conf.passphrase)
if err != nil { if err != nil {
utils.Fatal("Error decrypting file %s %v", file, err) logger.Fatal("Error decrypting file %s %v", file, err)
} }
utils.Info("Decrypting backup using passphrase...done") logger.Info("Decrypting backup using passphrase...done")
// Update file name // Update file name
conf.file = RemoveLastExtension(file) conf.file = RemoveLastExtension(file)
} }
@@ -195,7 +192,7 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
return return
} }
testDatabaseConnection(db) testDatabaseConnection(db)
utils.Info("Restoring database...") logger.Info("Restoring database...")
extension := filepath.Ext(filepath.Join(tmpPath, conf.file)) extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
// Restore from compressed file / .sql.gz // Restore from compressed file / .sql.gz
@@ -203,10 +200,10 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
str := "zcat " + filepath.Join(tmpPath, conf.file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName str := "zcat " + filepath.Join(tmpPath, conf.file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
_, err := exec.Command("sh", "-c", str).Output() _, err := exec.Command("sh", "-c", str).Output()
if err != nil { if err != nil {
utils.Fatal("Error, in restoring the database %v", err) logger.Fatal("Error, in restoring the database %v", err)
} }
utils.Info("Restoring database... done") logger.Info("Restoring database... done")
utils.Info("Database has been restored") logger.Info("Database has been restored")
// Delete temp // Delete temp
deleteTemp() deleteTemp()
@@ -215,17 +212,17 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
str := "cat " + filepath.Join(tmpPath, conf.file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName str := "cat " + filepath.Join(tmpPath, conf.file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
_, err := exec.Command("sh", "-c", str).Output() _, err := exec.Command("sh", "-c", str).Output()
if err != nil { if err != nil {
utils.Fatal("Error in restoring the database %v", err) logger.Fatal("Error in restoring the database %v", err)
} }
utils.Info("Restoring database... done") logger.Info("Restoring database... done")
utils.Info("Database has been restored") logger.Info("Database has been restored")
// Delete temp // Delete temp
deleteTemp() deleteTemp()
} else { } else {
utils.Fatal("Unknown file extension %s", extension) logger.Fatal("Unknown file extension %s", extension)
} }
} else { } else {
utils.Fatal("File not found in %s", filepath.Join(tmpPath, conf.file)) logger.Fatal("File not found in %s", filepath.Join(tmpPath, conf.file))
} }
} }

pkg/logger/logger.go (new file)

@@ -0,0 +1,97 @@
package logger
import (
"fmt"
"log"
"os"
"runtime"
"strings"
)
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// Info logs an informational message to stdout
func Info(msg string, args ...interface{}) {
log.SetOutput(getStd("/dev/stdout"))
logWithCaller("INFO", msg, args...)
}
// Warn logs a warning message to stdout
func Warn(msg string, args ...interface{}) {
log.SetOutput(getStd("/dev/stdout"))
logWithCaller("WARN", msg, args...)
}
// Error logs error messages
func Error(msg string, args ...interface{}) {
log.SetOutput(getStd("/dev/stderr"))
logWithCaller("ERROR", msg, args...)
}
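// Fatal logs an error message and exits the program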
func Fatal(msg string, args ...interface{}) {
log.SetOutput(os.Stdout)
logWithCaller("ERROR", msg, args...)
os.Exit(1)
}
// Helper function to format and log messages with file and line number
func logWithCaller(level, msg string, args ...interface{}) {
// Format message if there are additional arguments
formattedMessage := msg
if len(args) > 0 {
formattedMessage = fmt.Sprintf(msg, args...)
}
// Get the caller's file and line number (skip 2 frames)
_, file, line, ok := runtime.Caller(2)
if !ok {
file = "unknown"
line = 0
}
// Log the message, adding caller information (file and line) only when the level is trace
if strings.ToLower(level) != "off" {
if strings.ToLower(level) == traceLog {
log.Printf("%s: %s (File: %s, Line: %d)\n", level, formattedMessage, file, line)
} else {
log.Printf("%s: %s\n", level, formattedMessage)
}
}
}
func getStd(out string) *os.File {
switch out {
case "/dev/stdout":
return os.Stdout
case "/dev/stderr":
return os.Stderr
case "/dev/stdin":
return os.Stdin
default:
return os.Stdout
}
}
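
The new logger keeps the printf-style signatures of the removed `utils` helpers, routes `Error` output to stderr, and exits the process on `Fatal`. A minimal usage sketch, using the import path introduced by this commit:

```go
package main

import "github.com/jkaninda/mysql-bkup/pkg/logger"

func main() {
	logger.Info("Starting backup task...")
	logger.Warn("%s is deprecated, please use %s instead", "OLD_VAR", "NEW_VAR")
	logger.Error("Error copying backup file: %s", "permission denied")

	// Fatal formats the message, logs it, and exits with status 1.
	logger.Fatal("Cron expression is not valid: %s", "* * bad *")
}
```

Note that, unlike the removed `utils.Fatal`, this `Fatal` does not call `NotifyError` before exiting, so failure notifications now have to be sent explicitly where needed.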

pkg/logger/var.go (new file)

@@ -0,0 +1,26 @@
package logger
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
const traceLog = "trace"


@@ -1,78 +0,0 @@
// Package utils /
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package utils
import (
"fmt"
"os"
"time"
)
func Info(msg string, args ...any) {
var currentTime = time.Now().Format("2006/01/02 15:04:05")
formattedMessage := fmt.Sprintf(msg, args...)
if len(args) == 0 {
fmt.Printf("%s INFO: %s\n", currentTime, msg)
} else {
fmt.Printf("%s INFO: %s\n", currentTime, formattedMessage)
}
}
// Warn warning message
func Warn(msg string, args ...any) {
var currentTime = time.Now().Format("2006/01/02 15:04:05")
formattedMessage := fmt.Sprintf(msg, args...)
if len(args) == 0 {
fmt.Printf("%s WARN: %s\n", currentTime, msg)
} else {
fmt.Printf("%s WARN: %s\n", currentTime, formattedMessage)
}
}
func Error(msg string, args ...any) {
var currentTime = time.Now().Format("2006/01/02 15:04:05")
formattedMessage := fmt.Sprintf(msg, args...)
if len(args) == 0 {
fmt.Printf("%s ERROR: %s\n", currentTime, msg)
} else {
fmt.Printf("%s ERROR: %s\n", currentTime, formattedMessage)
}
}
// Fatal logs an error message and exits the program
func Fatal(msg string, args ...any) {
var currentTime = time.Now().Format("2006/01/02 15:04:05")
// Fatal logs an error message and exits the program.
formattedMessage := fmt.Sprintf(msg, args...)
if len(args) == 0 {
fmt.Printf("%s ERROR: %s\n", currentTime, msg)
NotifyError(msg)
} else {
fmt.Printf("%s ERROR: %s\n", currentTime, formattedMessage)
NotifyError(formattedMessage)
}
os.Exit(1)
}


@@ -1,20 +1,5 @@
package utils package utils
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"github.com/go-mail/mail"
"html/template"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strings"
"time"
)
/* /*
MIT License MIT License
@@ -39,6 +24,22 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. SOFTWARE.
*/ */
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"github.com/go-mail/mail"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"html/template"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"time"
)
func parseTemplate[T any](data T, fileName string) (string, error) { func parseTemplate[T any](data T, fileName string) (string, error) {
// Open the file // Open the file
tmpl, err := template.ParseFiles(filepath.Join(templatePath, fileName)) tmpl, err := template.ParseFiles(filepath.Join(templatePath, fileName))
@@ -55,7 +56,7 @@ func parseTemplate[T any](data T, fileName string) (string, error) {
} }
func SendEmail(subject, body string) error { func SendEmail(subject, body string) error {
Info("Start sending email notification....") logger.Info("Start sending email notification....")
config := loadMailConfig() config := loadMailConfig()
emails := strings.Split(config.MailTo, ",") emails := strings.Split(config.MailTo, ",")
m := mail.NewMessage() m := mail.NewMessage()
@@ -67,16 +68,16 @@ func SendEmail(subject, body string) error {
d.TLSConfig = &tls.Config{InsecureSkipVerify: config.SkipTls} d.TLSConfig = &tls.Config{InsecureSkipVerify: config.SkipTls}
if err := d.DialAndSend(m); err != nil { if err := d.DialAndSend(m); err != nil {
Error("Error could not send email : %v", err) logger.Error("Error could not send email : %v", err)
return err return err
} }
Info("Email notification has been sent") logger.Info("Email notification has been sent")
return nil return nil
} }
func sendMessage(msg string) error { func sendMessage(msg string) error {
Info("Sending Telegram notification... ") logger.Info("Sending Telegram notification... ")
chatId := os.Getenv("TG_CHAT_ID") chatId := os.Getenv("TG_CHAT_ID")
body, _ := json.Marshal(map[string]string{ body, _ := json.Marshal(map[string]string{
"chat_id": chatId, "chat_id": chatId,
@@ -96,11 +97,11 @@ func sendMessage(msg string) error {
} }
code := response.StatusCode code := response.StatusCode
if code == 200 { if code == 200 {
Info("Telegram notification has been sent") logger.Info("Telegram notification has been sent")
return nil return nil
} else { } else {
body, _ := ioutil.ReadAll(response.Body) body, _ := io.ReadAll(response.Body)
Error("Error could not send message, error: %s", string(body)) logger.Error("Error could not send message, error: %s", string(body))
return fmt.Errorf("error could not send message %s", string(body)) return fmt.Errorf("error could not send message %s", string(body))
} }
@@ -125,11 +126,11 @@ func NotifySuccess(notificationData *NotificationData) {
if err == nil { if err == nil {
body, err := parseTemplate(*notificationData, "email.tmpl") body, err := parseTemplate(*notificationData, "email.tmpl")
if err != nil { if err != nil {
Error("Could not parse email template: %v", err) logger.Error("Could not parse email template: %v", err)
} }
err = SendEmail(fmt.Sprintf("✅ Database Backup Notification %s", notificationData.Database), body) err = SendEmail(fmt.Sprintf("✅ Database Backup Notification %s", notificationData.Database), body)
if err != nil { if err != nil {
Error("Could not send email: %v", err) logger.Error("Could not send email: %v", err)
} }
} }
// Telegram notification // Telegram notification
@@ -137,12 +138,12 @@ func NotifySuccess(notificationData *NotificationData) {
if err == nil { if err == nil {
message, err := parseTemplate(*notificationData, "telegram.tmpl") message, err := parseTemplate(*notificationData, "telegram.tmpl")
if err != nil { if err != nil {
Error("Could not parse telegram template: %v", err) logger.Error("Could not parse telegram template: %v", err)
} }
err = sendMessage(message) err = sendMessage(message)
if err != nil { if err != nil {
Error("Could not send Telegram message: %v", err) logger.Error("Could not send Telegram message: %v", err)
} }
} }
} }
@@ -169,11 +170,11 @@ func NotifyError(error string) {
BackupReference: os.Getenv("BACKUP_REFERENCE"), BackupReference: os.Getenv("BACKUP_REFERENCE"),
}, "email-error.tmpl") }, "email-error.tmpl")
if err != nil { if err != nil {
Error("Could not parse error template: %v", err) logger.Error("Could not parse error template: %v", err)
} }
err = SendEmail(fmt.Sprintf("🔴 Urgent: Database Backup Failure Notification"), body) err = SendEmail("🔴 Urgent: Database Backup Failure Notification", body)
if err != nil { if err != nil {
Error("Could not send email: %v", err) logger.Error("Could not send email: %v", err)
} }
} }
// Telegram notification // Telegram notification
@@ -185,13 +186,13 @@ func NotifyError(error string) {
BackupReference: os.Getenv("BACKUP_REFERENCE"), BackupReference: os.Getenv("BACKUP_REFERENCE"),
}, "telegram-error.tmpl") }, "telegram-error.tmpl")
if err != nil { if err != nil {
Error("Could not parse error template: %v", err) logger.Error("Could not parse error template: %v", err)
} }
err = sendMessage(message) err = sendMessage(message)
if err != nil { if err != nil {
Error("Could not send telegram message: %v", err) logger.Error("Could not send telegram message: %v", err)
} }
} }
} }


@@ -26,6 +26,7 @@ package utils
import ( import (
"fmt" "fmt"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/robfig/cron/v3" "github.com/robfig/cron/v3"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"io" "io"
@@ -35,6 +36,8 @@ import (
"time" "time"
) )
var Version = "development"
// FileExists checks if the file does exist // FileExists checks if the file does exist
func FileExists(filename string) bool { func FileExists(filename string) bool {
info, err := os.Stat(filename) info, err := os.Stat(filename)
@@ -49,7 +52,13 @@ func WriteToFile(filePath, content string) error {
if err != nil { if err != nil {
return err return err
} }
defer file.Close() defer func(file *os.File) {
err := file.Close()
if err != nil {
return
}
}(file)
_, err = file.WriteString(content) _, err = file.WriteString(content)
return err return err
@@ -67,14 +76,25 @@ func CopyFile(src, dst string) error {
if err != nil { if err != nil {
return fmt.Errorf("failed to open source file: %v", err) return fmt.Errorf("failed to open source file: %v", err)
} }
defer sourceFile.Close() defer func(sourceFile *os.File) {
err := sourceFile.Close()
if err != nil {
return
}
}(sourceFile)
// Create the destination file // Create the destination file
destinationFile, err := os.Create(dst) destinationFile, err := os.Create(dst)
if err != nil { if err != nil {
return fmt.Errorf("failed to create destination file: %v", err) return fmt.Errorf("failed to create destination file: %v", err)
} }
defer destinationFile.Close() defer func(destinationFile *os.File) {
err := destinationFile.Close()
if err != nil {
return
}
}(destinationFile)
// Copy the content from source to destination // Copy the content from source to destination
_, err = io.Copy(destinationFile, sourceFile) _, err = io.Copy(destinationFile, sourceFile)
@@ -92,7 +112,7 @@ func CopyFile(src, dst string) error {
} }
func ChangePermission(filePath string, mod int) { func ChangePermission(filePath string, mod int) {
if err := os.Chmod(filePath, fs.FileMode(mod)); err != nil { if err := os.Chmod(filePath, fs.FileMode(mod)); err != nil {
Fatal("Error changing permissions of %s: %v\n", filePath, err) logger.Fatal("Error changing permissions of %s: %v\n", filePath, err)
} }
} }
@@ -101,7 +121,12 @@ func IsDirEmpty(name string) (bool, error) {
if err != nil { if err != nil {
return false, err return false, err
} }
defer f.Close() defer func(f *os.File) {
err := f.Close()
if err != nil {
return
}
}(f)
_, err = f.Readdirnames(1) _, err = f.Readdirnames(1)
if err == nil { if err == nil {
@@ -149,7 +174,7 @@ func GetEnvVariable(envName, oldEnvName string) string {
if err != nil { if err != nil {
return value return value
} }
Warn("%s is deprecated, please use %s instead! ", oldEnvName, envName) logger.Warn("%s is deprecated, please use %s instead! ", oldEnvName, envName)
} }
} }
return value return value
@@ -196,10 +221,11 @@ func GetIntEnv(envName string) int {
} }
ret, err := strconv.Atoi(val) ret, err := strconv.Atoi(val)
if err != nil { if err != nil {
Error("Error: %v", err) logger.Error("Error: %v", err)
} }
return ret return ret
} }
func EnvWithDefault(envName string, defaultValue string) string { func EnvWithDefault(envName string, defaultValue string) string {
value := os.Getenv(envName) value := os.Getenv(envName)
if value == "" { if value == "" {
@@ -220,13 +246,12 @@ func CronNextTime(cronExpr string) time.Time {
// Parse the cron expression // Parse the cron expression
schedule, err := cron.ParseStandard(cronExpr) schedule, err := cron.ParseStandard(cronExpr)
if err != nil { if err != nil {
Error("Error parsing cron expression: %s", err) logger.Error("Error parsing cron expression: %s", err)
return time.Time{} return time.Time{}
} }
// Get the current time // Get the current time
now := time.Now() now := time.Now()
// Get the next scheduled time // Get the next scheduled time
next := schedule.Next(now) next := schedule.Next(now)
//Info("The next scheduled time is: %v\n", next)
return next return next
} }