refactor: move logging into pkg/logger and clean up code

Author: Jonas Kaninda
Date:   2024-12-06 14:21:55 +01:00
parent 9016a9ec7a
commit afd4afc83b
19 changed files with 483 additions and 340 deletions

.golangci.yml (new file, 43 lines)

@@ -0,0 +1,43 @@
run:
  timeout: 5m
  allow-parallel-runners: true
issues:
  # don't skip warning about doc comments
  # don't exclude the default set of lint
  exclude-use-default: false
  # restore some of the defaults
  # (fill in the rest as needed)
  exclude-rules:
    - path: "internal/*"
      linters:
        - dupl
        - lll
        - goimports
linters:
  disable-all: true
  enable:
    - dupl
    - errcheck
    - copyloopvar
    - ginkgolinter
    - goconst
    - gocyclo
    - gofmt
    - gosimple
    - govet
    - ineffassign
    - misspell
    - nakedret
    - prealloc
    - revive
    - staticcheck
    - typecheck
    - unconvert
    - unparam
    - unused
linters-settings:
  revive:
    rules:
      - name: comment-spacings
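
The `comment-spacings` revive rule enabled above requires a space between the comment marker and the comment text. A minimal illustration of what it flags (hypothetical snippet, not code from this repo):

```go
package sample

//count tracks processed rows (flagged: no space after the "//" marker)
var count int

// total tracks processed rows (accepted: space after the "//" marker)
var total int
```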


@@ -1,5 +1,6 @@
FROM golang:1.23.2 AS build
WORKDIR /app
ARG appVersion=""
# Copy the source code.
COPY . .
@@ -7,7 +8,7 @@ COPY . .
RUN go mod download
# Build
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup
RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-X 'github.com/jkaninda/pg-bkup/utils.Version=${appVersion}'" -o /app/mysql-bkup
FROM alpine:3.20.3
ENV TZ=UTC
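
The new `-ldflags="-X '...utils.Version=${appVersion}'"` flag stamps the image version into a package-level string variable at link time; the commit later declares that variable as `var Version = "development"` in utils. Note that `-X` only takes effect when the fully qualified path matches the package that actually declares the variable. A minimal sketch of the pattern, using a hypothetical `main` package rather than the repo's code:

```go
package main

import "fmt"

// Version is overridden at build time, e.g.:
//   go build -ldflags="-X 'main.Version=v1.2.3'" .
var Version = "development"

func main() {
	fmt.Println("Version:", Version)
}
```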


@@ -91,7 +91,6 @@ services:
networks:
web:
```
### Docker recurring backup
```shell


@@ -26,6 +26,7 @@ package cmd
import (
"github.com/jkaninda/mysql-bkup/internal"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
)
@@ -38,7 +39,7 @@ var BackupCmd = &cobra.Command{
if len(args) == 0 {
internal.StartBackup(cmd)
} else {
utils.Fatal(`"backup" accepts no argument %q`, args)
logger.Fatal(`"backup" accepts no argument %q`, args)
}
},
}


@@ -26,7 +26,7 @@ package cmd
import (
"github.com/jkaninda/mysql-bkup/internal"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/spf13/cobra"
)
@@ -37,7 +37,7 @@ var MigrateCmd = &cobra.Command{
if len(args) == 0 {
internal.StartMigration(cmd)
} else {
utils.Fatal(`"migrate" accepts no argument %q`, args)
logger.Fatal(`"migrate" accepts no argument %q`, args)
}


@@ -25,6 +25,7 @@ SOFTWARE.
*/
import (
"github.com/jkaninda/mysql-bkup/internal"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
)
@@ -37,7 +38,7 @@ var RestoreCmd = &cobra.Command{
if len(args) == 0 {
internal.StartRestore(cmd)
} else {
utils.Fatal(`"restore" accepts no argument %q`, args)
logger.Fatal(`"restore" accepts no argument %q`, args)
}


@@ -6,23 +6,39 @@ nav_order: 1
# About mysql-bkup
{:.no_toc}
MySQL Backup is a Docker container image that can be used to backup, restore and migrate MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, FTP and SSH remote storage.
It also supports __encrypting__ your backups using GPG.
Telegram and Email notifications on successful and failed backups.
**MYSQL-BKUP** is a Docker container image designed to **backup, restore, and migrate MySQL databases**.
It supports a variety of storage options and ensures data security through GPG encryption.
## Features
We are open to receiving stars, PRs, and issues!
- **Storage Options:**
- Local storage
- AWS S3 or any S3-compatible object storage
- FTP
- SSH-compatible storage
- **Data Security:**
- Backups can be encrypted using **GPG** to ensure confidentiality.
{: .fs-6 .fw-300 }
- **Deployment Flexibility:**
- Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image.
- Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**.
- Supports recurring backups of MySQL databases when deployed:
- On Docker for automated backup schedules.
- As a **Job** or **CronJob** on Kubernetes.
---
- **Notifications:**
- Get real-time updates on backup success or failure via:
- **Telegram**
- **Email**
The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
## Use Cases
- **Automated Recurring Backups:** Schedule regular backups for MySQL databases.
- **Cross-Environment Migration:** Easily migrate your MySQL databases across different environments using supported storage options.
- **Secure Backup Management:** Protect your data with GPG encryption.
It also supports database __encryption__ using GPG.
{: .note }

go.mod (2 lines changed)

@@ -1,6 +1,6 @@
module github.com/jkaninda/mysql-bkup
go 1.22.5
go 1.23.2
require github.com/spf13/pflag v1.0.5 // indirect

go.sum (2 lines added)

@@ -11,6 +11,8 @@ github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9Hu
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ=
github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=


@@ -31,6 +31,7 @@ import (
"github.com/jkaninda/go-storage/pkg/local"
"github.com/jkaninda/go-storage/pkg/s3"
"github.com/jkaninda/go-storage/pkg/ssh"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/robfig/cron/v3"
"github.com/spf13/cobra"
@@ -43,9 +44,9 @@ import (
func StartBackup(cmd *cobra.Command) {
intro()
//Initialize backup configs
// Initialize backup configs
config := initBackupConfig(cmd)
//Load backup configuration file
// Load backup configuration file
configFile, err := loadConfigFile()
if err != nil {
dbConf = initDbConfig(cmd)
@@ -55,7 +56,7 @@ func StartBackup(cmd *cobra.Command) {
if utils.IsValidCronExpression(config.cronExpression) {
scheduledMode(dbConf, config)
} else {
utils.Fatal("Cron expression is not valid: %s", config.cronExpression)
logger.Fatal("Cron expression is not valid: %s", config.cronExpression)
}
}
} else {
@@ -66,22 +67,22 @@ func StartBackup(cmd *cobra.Command) {
// scheduledMode Runs backup in scheduled mode
func scheduledMode(db *dbConfig, config *BackupConfig) {
utils.Info("Running in Scheduled mode")
utils.Info("Backup cron expression: %s", config.cronExpression)
utils.Info("The next scheduled time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
utils.Info("Storage type %s ", config.storage)
logger.Info("Running in Scheduled mode")
logger.Info("Backup cron expression: %s", config.cronExpression)
logger.Info("The next scheduled time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
logger.Info("Storage type %s ", config.storage)
//Test backup
utils.Info("Testing backup configurations...")
BackupTask(db, config)
utils.Info("Testing backup configurations...done")
utils.Info("Creating backup job...")
// Test backup
logger.Info("Testing backup configurations...")
testDatabaseConnection(db)
logger.Info("Testing backup configurations...done")
logger.Info("Creating backup job...")
// Create a new cron instance
c := cron.New()
_, err := c.AddFunc(config.cronExpression, func() {
BackupTask(db, config)
utils.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
logger.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
})
if err != nil {
@@ -89,8 +90,8 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
}
// Start the cron scheduler
c.Start()
utils.Info("Creating backup job...done")
utils.Info("Backup job started")
logger.Info("Creating backup job...done")
logger.Info("Backup job started")
defer c.Stop()
select {}
}
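
scheduledMode builds on github.com/robfig/cron/v3: it registers the backup task with AddFunc, starts the scheduler, and blocks forever on an empty select. A condensed, self-contained sketch of that pattern; the cron expression and the task body here are placeholders:

```go
package main

import (
	"log"

	"github.com/robfig/cron/v3"
)

func main() {
	c := cron.New() // standard 5-field cron expressions
	_, err := c.AddFunc("0 2 * * *", func() {
		log.Println("running backup task...") // placeholder for BackupTask(db, config)
	})
	if err != nil {
		log.Fatalf("invalid cron expression: %v", err)
	}
	c.Start()
	defer c.Stop()
	select {} // block so the scheduler keeps running
}
```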
@@ -98,7 +99,7 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
// multiBackupTask backup multi database
func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
for _, db := range databases {
//Check if path is defined in config file
// Check if path is defined in config file
if db.Path != "" {
bkConfig.remotePath = db.Path
}
@@ -108,8 +109,8 @@ func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
// BackupTask backups database
func BackupTask(db *dbConfig, config *BackupConfig) {
utils.Info("Starting backup task...")
//Generate file name
logger.Info("Starting backup task...")
// Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
if config.disableCompression {
backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20060102_150405"))
@@ -129,37 +130,42 @@ func BackupTask(db *dbConfig, config *BackupConfig) {
}
}
func startMultiBackup(bkConfig *BackupConfig, configFile string) {
utils.Info("Starting backup task...")
logger.Info("Starting backup task...")
conf, err := readConf(configFile)
if err != nil {
utils.Fatal("Error reading config file: %s", err)
logger.Fatal("Error reading config file: %s", err)
}
//Check if cronExpression is defined in config file
// Check if cronExpression is defined in config file
if conf.CronExpression != "" {
bkConfig.cronExpression = conf.CronExpression
}
if len(conf.Databases) == 0 {
logger.Fatal("No databases found")
}
// Check if cronExpression is defined
if bkConfig.cronExpression == "" {
multiBackupTask(conf.Databases, bkConfig)
} else {
// Check if cronExpression is valid
if utils.IsValidCronExpression(bkConfig.cronExpression) {
utils.Info("Running backup in Scheduled mode")
utils.Info("Backup cron expression: %s", bkConfig.cronExpression)
utils.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
utils.Info("Storage type %s ", bkConfig.storage)
logger.Info("Running backup in Scheduled mode")
logger.Info("Backup cron expression: %s", bkConfig.cronExpression)
logger.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
logger.Info("Storage type %s ", bkConfig.storage)
//Test backup
utils.Info("Testing backup configurations...")
multiBackupTask(conf.Databases, bkConfig)
utils.Info("Testing backup configurations...done")
utils.Info("Creating backup job...")
// Test backup
logger.Info("Testing backup configurations...")
for _, db := range conf.Databases {
testDatabaseConnection(getDatabase(db))
}
logger.Info("Testing backup configurations...done")
logger.Info("Creating backup job...")
// Create a new cron instance
c := cron.New()
_, err := c.AddFunc(bkConfig.cronExpression, func() {
multiBackupTask(conf.Databases, bkConfig)
utils.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
logger.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
})
if err != nil {
@@ -167,13 +173,13 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
}
// Start the cron scheduler
c.Start()
utils.Info("Creating backup job...done")
utils.Info("Backup job started")
logger.Info("Creating backup job...done")
logger.Info("Backup job started")
defer c.Stop()
select {}
} else {
utils.Fatal("Cron expression is not valid: %s", bkConfig.cronExpression)
logger.Fatal("Cron expression is not valid: %s", bkConfig.cronExpression)
}
}
@@ -181,10 +187,9 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
// BackupDatabase backup database
func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
storagePath = os.Getenv("STORAGE_PATH")
utils.Info("Starting database backup...")
logger.Info("Starting database backup...")
err := os.Setenv("MYSQL_PWD", db.dbPassword)
if err != nil {
@@ -192,7 +197,7 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
}
testDatabaseConnection(db)
// Backup Database database
utils.Info("Backing up database...")
logger.Info("Backing up database...")
// Verify is compression is disabled
if disableCompression {
@@ -205,21 +210,26 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
)
output, err := cmd.Output()
if err != nil {
log.Fatal(err)
logger.Fatal(err.Error())
}
// save output
file, err := os.Create(filepath.Join(tmpPath, backupFileName))
if err != nil {
log.Fatal(err)
logger.Fatal(err.Error())
}
defer file.Close()
defer func(file *os.File) {
err := file.Close()
if err != nil {
logger.Fatal(err.Error())
}
}(file)
_, err = file.Write(output)
if err != nil {
log.Fatal(err)
logger.Fatal(err.Error())
}
utils.Info("Database has been backed up")
logger.Info("Database has been backed up")
} else {
// Execute mysqldump
@@ -231,9 +241,9 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
gzipCmd := exec.Command("gzip")
gzipCmd.Stdin = stdout
gzipCmd.Stdout, err = os.Create(filepath.Join(tmpPath, backupFileName))
gzipCmd.Start()
err = gzipCmd.Start()
if err != nil {
log.Fatal(err)
return
}
if err := cmd.Run(); err != nil {
log.Fatal(err)
@@ -241,12 +251,12 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
if err := gzipCmd.Wait(); err != nil {
log.Fatal(err)
}
utils.Info("Database has been backed up")
logger.Info("Database has been backed up")
}
}
func localBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to local storage")
logger.Info("Backup database to local storage")
startTime = time.Now().Format(utils.TimeFormat())
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
@@ -256,20 +266,20 @@ func localBackup(db *dbConfig, config *BackupConfig) {
}
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
logger.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
utils.Info("Backup name is %s", finalFileName)
logger.Info("Backup name is %s", finalFileName)
localStorage := local.NewStorage(local.Config{
LocalPath: tmpPath,
RemotePath: storagePath,
})
err = localStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
logger.Fatal("Error copying backup file: %s", err)
}
utils.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName))
//Send notification
logger.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName))
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
@@ -279,36 +289,36 @@ func localBackup(db *dbConfig, config *BackupConfig) {
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
//Delete old backup
// Delete old backup
if config.prune {
err = localStorage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
//Delete temp
// Delete temp
deleteTemp()
utils.Info("Backup completed successfully")
logger.Info("Backup completed successfully")
}
func s3Backup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to s3 storage")
logger.Info("Backup database to s3 storage")
startTime = time.Now().Format(utils.TimeFormat())
//Backup database
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to remote storage S3 ... ")
logger.Info("Uploading backup archive to remote storage S3 ... ")
awsConfig := initAWSConfig()
if config.remotePath == "" {
config.remotePath = awsConfig.remotePath
}
utils.Info("Backup name is %s", finalFileName)
logger.Info("Backup name is %s", finalFileName)
s3Storage, err := s3.NewStorage(s3.Config{
Endpoint: awsConfig.endpoint,
Bucket: awsConfig.bucket,
@@ -321,20 +331,20 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating s3 storage: %s", err)
logger.Fatal("Error creating s3 storage: %s", err)
}
err = s3Storage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
logger.Fatal("Error copying backup file: %s", err)
}
//Get backup info
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
logger.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
//Delete backup file from tmp folder
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
if err != nil {
fmt.Println("Error deleting file: ", err)
@@ -344,12 +354,12 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
if config.prune {
err := s3Storage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
utils.Info("Uploading backup archive to remote storage S3 ... done ")
//Send notification
logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
logger.Info("Uploading backup archive to remote storage S3 ... done ")
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
@@ -359,26 +369,26 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
//Delete temp
// Delete temp
deleteTemp()
utils.Info("Backup completed successfully")
logger.Info("Backup completed successfully")
}
func sshBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to Remote server")
logger.Info("Backup database to Remote server")
startTime = time.Now().Format(utils.TimeFormat())
//Backup database
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to remote storage ... ")
utils.Info("Backup name is %s", finalFileName)
logger.Info("Uploading backup archive to remote storage ... ")
logger.Info("Backup name is %s", finalFileName)
sshConfig, err := loadSSHConfig()
if err != nil {
utils.Fatal("Error loading ssh config: %s", err)
logger.Fatal("Error loading ssh config: %s", err)
}
sshStorage, err := ssh.NewStorage(ssh.Config{
@@ -386,40 +396,39 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
Port: sshConfig.port,
User: sshConfig.user,
Password: sshConfig.password,
IdentifyFile: sshConfig.identifyFile,
RemotePath: config.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
logger.Fatal("Error creating SSH storage: %s", err)
}
err = sshStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
logger.Fatal("Error copying backup file: %s", err)
}
//Get backup info
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
logger.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
//Delete backup file from tmp folder
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error deleting file: %v", err)
logger.Error("Error deleting file: %v", err)
}
if config.prune {
err := sshStorage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
utils.Info("Uploading backup archive to remote storage ... done ")
//Send notification
logger.Info("Uploading backup archive to remote storage ... done ")
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
@@ -429,24 +438,24 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
//Delete temp
// Delete temp
deleteTemp()
utils.Info("Backup completed successfully")
logger.Info("Backup completed successfully")
}
func ftpBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to the remote FTP server")
logger.Info("Backup database to the remote FTP server")
startTime = time.Now().Format(utils.TimeFormat())
//Backup database
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to the remote FTP server ... ")
utils.Info("Backup name is %s", finalFileName)
logger.Info("Uploading backup archive to the remote FTP server ... ")
logger.Info("Backup name is %s", finalFileName)
ftpConfig := loadFtpConfig()
ftpStorage, err := ftp.NewStorage(ftp.Config{
Host: ftpConfig.host,
@@ -457,36 +466,36 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
logger.Fatal("Error creating SSH storage: %s", err)
}
err = ftpStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
logger.Fatal("Error copying backup file: %s", err)
}
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
//Get backup info
logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
logger.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
//Delete backup file from tmp folder
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error deleting file: %v", err)
logger.Error("Error deleting file: %v", err)
}
if config.prune {
err := ftpStorage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
utils.Info("Uploading backup archive to the remote FTP server ... done ")
logger.Info("Uploading backup archive to the remote FTP server ... done ")
//Send notification
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
@@ -496,36 +505,36 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
//Delete temp
// Delete temp
deleteTemp()
utils.Info("Backup completed successfully")
logger.Info("Backup completed successfully")
}
func encryptBackup(config *BackupConfig) {
backupFile, err := os.ReadFile(filepath.Join(tmpPath, config.backupFileName))
outputFile := fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension)
if err != nil {
utils.Fatal("Error reading backup file: %s ", err)
logger.Fatal("Error reading backup file: %s ", err)
}
if config.usingKey {
utils.Info("Encrypting backup using public key...")
logger.Info("Encrypting backup using public key...")
pubKey, err := os.ReadFile(config.publicKey)
if err != nil {
utils.Fatal("Error reading public key: %s ", err)
logger.Fatal("Error reading public key: %s ", err)
}
err = encryptor.EncryptWithPublicKey(backupFile, fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension), pubKey)
if err != nil {
utils.Fatal("Error encrypting backup file: %v ", err)
logger.Fatal("Error encrypting backup file: %v ", err)
}
utils.Info("Encrypting backup using public key...done")
logger.Info("Encrypting backup using public key...done")
} else if config.passphrase != "" {
utils.Info("Encrypting backup using passphrase...")
logger.Info("Encrypting backup using passphrase...")
err := encryptor.Encrypt(backupFile, outputFile, config.passphrase)
if err != nil {
utils.Fatal("error during encrypting backup %v", err)
logger.Fatal("error during encrypting backup %v", err)
}
utils.Info("Encrypting backup using passphrase...done")
logger.Info("Encrypting backup using passphrase...done")
}
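
BackupDatabase streams mysqldump through gzip instead of buffering the whole dump in memory: mysqldump's stdout is wired to gzip's stdin, gzip writes to the target file, and this commit now checks the previously ignored gzipCmd.Start() error. A trimmed sketch of that pipeline under the same assumptions as the diff (password passed via MYSQL_PWD; host, port, user, and database names are placeholders):

```go
package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	_ = os.Setenv("MYSQL_PWD", "secret") // placeholder credentials

	dump := exec.Command("mysqldump", "-h", "127.0.0.1", "-P", "3306", "-u", "root", "mydb")
	stdout, err := dump.StdoutPipe()
	if err != nil {
		log.Fatal(err)
	}

	out, err := os.Create("mydb.sql.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	gz := exec.Command("gzip")
	gz.Stdin = stdout // mysqldump | gzip
	gz.Stdout = out

	if err := gz.Start(); err != nil { // error now checked, as in the commit
		log.Fatal(err)
	}
	if err := dump.Run(); err != nil {
		log.Fatal(err)
	}
	if err := gz.Wait(); err != nil {
		log.Fatal(err)
	}
}
```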


@@ -26,6 +26,7 @@ package internal
import (
"fmt"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"os"
@@ -104,7 +105,7 @@ type AWSConfig struct {
}
func initDbConfig(cmd *cobra.Command) *dbConfig {
//Set env
// Set env
utils.GetEnv(cmd, "dbname", "DB_NAME")
dConf := dbConfig{}
dConf.dbHost = os.Getenv("DB_HOST")
@@ -115,8 +116,8 @@ func initDbConfig(cmd *cobra.Command) *dbConfig {
err := utils.CheckEnvVars(dbHVars)
if err != nil {
utils.Error("Please make sure all required environment variables for database are set")
utils.Fatal("Error checking environment variables: %s", err)
logger.Error("Please make sure all required environment variables for database are set")
logger.Fatal("Error checking environment variables: %s", err)
}
return &dConf
}
@@ -149,7 +150,7 @@ func loadSSHConfig() (*SSHConfig, error) {
}, nil
}
func loadFtpConfig() *FTPConfig {
//Initialize data configs
// Initialize data configs
fConfig := FTPConfig{}
fConfig.host = utils.GetEnvVariable("FTP_HOST", "FTP_HOST_NAME")
fConfig.user = os.Getenv("FTP_USER")
@@ -158,13 +159,13 @@ func loadFtpConfig() *FTPConfig {
fConfig.remotePath = os.Getenv("REMOTE_PATH")
err := utils.CheckEnvVars(ftpVars)
if err != nil {
utils.Error("Please make sure all required environment variables for FTP are set")
utils.Fatal("Error missing environment variables: %s", err)
logger.Error("Please make sure all required environment variables for FTP are set")
logger.Fatal("Error missing environment variables: %s", err)
}
return &fConfig
}
func initAWSConfig() *AWSConfig {
//Initialize AWS configs
// Initialize AWS configs
aConfig := AWSConfig{}
aConfig.endpoint = utils.GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
aConfig.accessKey = utils.GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
@@ -185,8 +186,8 @@ func initAWSConfig() *AWSConfig {
aConfig.forcePathStyle = forcePathStyle
err = utils.CheckEnvVars(awsVars)
if err != nil {
utils.Error("Please make sure all required environment variables for AWS S3 are set")
utils.Fatal("Error checking environment variables: %s", err)
logger.Error("Please make sure all required environment variables for AWS S3 are set")
logger.Fatal("Error checking environment variables: %s", err)
}
return &aConfig
}
@@ -194,7 +195,7 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig {
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
utils.GetEnv(cmd, "path", "REMOTE_PATH")
//Get flag value and set env
// Get flag value and set env
remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
prune := false
@@ -216,7 +217,7 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig {
encryption = true
usingKey = false
}
//Initialize backup configs
// Initialize backup configs
config := BackupConfig{}
config.backupRetention = backupRetention
config.disableCompression = disableCompression
@@ -246,7 +247,7 @@ func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "path", "REMOTE_PATH")
//Get flag value and set env
// Get flag value and set env
s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
@@ -260,7 +261,7 @@ func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
usingKey = false
}
//Initialize restore configs
// Initialize restore configs
rConfig := RestoreConfig{}
rConfig.s3Path = s3Path
rConfig.remotePath = remotePath
@@ -276,15 +277,15 @@ func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
func initTargetDbConfig() *targetDbConfig {
tdbConfig := targetDbConfig{}
tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
tdbConfig.targetDbPort = utils.EnvWithDefault("TARGET_DB_PORT", "5432")
tdbConfig.targetDbPort = utils.EnvWithDefault("TARGET_DB_PORT", "3306")
tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME")
tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME")
tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD")
err := utils.CheckEnvVars(tdbRVars)
if err != nil {
utils.Error("Please make sure all required environment variables for the target database are set")
utils.Fatal("Error checking target database environment variables: %s", err)
logger.Error("Please make sure all required environment variables for the target database are set")
logger.Fatal("Error checking target database environment variables: %s", err)
}
return &tdbConfig
}
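
initTargetDbConfig now falls back to 3306 (the MySQL default) instead of 5432 when TARGET_DB_PORT is unset, via the EnvWithDefault helper. A tiny sketch of that lookup-with-fallback pattern, written as a hypothetical standalone function that mirrors the helper's behavior:

```go
package main

import (
	"fmt"
	"os"
)

// envWithDefault returns the variable's value, or a fallback when it is unset or empty.
func envWithDefault(name, fallback string) string {
	if v := os.Getenv(name); v != "" {
		return v
	}
	return fallback
}

func main() {
	port := envWithDefault("TARGET_DB_PORT", "3306") // MySQL default port
	fmt.Println("target port:", port)
}
```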


@@ -27,6 +27,7 @@ package internal
import (
"bytes"
"fmt"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/jkaninda/mysql-bkup/utils"
"gopkg.in/yaml.v3"
"os"
@@ -36,13 +37,14 @@ import (
)
func intro() {
utils.Info("Starting MySQL Backup...")
utils.Info("Copyright (c) 2024 Jonas Kaninda ")
fmt.Println("Starting MySQL Backup...")
fmt.Printf("Version: %s\n", utils.Version)
fmt.Println("Copyright (c) 2024 Jonas Kaninda")
}
// copyToTmp copy file to temporary directory
func deleteTemp() {
utils.Info("Deleting %s ...", tmpPath)
logger.Info("Deleting %s ...", tmpPath)
err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
@@ -58,9 +60,9 @@ func deleteTemp() {
return nil
})
if err != nil {
utils.Error("Error deleting files: %v", err)
logger.Error("Error deleting files: %v", err)
} else {
utils.Info("Deleting %s ... done", tmpPath)
logger.Info("Deleting %s ... done", tmpPath)
}
}
@@ -70,7 +72,7 @@ func testDatabaseConnection(db *dbConfig) {
if err != nil {
return
}
utils.Info("Connecting to %s database ...", db.dbName)
logger.Info("Connecting to %s database ...", db.dbName)
cmd := exec.Command("mysql", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName, "-e", "quit")
// Capture the output
var out bytes.Buffer
@@ -78,10 +80,10 @@ func testDatabaseConnection(db *dbConfig) {
cmd.Stderr = &out
err = cmd.Run()
if err != nil {
utils.Fatal("Error testing database connection: %v\nOutput: %s", err, out.String())
logger.Fatal("Error testing database connection: %v\nOutput: %s", err, out.String())
}
utils.Info("Successfully connected to %s database", db.dbName)
logger.Info("Successfully connected to %s database", db.dbName)
}


@@ -26,19 +26,19 @@ package internal
import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/spf13/cobra"
"time"
)
func StartMigration(cmd *cobra.Command) {
intro()
utils.Info("Starting database migration...")
//Get DB config
logger.Info("Starting database migration...")
// Get DB config
dbConf = initDbConfig(cmd)
targetDbConf = initTargetDbConfig()
//Defining the target database variables
// Defining the target database variables
newDbConfig := dbConfig{}
newDbConfig.dbHost = targetDbConf.targetDbHost
newDbConfig.dbPort = targetDbConf.targetDbPort
@@ -46,15 +46,15 @@ func StartMigration(cmd *cobra.Command) {
newDbConfig.dbUserName = targetDbConf.targetDbUserName
newDbConfig.dbPassword = targetDbConf.targetDbPassword
//Generate file name
// Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
conf := &RestoreConfig{}
conf.file = backupFileName
//Backup source Database
// Backup source Database
BackupDatabase(dbConf, backupFileName, true)
//Restore source database into target database
utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
// Restore source database into target database
logger.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
RestoreDatabase(&newDbConfig, conf)
utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
utils.Info("Database migration completed.")
logger.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
logger.Info("Database migration completed.")
}


@@ -1,9 +1,4 @@
// Package internal /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package internal
/*
@@ -35,6 +30,7 @@ import (
"github.com/jkaninda/go-storage/pkg/local"
"github.com/jkaninda/go-storage/pkg/s3"
"github.com/jkaninda/go-storage/pkg/ssh"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"os"
@@ -61,20 +57,20 @@ func StartRestore(cmd *cobra.Command) {
}
}
func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) {
utils.Info("Restore database from local")
logger.Info("Restore database from local")
localStorage := local.NewStorage(local.Config{
RemotePath: storagePath,
LocalPath: tmpPath,
})
err := localStorage.CopyFrom(restoreConf.file)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
logger.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(dbConf, restoreConf)
}
func restoreFromS3(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from s3")
logger.Info("Restore database from s3")
awsConfig := initAWSConfig()
if conf.remotePath == "" {
conf.remotePath = awsConfig.remotePath
@@ -91,19 +87,19 @@ func restoreFromS3(db *dbConfig, conf *RestoreConfig) {
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating s3 storage: %s", err)
logger.Fatal("Error creating s3 storage: %s", err)
}
err = s3Storage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error download file from S3 storage: %s", err)
logger.Fatal("Error download file from S3 storage: %s", err)
}
RestoreDatabase(db, conf)
}
func restoreFromRemote(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from remote server")
logger.Info("Restore database from remote server")
sshConfig, err := loadSSHConfig()
if err != nil {
utils.Fatal("Error loading ssh config: %s", err)
logger.Fatal("Error loading ssh config: %s", err)
}
sshStorage, err := ssh.NewStorage(ssh.Config{
@@ -111,20 +107,21 @@ func restoreFromRemote(db *dbConfig, conf *RestoreConfig) {
Port: sshConfig.port,
User: sshConfig.user,
Password: sshConfig.password,
IdentifyFile: sshConfig.identifyFile,
RemotePath: conf.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
logger.Fatal("Error creating SSH storage: %s", err)
}
err = sshStorage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
logger.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(db, conf)
}
func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from FTP server")
logger.Info("Restore database from FTP server")
ftpConfig := loadFtpConfig()
ftpStorage, err := ftp.NewStorage(ftp.Config{
Host: ftpConfig.host,
@@ -135,11 +132,11 @@ func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
logger.Fatal("Error creating SSH storage: %s", err)
}
err = ftpStorage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
logger.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(db, conf)
}
@@ -147,42 +144,42 @@ func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
// RestoreDatabase restore database
func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
if conf.file == "" {
utils.Fatal("Error, file required")
logger.Fatal("Error, file required")
}
extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
rFile, err := os.ReadFile(filepath.Join(tmpPath, conf.file))
outputFile := RemoveLastExtension(filepath.Join(tmpPath, conf.file))
if err != nil {
utils.Fatal("Error reading backup file: %s ", err)
logger.Fatal("Error reading backup file: %s ", err)
}
if extension == ".gpg" {
if conf.usingKey {
utils.Info("Decrypting backup using private key...")
utils.Warn("Backup decryption using a private key is not fully supported")
logger.Info("Decrypting backup using private key...")
logger.Warn("Backup decryption using a private key is not fully supported")
prKey, err := os.ReadFile(conf.privateKey)
if err != nil {
utils.Fatal("Error reading public key: %s ", err)
logger.Fatal("Error reading public key: %s ", err)
}
err = encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase)
if err != nil {
utils.Fatal("error during decrypting backup %v", err)
logger.Fatal("error during decrypting backup %v", err)
}
utils.Info("Decrypting backup using private key...done")
logger.Info("Decrypting backup using private key...done")
} else {
if conf.passphrase == "" {
utils.Error("Error, passphrase or private key required")
utils.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.")
logger.Error("Error, passphrase or private key required")
logger.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.")
} else {
utils.Info("Decrypting backup using passphrase...")
//decryptWithGPG file
logger.Info("Decrypting backup using passphrase...")
// decryptWithGPG file
err := encryptor.Decrypt(rFile, outputFile, conf.passphrase)
if err != nil {
utils.Fatal("Error decrypting file %s %v", file, err)
logger.Fatal("Error decrypting file %s %v", file, err)
}
utils.Info("Decrypting backup using passphrase...done")
//Update file name
logger.Info("Decrypting backup using passphrase...done")
// Update file name
conf.file = RemoveLastExtension(file)
}
}
@@ -195,7 +192,7 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
return
}
testDatabaseConnection(db)
utils.Info("Restoring database...")
logger.Info("Restoring database...")
extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
// Restore from compressed file / .sql.gz
@@ -203,29 +200,29 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
str := "zcat " + filepath.Join(tmpPath, conf.file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
_, err := exec.Command("sh", "-c", str).Output()
if err != nil {
utils.Fatal("Error, in restoring the database %v", err)
logger.Fatal("Error, in restoring the database %v", err)
}
utils.Info("Restoring database... done")
utils.Info("Database has been restored")
//Delete temp
logger.Info("Restoring database... done")
logger.Info("Database has been restored")
// Delete temp
deleteTemp()
} else if extension == ".sql" {
//Restore from sql file
// Restore from sql file
str := "cat " + filepath.Join(tmpPath, conf.file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
_, err := exec.Command("sh", "-c", str).Output()
if err != nil {
utils.Fatal("Error in restoring the database %v", err)
logger.Fatal("Error in restoring the database %v", err)
}
utils.Info("Restoring database... done")
utils.Info("Database has been restored")
//Delete temp
logger.Info("Restoring database... done")
logger.Info("Database has been restored")
// Delete temp
deleteTemp()
} else {
utils.Fatal("Unknown file extension %s", extension)
logger.Fatal("Unknown file extension %s", extension)
}
} else {
utils.Fatal("File not found in %s", filepath.Join(tmpPath, conf.file))
logger.Fatal("File not found in %s", filepath.Join(tmpPath, conf.file))
}
}
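
RestoreDatabase feeds the dump back into the server through a shell pipeline: zcat for .sql.gz archives (plain cat for .sql files), piped into the mysql client. A condensed sketch of that restore step; paths and connection details are placeholders and the password again comes from MYSQL_PWD:

```go
package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	_ = os.Setenv("MYSQL_PWD", "secret") // placeholder credentials

	// zcat decompresses the archive and mysql replays the statements.
	pipeline := "zcat /tmp/mydb.sql.gz | mysql -h 127.0.0.1 -P 3306 -u root mydb"
	if out, err := exec.Command("sh", "-c", pipeline).CombinedOutput(); err != nil {
		log.Fatalf("restore failed: %v\n%s", err, out)
	}
	log.Println("database has been restored")
}
```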

pkg/logger/logger.go (new file, 97 lines)

@@ -0,0 +1,97 @@
package logger

import (
	"fmt"
	"log"
	"os"
	"runtime"
	"strings"
)

/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

// Info returns info log
func Info(msg string, args ...interface{}) {
	log.SetOutput(getStd("/dev/stdout"))
	logWithCaller("INFO", msg, args...)
}

// Warn returns warning log
func Warn(msg string, args ...interface{}) {
	log.SetOutput(getStd("/dev/stdout"))
	logWithCaller("WARN", msg, args...)
}

// Error logs error messages
func Error(msg string, args ...interface{}) {
	log.SetOutput(getStd("/dev/stderr"))
	logWithCaller("ERROR", msg, args...)
}

func Fatal(msg string, args ...interface{}) {
	log.SetOutput(os.Stdout)
	logWithCaller("ERROR", msg, args...)
	os.Exit(1)
}

// Helper function to format and log messages with file and line number
func logWithCaller(level, msg string, args ...interface{}) {
	// Format message if there are additional arguments
	formattedMessage := msg
	if len(args) > 0 {
		formattedMessage = fmt.Sprintf(msg, args...)
	}
	// Get the caller's file and line number (skip 2 frames)
	_, file, line, ok := runtime.Caller(2)
	if !ok {
		file = "unknown"
		line = 0
	}
	// Log message with caller information if GOMA_LOG_LEVEL is trace
	if strings.ToLower(level) != "off" {
		if strings.ToLower(level) == traceLog {
			log.Printf("%s: %s (File: %s, Line: %d)\n", level, formattedMessage, file, line)
		} else {
			log.Printf("%s: %s\n", level, formattedMessage)
		}
	}
}

func getStd(out string) *os.File {
	switch out {
	case "/dev/stdout":
		return os.Stdout
	case "/dev/stderr":
		return os.Stderr
	case "/dev/stdin":
		return os.Stdin
	default:
		return os.Stdout
	}
}
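
The new logger package keeps the printf-style call sites used throughout the commit: Info and Warn write to stdout, Error writes to stderr, and Fatal logs and then exits. A short usage sketch with illustrative message text:

```go
package main

import "github.com/jkaninda/mysql-bkup/pkg/logger"

func main() {
	logger.Info("Starting backup task...")
	logger.Warn("%s is deprecated, please use %s instead", "S3_ENDPOINT", "AWS_S3_ENDPOINT")
	logger.Error("Error copying backup file: %s", "permission denied")
	logger.Fatal("Cron expression is not valid: %s", "*/5 * *") // logs, then os.Exit(1)
}
```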

pkg/logger/var.go (new file, 26 lines)

@@ -0,0 +1,26 @@
package logger
/*
MIT License
# Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
const traceLog = "trace"


@@ -1,78 +0,0 @@
// Package utils /
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package utils
import (
"fmt"
"os"
"time"
)
func Info(msg string, args ...any) {
var currentTime = time.Now().Format("2006/01/02 15:04:05")
formattedMessage := fmt.Sprintf(msg, args...)
if len(args) == 0 {
fmt.Printf("%s INFO: %s\n", currentTime, msg)
} else {
fmt.Printf("%s INFO: %s\n", currentTime, formattedMessage)
}
}
// Warn warning message
func Warn(msg string, args ...any) {
var currentTime = time.Now().Format("2006/01/02 15:04:05")
formattedMessage := fmt.Sprintf(msg, args...)
if len(args) == 0 {
fmt.Printf("%s WARN: %s\n", currentTime, msg)
} else {
fmt.Printf("%s WARN: %s\n", currentTime, formattedMessage)
}
}
func Error(msg string, args ...any) {
var currentTime = time.Now().Format("2006/01/02 15:04:05")
formattedMessage := fmt.Sprintf(msg, args...)
if len(args) == 0 {
fmt.Printf("%s ERROR: %s\n", currentTime, msg)
} else {
fmt.Printf("%s ERROR: %s\n", currentTime, formattedMessage)
}
}
// Fatal logs an error message and exits the program
func Fatal(msg string, args ...any) {
var currentTime = time.Now().Format("2006/01/02 15:04:05")
// Fatal logs an error message and exits the program.
formattedMessage := fmt.Sprintf(msg, args...)
if len(args) == 0 {
fmt.Printf("%s ERROR: %s\n", currentTime, msg)
NotifyError(msg)
} else {
fmt.Printf("%s ERROR: %s\n", currentTime, formattedMessage)
NotifyError(formattedMessage)
}
os.Exit(1)
}


@@ -1,20 +1,5 @@
package utils
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"github.com/go-mail/mail"
"html/template"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strings"
"time"
)
/*
MIT License
@@ -39,6 +24,22 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"github.com/go-mail/mail"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"html/template"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"time"
)
func parseTemplate[T any](data T, fileName string) (string, error) {
// Open the file
tmpl, err := template.ParseFiles(filepath.Join(templatePath, fileName))
@@ -55,7 +56,7 @@ func parseTemplate[T any](data T, fileName string) (string, error) {
}
func SendEmail(subject, body string) error {
Info("Start sending email notification....")
logger.Info("Start sending email notification....")
config := loadMailConfig()
emails := strings.Split(config.MailTo, ",")
m := mail.NewMessage()
@@ -67,16 +68,16 @@ func SendEmail(subject, body string) error {
d.TLSConfig = &tls.Config{InsecureSkipVerify: config.SkipTls}
if err := d.DialAndSend(m); err != nil {
Error("Error could not send email : %v", err)
logger.Error("Error could not send email : %v", err)
return err
}
Info("Email notification has been sent")
logger.Info("Email notification has been sent")
return nil
}
func sendMessage(msg string) error {
Info("Sending Telegram notification... ")
logger.Info("Sending Telegram notification... ")
chatId := os.Getenv("TG_CHAT_ID")
body, _ := json.Marshal(map[string]string{
"chat_id": chatId,
@@ -96,11 +97,11 @@ func sendMessage(msg string) error {
}
code := response.StatusCode
if code == 200 {
Info("Telegram notification has been sent")
logger.Info("Telegram notification has been sent")
return nil
} else {
body, _ := ioutil.ReadAll(response.Body)
Error("Error could not send message, error: %s", string(body))
body, _ := io.ReadAll(response.Body)
logger.Error("Error could not send message, error: %s", string(body))
return fmt.Errorf("error could not send message %s", string(body))
}
@@ -120,29 +121,29 @@ func NotifySuccess(notificationData *NotificationData) {
"MAIL_TO",
}
//Email notification
// Email notification
err := CheckEnvVars(mailVars)
if err == nil {
body, err := parseTemplate(*notificationData, "email.tmpl")
if err != nil {
Error("Could not parse email template: %v", err)
logger.Error("Could not parse email template: %v", err)
}
err = SendEmail(fmt.Sprintf("✅ Database Backup Notification %s", notificationData.Database), body)
if err != nil {
Error("Could not send email: %v", err)
logger.Error("Could not send email: %v", err)
}
}
//Telegram notification
// Telegram notification
err = CheckEnvVars(vars)
if err == nil {
message, err := parseTemplate(*notificationData, "telegram.tmpl")
if err != nil {
Error("Could not parse telegram template: %v", err)
logger.Error("Could not parse telegram template: %v", err)
}
err = sendMessage(message)
if err != nil {
Error("Could not send Telegram message: %v", err)
logger.Error("Could not send Telegram message: %v", err)
}
}
}
@@ -160,7 +161,7 @@ func NotifyError(error string) {
"MAIL_TO",
}
//Email notification
// Email notification
err := CheckEnvVars(mailVars)
if err == nil {
body, err := parseTemplate(ErrorMessage{
@@ -169,14 +170,14 @@ func NotifyError(error string) {
BackupReference: os.Getenv("BACKUP_REFERENCE"),
}, "email-error.tmpl")
if err != nil {
Error("Could not parse error template: %v", err)
logger.Error("Could not parse error template: %v", err)
}
err = SendEmail(fmt.Sprintf("🔴 Urgent: Database Backup Failure Notification"), body)
err = SendEmail("🔴 Urgent: Database Backup Failure Notification", body)
if err != nil {
Error("Could not send email: %v", err)
logger.Error("Could not send email: %v", err)
}
}
//Telegram notification
// Telegram notification
err = CheckEnvVars(vars)
if err == nil {
message, err := parseTemplate(ErrorMessage{
@@ -185,13 +186,13 @@ func NotifyError(error string) {
BackupReference: os.Getenv("BACKUP_REFERENCE"),
}, "telegram-error.tmpl")
if err != nil {
Error("Could not parse error template: %v", err)
logger.Error("Could not parse error template: %v", err)
}
err = sendMessage(message)
if err != nil {
Error("Could not send telegram message: %v", err)
logger.Error("Could not send telegram message: %v", err)
}
}
}
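
sendMessage posts a JSON body containing the chat ID and the rendered template text to the Telegram Bot API and treats a 200 response as success. A minimal sketch of such a call; the sendMessage endpoint is the public Telegram Bot API, the TG_TOKEN variable name is an assumption (only TG_CHAT_ID appears in this diff), and the message text is illustrative:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
)

func main() {
	token := os.Getenv("TG_TOKEN")    // assumed env var name for the bot token
	chatID := os.Getenv("TG_CHAT_ID") // same variable read by sendMessage in the diff

	body, _ := json.Marshal(map[string]string{
		"chat_id": chatID,
		"text":    "✅ Database backup completed",
	})
	url := fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", token)

	resp, err := http.Post(url, "application/json", bytes.NewBuffer(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		log.Fatalf("telegram API returned status %d", resp.StatusCode)
	}
	log.Println("Telegram notification has been sent")
}
```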


@@ -26,6 +26,7 @@ package utils
import (
"fmt"
"github.com/jkaninda/mysql-bkup/pkg/logger"
"github.com/robfig/cron/v3"
"github.com/spf13/cobra"
"io"
@@ -35,6 +36,8 @@ import (
"time"
)
var Version = "development"
// FileExists checks if the file does exist
func FileExists(filename string) bool {
info, err := os.Stat(filename)
@@ -49,7 +52,13 @@ func WriteToFile(filePath, content string) error {
if err != nil {
return err
}
defer file.Close()
defer func(file *os.File) {
err := file.Close()
if err != nil {
return
}
}(file)
_, err = file.WriteString(content)
return err
@@ -67,14 +76,25 @@ func CopyFile(src, dst string) error {
if err != nil {
return fmt.Errorf("failed to open source file: %v", err)
}
defer sourceFile.Close()
defer func(sourceFile *os.File) {
err := sourceFile.Close()
if err != nil {
return
}
}(sourceFile)
// Create the destination file
destinationFile, err := os.Create(dst)
if err != nil {
return fmt.Errorf("failed to create destination file: %v", err)
}
defer destinationFile.Close()
defer func(destinationFile *os.File) {
err := destinationFile.Close()
if err != nil {
return
}
}(destinationFile)
// Copy the content from source to destination
_, err = io.Copy(destinationFile, sourceFile)
@@ -92,7 +112,7 @@ func CopyFile(src, dst string) error {
}
func ChangePermission(filePath string, mod int) {
if err := os.Chmod(filePath, fs.FileMode(mod)); err != nil {
Fatal("Error changing permissions of %s: %v\n", filePath, err)
logger.Fatal("Error changing permissions of %s: %v\n", filePath, err)
}
}
@@ -101,7 +121,12 @@ func IsDirEmpty(name string) (bool, error) {
if err != nil {
return false, err
}
defer f.Close()
defer func(f *os.File) {
err := f.Close()
if err != nil {
return
}
}(f)
_, err = f.Readdirnames(1)
if err == nil {
@@ -149,7 +174,7 @@ func GetEnvVariable(envName, oldEnvName string) string {
if err != nil {
return value
}
Warn("%s is deprecated, please use %s instead! ", oldEnvName, envName)
logger.Warn("%s is deprecated, please use %s instead! ", oldEnvName, envName)
}
}
return value
@@ -196,10 +221,11 @@ func GetIntEnv(envName string) int {
}
ret, err := strconv.Atoi(val)
if err != nil {
Error("Error: %v", err)
logger.Error("Error: %v", err)
}
return ret
}
func EnvWithDefault(envName string, defaultValue string) string {
value := os.Getenv(envName)
if value == "" {
@@ -220,13 +246,12 @@ func CronNextTime(cronExpr string) time.Time {
// Parse the cron expression
schedule, err := cron.ParseStandard(cronExpr)
if err != nil {
Error("Error parsing cron expression: %s", err)
logger.Error("Error parsing cron expression: %s", err)
return time.Time{}
}
// Get the current time
now := time.Now()
// Get the next scheduled time
next := schedule.Next(now)
//Info("The next scheduled time is: %v\n", next)
return next
}
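
CronNextTime relies on cron.ParseStandard plus Schedule.Next to compute the next run after the current time, returning the zero time on parse errors. A small usage sketch; the expression here is just an example value:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	schedule, err := cron.ParseStandard("0 1 * * *") // every day at 01:00
	if err != nil {
		log.Fatalf("error parsing cron expression: %v", err)
	}
	next := schedule.Next(time.Now())
	fmt.Println("The next scheduled time is:", next.Format(time.RFC3339))
}
```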