diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 0000000..445e2be --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,23 @@ +name: Lint + +on: + push: + pull_request: + +jobs: + lint: + name: Run on Ubuntu + runs-on: ubuntu-latest + steps: + - name: Clone the code + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: '~1.23' + + - name: Run linter + uses: golangci/golangci-lint-action@v6 + with: + version: v1.61 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 0000000..5c3d1eb --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,292 @@ +name: Tests +on: + push: + pull_request: +#on: +# push: +# branches: +# - main +# pull_request: +# branches: +# - main + +env: + IMAGE_NAME: mysql-bkup + +jobs: + test: + runs-on: ubuntu-latest + services: + mysql: + image: mysql:9 + env: + MYSQL_ROOT_PASSWORD: password + MYSQL_DATABASE: testdb + MYSQL_USER: user + MYSQL_PASSWORD: password + ports: + - 3306:3306 + options: >- + --health-cmd="mysqladmin ping -h 127.0.0.1 -uuser -ppassword" + --health-interval=10s + --health-timeout=5s + --health-retries=5 + mysql8: + image: mysql:8 + env: + MYSQL_ROOT_PASSWORD: password + MYSQL_DATABASE: testdb + MYSQL_USER: user + MYSQL_PASSWORD: password + ports: + - 3308:3306 + options: >- + --health-cmd="mysqladmin ping -h 127.0.0.1 -uuser -ppassword" + --health-interval=10s + --health-timeout=5s + --health-retries=5 + mysql5: + image: mysql:5 + env: + MYSQL_ROOT_PASSWORD: password + MYSQL_DATABASE: testdb + MYSQL_USER: user + MYSQL_PASSWORD: password + ports: + - 3305:3306 + options: >- + --health-cmd="mysqladmin ping -h 127.0.0.1 -uuser -ppassword" + --health-interval=10s + --health-timeout=5s + --health-retries=5 + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Create Minio container + run: | + docker run -d --rm --name minio \ + --network host \ + -p 9000:9000 \ + -e MINIO_ACCESS_KEY=minioadmin \ + -e MINIO_SECRET_KEY=minioadmin \ + -e MINIO_REGION_NAME="eu" \ + minio/minio server /data + echo "Create Minio container completed" + - name: Install MinIO Client (mc) + run: | + curl -O https://dl.min.io/client/mc/release/linux-amd64/mc + chmod +x mc + sudo mv mc /usr/local/bin/ + + - name: Wait for MinIO to be ready + run: sleep 5 + + - name: Configure MinIO Client + run: | + mc alias set local http://localhost:9000 minioadmin minioadmin + mc alias list + + - name: Create MinIO Bucket + run: | + mc mb local/backups + echo "Bucket backups created successfully." + # Build the Docker image + - name: Build Docker Image + run: | + docker buildx build --build-arg appVersion=test -t ${{ env.IMAGE_NAME }}:latest --load . 
+ + - name: Verify Docker images + run: | + docker images + + - name: Wait for MySQL to be ready + run: | + docker run --rm --network host mysql:9 mysqladmin ping -h 127.0.0.1 -uuser -ppassword --wait + - name: Test restore + run: | + docker run --rm --name ${{ env.IMAGE_NAME }} \ + -v ./migrations:/backup/ \ + --network host \ + -e DB_HOST=127.0.0.1 \ + -e DB_USERNAME=root \ + -e DB_PASSWORD=password \ + -e DB_NAME=testdb \ + ${{ env.IMAGE_NAME }}:latest restore -f init.sql + echo "Database restore completed" + - name: Test restore Mysql8 + run: | + docker run --rm --name ${{ env.IMAGE_NAME }} \ + -v ./migrations:/backup/ \ + --network host \ + -e DB_HOST=127.0.0.1 \ + -e DB_PORT=3308 \ + -e DB_USERNAME=root \ + -e DB_PASSWORD=password \ + -e DB_NAME=testdb \ + ${{ env.IMAGE_NAME }}:latest restore -f init.sql + echo "Test restore Mysql8 completed" + - name: Test restore Mysql5 + run: | + docker run --rm --name ${{ env.IMAGE_NAME }} \ + -v ./migrations:/backup/ \ + --network host \ + -e DB_HOST=127.0.0.1 \ + -e DB_PORT=3305 \ + -e DB_USERNAME=root \ + -e DB_PASSWORD=password \ + -e DB_NAME=testdb \ + ${{ env.IMAGE_NAME }}:latest restore -f init.sql + echo "Test restore Mysql5 completed" + - name: Test backup + run: | + docker run --rm --name ${{ env.IMAGE_NAME }} \ + -v ./migrations:/backup/ \ + --network host \ + -e DB_HOST=127.0.0.1 \ + -e DB_USERNAME=user \ + -e DB_PASSWORD=password \ + -e DB_NAME=testdb \ + ${{ env.IMAGE_NAME }}:latest backup + echo "Database backup completed" + - name: Test backup Mysql8 + run: | + docker run --rm --name ${{ env.IMAGE_NAME }} \ + -v ./migrations:/backup/ \ + --network host \ + -e DB_PORT=3308 \ + -e DB_HOST=127.0.0.1 \ + -e DB_USERNAME=user \ + -e DB_PASSWORD=password \ + -e DB_NAME=testdb \ + ${{ env.IMAGE_NAME }}:latest backup + echo "Test backup Mysql8 completed" + - name: Test backup Mysql5 + run: | + docker run --rm --name ${{ env.IMAGE_NAME }} \ + -v ./migrations:/backup/ \ + --network host \ + -e DB_PORT=3305 \ + -e DB_HOST=127.0.0.1 \ + -e DB_USERNAME=user \ + -e DB_PASSWORD=password \ + -e DB_NAME=testdb \ + ${{ env.IMAGE_NAME }}:latest backup + echo "Test backup Mysql5 completed" + - name: Test encrypted backup + run: | + docker run --rm --name ${{ env.IMAGE_NAME }} \ + -v ./migrations:/backup/ \ + --network host \ + -e DB_HOST=127.0.0.1 \ + -e DB_USERNAME=user \ + -e DB_PASSWORD=password \ + -e GPG_PASSPHRASE=password \ + -e DB_NAME=testdb \ + ${{ env.IMAGE_NAME }}:latest backup --disable-compression --custom-name encrypted-bkup + echo "Database encrypted backup completed" + - name: Test restore encrypted backup | testdb -> testdb2 + run: | + docker run --rm --name ${{ env.IMAGE_NAME }} \ + -v ./migrations:/backup/ \ + --network host \ + -e DB_HOST=127.0.0.1 \ + -e DB_USERNAME=root \ + -e DB_PASSWORD=password \ + -e GPG_PASSPHRASE=password \ + -e DB_NAME=testdb2 \ + ${{ env.IMAGE_NAME }}:latest restore -f /backup/encrypted-bkup.sql.gpg + echo "Test restore encrypted backup completed" + - name: Test migrate database testdb -> testdb3 + run: | + docker run --rm --name ${{ env.IMAGE_NAME }} \ + -v ./migrations:/backup/ \ + --network host \ + -e DB_HOST=127.0.0.1 \ + -e DB_USERNAME=root \ + -e DB_PASSWORD=password \ + -e GPG_PASSPHRASE=password \ + -e DB_NAME=testdb \ + -e TARGET_DB_HOST=127.0.0.1 \ + -e TARGET_DB_PORT=3306 \ + -e TARGET_DB_NAME=testdb3 \ + -e TARGET_DB_USERNAME=root \ + -e TARGET_DB_PASSWORD=password \ + ${{ env.IMAGE_NAME }}:latest migrate + echo "Test migrate database testdb -> testdb3 completed" + - name: Test backup all 
databases + run: | + docker run --rm --name ${{ env.IMAGE_NAME }} \ + -v ./migrations:/backup/ \ + --network host \ + -e DB_HOST=127.0.0.1 \ + -e DB_USERNAME=root \ + -e DB_PASSWORD=password \ + -e DB_NAME=testdb \ + ${{ env.IMAGE_NAME }}:latest backup --all-databases + echo "Database backup completed" + - name: Test multiple backup + run: | + docker run --rm --name ${{ env.IMAGE_NAME }} \ + -v ./migrations:/backup/ \ + --network host \ + -e DB_HOST=127.0.0.1 \ + -e TESTDB2_DB_USERNAME=root \ + -e TESTDB2_DB_PASSWORD=password \ + -e TESTDB2_DB_HOST=127.0.0.1 \ + ${{ env.IMAGE_NAME }}:latest backup -c /backup/test_config.yaml + echo "Database backup completed" + - name: Test backup Minio (s3) + run: | + docker run --rm --name ${{ env.IMAGE_NAME }} \ + --network host \ + -e DB_HOST=127.0.0.1 \ + -e DB_USERNAME=user \ + -e DB_PASSWORD=password \ + -e DB_NAME=testdb \ + -e AWS_S3_ENDPOINT="http://127.0.0.1:9000" \ + -e AWS_S3_BUCKET_NAME=backups \ + -e AWS_ACCESS_KEY=minioadmin \ + -e AWS_SECRET_KEY=minioadmin \ + -e AWS_DISABLE_SSL="true" \ + -e AWS_REGION="eu" \ + -e AWS_FORCE_PATH_STYLE="true" ${{ env.IMAGE_NAME }}:latest backup -s s3 --custom-name minio-backup + echo "Test backup Minio (s3) completed" + - name: Test restore Minio (s3) + run: | + docker run --rm --name ${{ env.IMAGE_NAME }} \ + --network host \ + -e DB_HOST=127.0.0.1 \ + -e DB_USERNAME=user \ + -e DB_PASSWORD=password \ + -e DB_NAME=testdb \ + -e AWS_S3_ENDPOINT="http://127.0.0.1:9000" \ + -e AWS_S3_BUCKET_NAME=backups \ + -e AWS_ACCESS_KEY=minioadmin \ + -e AWS_SECRET_KEY=minioadmin \ + -e AWS_DISABLE_SSL="true" \ + -e AWS_REGION="eu" \ + -e AWS_FORCE_PATH_STYLE="true" ${{ env.IMAGE_NAME }}:latest restore -s s3 -f minio-backup.sql.gz + echo "Test restore Minio (s3) completed" + - name: Test scheduled backup + run: | + docker run -d --rm --name ${{ env.IMAGE_NAME }} \ + -v ./migrations:/backup/ \ + --network host \ + -e DB_HOST=127.0.0.1 \ + -e DB_USERNAME=user \ + -e DB_PASSWORD=password \ + -e DB_NAME=testdb \ + ${{ env.IMAGE_NAME }}:latest backup -e "@every 10s" + + echo "Waiting for backup to be done..." + sleep 25 + docker logs ${{ env.IMAGE_NAME }} + echo "Test scheduled backup completed" + # Cleanup: Stop and remove containers + - name: Clean up + run: | + docker stop ${{ env.IMAGE_NAME }} || true + docker rm ${{ env.IMAGE_NAME }} || true \ No newline at end of file diff --git a/README.md index f827a93..2c9500a 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ **MYSQL-BKUP** is a Docker container image designed to **backup, restore, and migrate MySQL databases**. It supports a variety of storage options and ensures data security through GPG encryption. 
+[![Tests](https://github.com/jkaninda/mysql-bkup/actions/workflows/tests.yml/badge.svg)](https://github.com/jkaninda/mysql-bkup/actions/workflows/tests.yml) [![Build](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml/badge.svg)](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml) [![Go Report](https://goreportcard.com/badge/github.com/jkaninda/mysql-bkup)](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup) ![Docker Image Size (latest by date)](https://img.shields.io/docker/image-size/jkaninda/mysql-bkup?style=flat-square) diff --git a/cmd/backup.go b/cmd/backup.go index a6b2f13..290ad2a 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -50,5 +50,8 @@ func init() { BackupCmd.PersistentFlags().StringP("cron-expression", "e", "", "Backup cron expression (e.g., `0 0 * * *` or `@daily`)") BackupCmd.PersistentFlags().StringP("config", "c", "", "Configuration file for multi database backup. (e.g: `/backup/config.yaml`)") BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression") + BackupCmd.PersistentFlags().BoolP("all-databases", "a", false, "Backup all databases") + BackupCmd.PersistentFlags().BoolP("all-in-one", "A", false, "Backup all databases in a single file") + BackupCmd.PersistentFlags().StringP("custom-name", "", "", "Custom backup name") } diff --git a/docs/how-tos/backup-all.md b/docs/how-tos/backup-all.md new file mode 100644 index 0000000..a1f7355 --- /dev/null +++ b/docs/how-tos/backup-all.md @@ -0,0 +1,61 @@ +--- +title: Backup all databases in the server +layout: default +parent: How Tos +nav_order: 12 +--- + +# Backup All Databases + +MySQL-Bkup supports backing up all databases on the server using the `--all-databases` (`-a`) flag. By default, this creates separate backup files for each database. If you prefer a single backup file, you can use the `--all-in-one` (`-A`) flag. + +Backing up all databases is useful for creating a snapshot of the entire database server, whether for disaster recovery or migration purposes. +## Backup Modes + +### Separate Backup Files (Default) + +Using `--all-databases` without `--all-in-one` creates individual backup files for each database. + +- Creates separate backup files for each database. +- Provides more flexibility in restoring individual databases or tables. +- Can be more manageable in cases where different databases have different retention policies. +- Might take slightly longer due to multiple file operations. +- It is the default behavior when using the `--all-databases` flag. +- It does not back up system databases (`information_schema`, `performance_schema`, `mysql`, `sys`, `innodb`). + +**Command:** + +```bash +docker run --rm --network your_network_name \ + -v $PWD/backup:/backup/ \ + -e "DB_HOST=dbhost" \ + -e "DB_PORT=3306" \ + -e "DB_USERNAME=username" \ + -e "DB_PASSWORD=password" \ + jkaninda/mysql-bkup backup --all-databases +``` +### Single Backup File + +Using `--all-in-one` (`-A`) creates a single backup file containing all databases. + +- Creates a single backup file containing all databases. +- Easier to manage if you need to restore everything at once. +- Faster to back up and restore in bulk. +- Can be problematic if you only need to restore a specific database or table. +- It is recommended to use this option for disaster recovery purposes. +- It backs up system databases as well. 
+ +```bash +docker run --rm --network your_network_name \ + -v $PWD/backup:/backup/ \ + -e "DB_HOST=dbhost" \ + -e "DB_PORT=3306" \ + -e "DB_USERNAME=username" \ + -e "DB_PASSWORD=password" \ + jkaninda/mysql-bkup backup --all-in-one +``` + +### When to Use Which? + +- Use `--all-in-one` if you want a quick, simple backup for disaster recovery where you'll restore everything at once. +- Use `--all-databases` if you need granularity in restoring specific databases or tables without affecting others. diff --git a/docs/how-tos/mutli-backup.md b/docs/how-tos/mutli-backup.md index b9617ef..49d367c 100644 --- a/docs/how-tos/mutli-backup.md +++ b/docs/how-tos/mutli-backup.md @@ -34,8 +34,8 @@ Below is an example configuration file (`config.yaml`) that defines multiple dat ```yaml # Optional: Define a global cron expression for scheduled backups. # Example: "@every 20m" (runs every 20 minutes). If omitted, backups run immediately. -cronExpression: "" - +cronExpression: "" # Optional: Define a global cron expression for scheduled backups. +backupRescueMode: false # Optional: Set to true to enable rescue mode for backups. databases: - host: mysql1 # Optional: Overrides DB_HOST or uses DB_HOST_DATABASE1. port: 3306 # Optional: Default is 5432. Overrides DB_PORT or uses DB_PORT_DATABASE1. diff --git a/docs/how-tos/receive-notification.md b/docs/how-tos/receive-notification.md index 3067c92..43fa17f 100644 --- a/docs/how-tos/receive-notification.md +++ b/docs/how-tos/receive-notification.md @@ -2,7 +2,7 @@ title: Receive notifications layout: default parent: How Tos -nav_order: 12 +nav_order: 13 --- # Receive Notifications diff --git a/docs/reference/index.md b/docs/reference/index.md index aa3c81d..55c7134 100644 --- a/docs/reference/index.md +++ b/docs/reference/index.md @@ -6,28 +6,30 @@ nav_order: 3 # Configuration Reference -Backup, restore, and migration targets, schedules, and retention policies are configured using **environment variables** or **CLI flags**. - ---- +MySQL backup, restore, and migration processes can be configured using **environment variables** or **CLI flags**. ## CLI Utility Usage -| Option | Short Flag | Description | -|-------------------------|------------|-------------------------------------------------------------------------------| -| `pg-bkup` | `bkup` | CLI utility for managing PostgreSQL backups. | -| `backup` | | Perform a backup operation. | -| `restore` | | Perform a restore operation. | -| `migrate` | | Migrate a database from one instance to another. | -| `--storage` | `-s` | Storage type (`local`, `s3`, `ssh`, etc.). Default: `local`. | -| `--file` | `-f` | File name for restoration. | -| `--path` | | Path for storage (e.g., `/custom_path` for S3 or `/home/foo/backup` for SSH). | -| `--config` | `-c` | Configuration file for multi database backup. (e.g: `/backup/config.yaml`). | -| `--dbname` | `-d` | Database name. | -| `--port` | `-p` | Database port. Default: `3306`. | -| `--disable-compression` | | Disable compression for database backups. | -| `--cron-expression` | `-e` | Cron expression for scheduled backups (e.g., `0 0 * * *` or `@daily`). | -| `--help` | `-h` | Display help message and exit. | -| `--version` | `-V` | Display version information and exit. | +The `mysql-bkup` CLI provides commands and options to manage MySQL backups efficiently. 
+ +| Option                  | Short Flag | Description                                                                               | +|-------------------------|------------|-------------------------------------------------------------------------------------------| +| `mysql-bkup`            | `bkup`     | CLI tool for managing MySQL backups, restoration, and migration.                         | +| `backup`                |            | Executes a backup operation.                                                              | +| `restore`               |            | Restores a database from a backup file.                                                   | +| `migrate`               |            | Migrates a database from one instance to another.                                         | +| `--storage`             | `-s`       | Specifies the storage type (`local`, `s3`, `ssh`, etc.). Default: `local`.                | +| `--file`                | `-f`       | Defines the backup file name for restoration.                                             | +| `--path`                |            | Sets the storage path (e.g., `/custom_path` for S3 or `/home/foo/backup` for SSH).        | +| `--config`              | `-c`       | Provides a configuration file for multi-database backups (e.g., `/backup/config.yaml`).   | +| `--dbname`              | `-d`       | Specifies the database name to back up or restore.                                        | +| `--port`                | `-p`       | Defines the database port. Default: `3306`.                                               | +| `--disable-compression` |            | Disables compression for database backups.                                                | +| `--cron-expression`     | `-e`       | Schedules backups using a cron expression (e.g., `0 0 * * *` or `@daily`).                | +| `--all-databases`       | `-a`       | Backs up all databases separately (e.g., `backup --all-databases`).                       | +| `--all-in-one`          | `-A`       | Backs up all databases in a single file (e.g., `backup --all-in-one`).                    | +| `--help`                | `-h`       | Displays the help message and exits.                                                      | +| `--version`             | `-V`       | Shows version information and exits.                                                      | --- @@ -40,6 +42,8 @@ Backup, restore, and migration targets, schedules, and retention policies are co | `DB_NAME` | Optional (if provided via `-d` flag) | Database name. | | `DB_USERNAME` | Required | Database username. | | `DB_PASSWORD` | Required | Database password. | +| `DB_SSL_CA` | Optional | Path to the database client CA certificate file. | +| `DB_SSL_MODE` | Optional (`0` or `1`), default: `0` | Enables CA validation for the database client connection. | | `AWS_ACCESS_KEY` | Required for S3 storage | AWS S3 Access Key. | | `AWS_SECRET_KEY` | Required for S3 storage | AWS S3 Secret Key. | | `AWS_BUCKET_NAME` | Required for S3 storage | AWS S3 Bucket Name. 
| diff --git a/migrations/init.sql b/migrations/init.sql new file mode 100644 index 0000000..11eb42e --- /dev/null +++ b/migrations/init.sql @@ -0,0 +1,35 @@ +-- Create the testdb2, testdb3, and fakedb databases +CREATE DATABASE IF NOT EXISTS testdb2; +CREATE DATABASE IF NOT EXISTS testdb3; +CREATE DATABASE IF NOT EXISTS fakedb; +USE testdb; + +-- Create the 'users' table +CREATE TABLE users ( + id INT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(100) NOT NULL, + email VARCHAR(100) NOT NULL UNIQUE, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +-- Create the 'orders' table +CREATE TABLE orders ( + id INT AUTO_INCREMENT PRIMARY KEY, + user_id INT NOT NULL, + amount DECIMAL(10,2) NOT NULL, + status ENUM('pending', 'completed', 'canceled') NOT NULL DEFAULT 'pending', + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE +); + +-- Insert fake users +INSERT INTO users (name, email) VALUES + ('Alice Smith', 'alice@example.com'), + ('Bob Johnson', 'bob@example.com'), + ('Charlie Brown', 'charlie@example.com'); + +-- Insert fake orders +INSERT INTO orders (user_id, amount, status) VALUES + (1, 100.50, 'completed'), + (2, 200.75, 'pending'), + (3, 50.00, 'canceled'); diff --git a/migrations/test_config.yaml b/migrations/test_config.yaml new file mode 100644 index 0000000..97a8b56 --- /dev/null +++ b/migrations/test_config.yaml @@ -0,0 +1,13 @@ +#cronExpression: "@every 20s" +#backupRescueMode: false +databases: + - host: 127.0.0.1 + port: 3306 + name: testdb + user: user + password: password + - name: testdb2 + # database credentials from environment variables + #TESTDB2_DB_USERNAME + #TESTDB2_DB_PASSWORD + #TESTDB2_DB_HOST \ No newline at end of file diff --git a/pkg/azure.go b/pkg/azure.go index 7a0c602..b16edcc 100644 --- a/pkg/azure.go +++ b/pkg/azure.go @@ -39,7 +39,7 @@ func azureBackup(db *dbConfig, config *BackupConfig) { utils.Info("Backup database to Azure Blob Storage") // Backup database - err := BackupDatabase(db, config.backupFileName, disableCompression) + err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne) if err != nil { recoverMode(err, "Error backing up database") return @@ -104,7 +104,7 @@ func azureBackup(db *dbConfig, config *BackupConfig) { }) // Delete temp deleteTemp() - utils.Info("Backup successfully completed in %s", duration) + utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration) } func azureRestore(db *dbConfig, conf *RestoreConfig) { utils.Info("Restore database from Azure Blob storage") diff --git a/pkg/backup.go b/pkg/backup.go index f0a4144..3c45164 100644 --- a/pkg/backup.go +++ b/pkg/backup.go @@ -26,6 +26,8 @@ SOFTWARE. 
package pkg import ( + "bytes" + "errors" "fmt" "github.com/jkaninda/encryptor" "github.com/jkaninda/go-storage/pkg/local" @@ -33,10 +35,10 @@ import ( "github.com/jkaninda/mysql-bkup/utils" "github.com/robfig/cron/v3" "github.com/spf13/cobra" - "log" "os" "os/exec" "path/filepath" + "strings" "time" ) @@ -49,7 +51,8 @@ func StartBackup(cmd *cobra.Command) { if err != nil { dbConf = initDbConfig(cmd) if config.cronExpression == "" { - BackupTask(dbConf, config) + config.allowCustomName = true + createBackupTask(dbConf, config) } else { if utils.IsValidCronExpression(config.cronExpression) { scheduledMode(dbConf, config) @@ -83,7 +86,7 @@ func scheduledMode(db *dbConfig, config *BackupConfig) { c := cron.New() _, err = c.AddFunc(config.cronExpression, func() { - BackupTask(db, config) + createBackupTask(db, config) utils.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat)) }) @@ -105,28 +108,66 @@ func multiBackupTask(databases []Database, bkConfig *BackupConfig) { if db.Path != "" { bkConfig.remotePath = db.Path } - BackupTask(getDatabase(db), bkConfig) + createBackupTask(getDatabase(db), bkConfig) } } -// BackupTask backups database -func BackupTask(db *dbConfig, config *BackupConfig) { +// createBackupTask backup task +func createBackupTask(db *dbConfig, config *BackupConfig) { + if config.all && !config.allInOne { + backupAll(db, config) + } else { + backupTask(db, config) + } +} + +// backupAll backup all databases +func backupAll(db *dbConfig, config *BackupConfig) { + databases, err := listDatabases(*db) + if err != nil { + utils.Fatal("Error listing databases: %s", err) + } + for _, dbName := range databases { + if dbName == "information_schema" || dbName == "performance_schema" || dbName == "mysql" || dbName == "sys" || dbName == "innodb" || dbName == "Database" { + continue + } + db.dbName = dbName + config.backupFileName = fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405")) + backupTask(db, config) + } + +} + +// backupTask backup task +func backupTask(db *dbConfig, config *BackupConfig) { utils.Info("Starting backup task...") startTime = time.Now() + prefix := db.dbName + if config.all && config.allInOne { + prefix = "all_databases" + } + // Generate file name - backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405")) + backupFileName := fmt.Sprintf("%s_%s.sql.gz", prefix, time.Now().Format("20060102_150405")) if config.disableCompression { - backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20060102_150405")) + backupFileName = fmt.Sprintf("%s_%s.sql", prefix, time.Now().Format("20060102_150405")) + } + if config.customName != "" && config.allowCustomName && !config.all { + backupFileName = fmt.Sprintf("%s.sql.gz", config.customName) + if config.disableCompression { + backupFileName = fmt.Sprintf("%s.sql", config.customName) + } } config.backupFileName = backupFileName - switch config.storage { + s := strings.ToLower(config.storage) + switch s { case "local": localBackup(db, config) - case "s3", "S3": + case "s3": s3Backup(db, config) - case "ssh", "SSH", "remote", "sftp": + case "ssh", "remote", "sftp": sshBackup(db, config) - case "ftp", "FTP": + case "ftp": ftpBackup(db, config) case "azure": azureBackup(db, config) @@ -134,6 +175,8 @@ func BackupTask(db *dbConfig, config *BackupConfig) { localBackup(db, config) } } + +// startMultiBackup start multi backup func startMultiBackup(bkConfig *BackupConfig, configFile string) { utils.Info("Starting Multi 
backup task...") conf, err := readConf(configFile) @@ -196,82 +239,79 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) { } // BackupDatabase backup database -func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) error { +func BackupDatabase(db *dbConfig, backupFileName string, disableCompression, all, singleFile bool) error { storagePath = os.Getenv("STORAGE_PATH") - utils.Info("Starting database backup...") - err := os.Setenv("MYSQL_PWD", db.dbPassword) - if err != nil { - return fmt.Errorf("failed to set MYSQL_PWD environment variable: %v", err) + if err := testDatabaseConnection(db); err != nil { + return fmt.Errorf("database connection failed: %w", err) } - err = testDatabaseConnection(db) - if err != nil { - return fmt.Errorf("failed to connect to the database: %v", err) - } - // Backup Database database - utils.Info("Backing up database...") - - // Verify is compression is disabled - if disableCompression { - // Execute mysqldump - cmd := exec.Command("mysqldump", - "-h", db.dbHost, - "-P", db.dbPort, - "-u", db.dbUserName, - db.dbName, - ) - output, err := cmd.Output() - if err != nil { - return fmt.Errorf("failed to backup database: %v", err) - } - - // save output - file, err := os.Create(filepath.Join(tmpPath, backupFileName)) - if err != nil { - return fmt.Errorf("failed to create backup file: %v", err) - } - defer func(file *os.File) { - err := file.Close() - if err != nil { - return - } - }(file) - - _, err = file.Write(output) - if err != nil { - return err - } - utils.Info("Database has been backed up") + dumpArgs := []string{fmt.Sprintf("--defaults-file=%s", mysqlClientConfig)} + if all && singleFile { + dumpArgs = append(dumpArgs, "--all-databases", "--single-transaction", "--routines", "--triggers") } else { - // Execute mysqldump - cmd := exec.Command("mysqldump", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName) - stdout, err := cmd.StdoutPipe() - if err != nil { - return fmt.Errorf("failed to backup database: %v", err) - } - gzipCmd := exec.Command("gzip") - gzipCmd.Stdin = stdout - gzipCmd.Stdout, err = os.Create(filepath.Join(tmpPath, backupFileName)) - err = gzipCmd.Start() - if err != nil { - return fmt.Errorf("failed to backup database: %v", err) - } - if err := cmd.Run(); err != nil { - log.Fatal(err) - } - if err := gzipCmd.Wait(); err != nil { - log.Fatal(err) - } - + dumpArgs = append(dumpArgs, db.dbName) } + + backupPath := filepath.Join(tmpPath, backupFileName) + if disableCompression { + return runCommandAndSaveOutput("mysqldump", dumpArgs, backupPath) + } + return runCommandWithCompression("mysqldump", dumpArgs, backupPath) +} + +// runCommandAndSaveOutput runs a command and saves the output to a file +func runCommandAndSaveOutput(command string, args []string, outputPath string) error { + cmd := exec.Command(command, args...) + output, err := cmd.Output() + if err != nil { + return fmt.Errorf("failed to execute %s: %v, output: %s", command, err, string(output)) + } + + return os.WriteFile(outputPath, output, 0644) +} + +// runCommandWithCompression runs a command and compresses the output +func runCommandWithCompression(command string, args []string, outputPath string) error { + cmd := exec.Command(command, args...) 
+ stdout, err := cmd.StdoutPipe() + if err != nil { + return fmt.Errorf("failed to create stdout pipe: %w", err) + } + + gzipCmd := exec.Command("gzip") + gzipCmd.Stdin = stdout + gzipFile, err := os.Create(outputPath) + if err != nil { + return fmt.Errorf("failed to create gzip file: %w", err) + } + defer func(gzipFile *os.File) { + err := gzipFile.Close() + if err != nil { + utils.Error("Error closing gzip file: %v", err) + } + }(gzipFile) + gzipCmd.Stdout = gzipFile + + if err := gzipCmd.Start(); err != nil { + return fmt.Errorf("failed to start gzip: %w", err) + } + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to execute %s: %w", command, err) + } + if err := gzipCmd.Wait(); err != nil { + return fmt.Errorf("failed to wait for gzip completion: %w", err) + } + utils.Info("Database has been backed up") return nil } + +// localBackup backup database to local storage func localBackup(db *dbConfig, config *BackupConfig) { utils.Info("Backup database to local storage") - err := BackupDatabase(db, config.backupFileName, disableCompression) + err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne) if err != nil { recoverMode(err, "Error backing up database") return @@ -318,9 +358,10 @@ func localBackup(db *dbConfig, config *BackupConfig) { } // Delete temp deleteTemp() - utils.Info("Backup successfully completed in %s", duration) + utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration) } +// encryptBackup encrypt backup func encryptBackup(config *BackupConfig) { backupFile, err := os.ReadFile(filepath.Join(tmpPath, config.backupFileName)) outputFile := fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension) @@ -350,6 +391,31 @@ func encryptBackup(config *BackupConfig) { } } + +// listDatabases list all databases +func listDatabases(db dbConfig) ([]string, error) { + databases := []string{} + // Create the mysql client config file + if err := createMysqlClientConfigFile(db); err != nil { + return databases, errors.New(err.Error()) + } + utils.Info("Listing databases...") + // Step 1: List all databases + cmd := exec.Command("mariadb", fmt.Sprintf("--defaults-file=%s", mysqlClientConfig), "-e", "SHOW DATABASES;") + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + if err != nil { + return databases, fmt.Errorf("failed to list databases: %s", err) + } + // Step 2: Parse the output + for _, _db := range strings.Split(out.String(), "\n") { + if _db != "" { + databases = append(databases, _db) + } + } + return databases, nil +} func recoverMode(err error, msg string) { if err != nil { if backupRescueMode { @@ -360,6 +426,7 @@ func recoverMode(err error, msg string) { } else { utils.Error("Error: %s", msg) utils.Fatal("Error: %v", err) + return } } diff --git a/pkg/config.go b/pkg/config.go index 7573c60..19e8f3d 100644 --- a/pkg/config.go +++ b/pkg/config.go @@ -77,6 +77,10 @@ type BackupConfig struct { publicKey string storage string cronExpression string + all bool + allInOne bool + customName string + allowCustomName bool } type FTPConfig struct { host string @@ -251,11 +255,18 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig { remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH") storage = utils.GetEnv(cmd, "storage", "STORAGE") prune := false + configFile := os.Getenv("BACKUP_CONFIG_FILE") backupRetention := utils.GetIntEnv("BACKUP_RETENTION_DAYS") if backupRetention > 0 { prune = true } disableCompression, _ = 
cmd.Flags().GetBool("disable-compression") + customName, _ := cmd.Flags().GetString("custom-name") + all, _ := cmd.Flags().GetBool("all-databases") + allInOne, _ := cmd.Flags().GetBool("all-in-one") + if allInOne { + all = true + } _, _ = cmd.Flags().GetString("mode") passphrase := os.Getenv("GPG_PASSPHRASE") _ = utils.GetEnv(cmd, "path", "AWS_S3_PATH") @@ -269,6 +280,10 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig { encryption = true usingKey = false } + dbName := os.Getenv("DB_NAME") + if dbName == "" && !all && configFile == "" { + utils.Fatal("Database name is required, use DB_NAME environment variable or -d flag") + } // Initialize backup configs config := BackupConfig{} config.backupRetention = backupRetention @@ -281,6 +296,9 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig { config.publicKey = publicKeyFile config.usingKey = usingKey config.cronExpression = cronExpression + config.all = all + config.allInOne = allInOne + config.customName = customName return &config } diff --git a/pkg/helper.go b/pkg/helper.go index 4ca5030..298dd1e 100644 --- a/pkg/helper.go +++ b/pkg/helper.go @@ -26,7 +26,9 @@ package pkg import ( "bytes" + "errors" "fmt" + goutils "github.com/jkaninda/go-utils" "github.com/jkaninda/mysql-bkup/utils" "gopkg.in/yaml.v3" "os" @@ -36,7 +38,7 @@ import ( ) func intro() { - fmt.Println("Starting MySQL Backup...") + fmt.Println("Starting MYSQL-BKUP...") fmt.Printf("Version: %s\n", utils.Version) fmt.Println("Copyright (c) 2024 Jonas Kaninda") } @@ -65,25 +67,28 @@ func deleteTemp() { } } -// TestDatabaseConnection tests the database connection +// TestDatabaseConnection tests the database connection func testDatabaseConnection(db *dbConfig) error { - err := os.Setenv("MYSQL_PWD", db.dbPassword) - if err != nil { - return fmt.Errorf("failed to set MYSQL_PWD environment variable: %v", err) + // Create the mysql client config file + if err := createMysqlClientConfigFile(*db); err != nil { + return errors.New(err.Error()) } utils.Info("Connecting to %s database ...", db.dbName) // Set database name for notification error utils.DatabaseName = db.dbName - cmd := exec.Command("mariadb", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName, "-e", "quit") + + // Prepare the command to test the database connection + cmd := exec.Command("mariadb", fmt.Sprintf("--defaults-file=%s", mysqlClientConfig), db.dbName, "-e", "quit") // Capture the output var out bytes.Buffer cmd.Stdout = &out cmd.Stderr = &out - err = cmd.Run() - if err != nil { - return fmt.Errorf("failed to connect to %s database: %v", db.dbName, err) + // Run the command + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to connect to database %s: %v, output: %s", db.dbName, err, out.String()) } + utils.Info("Successfully connected to %s database", db.dbName) return nil } @@ -183,3 +188,16 @@ func RemoveLastExtension(filename string) string { } return filename } + +// Create mysql client config file +func createMysqlClientConfigFile(db dbConfig) error { + caCertPath := goutils.GetStringEnvWithDefault("DB_SSL_CA", "/etc/ssl/certs/ca-certificates.crt") + sslMode := goutils.GetStringEnvWithDefault("DB_SSL_MODE", "0") + // Create the mysql client config file + mysqlClientConfigFile := filepath.Join(tmpPath, "my.cnf") + mysqlCl := fmt.Sprintf("[client]\nhost=%s\nport=%s\nuser=%s\npassword=%s\nssl-ca=%s\nssl=%s\n", db.dbHost, db.dbPort, db.dbUserName, db.dbPassword, caCertPath, sslMode) + if err := os.WriteFile(mysqlClientConfigFile, []byte(mysqlCl), 0644); err != nil { + 
return fmt.Errorf("failed to create mysql client config file: %v", err) + } + return nil +} diff --git a/pkg/migrate.go b/pkg/migrate.go index 1edfd3e..e6d7fb7 100644 --- a/pkg/migrate.go +++ b/pkg/migrate.go @@ -51,7 +51,7 @@ func StartMigration(cmd *cobra.Command) { conf := &RestoreConfig{} conf.file = backupFileName // Backup source Database - err := BackupDatabase(dbConf, backupFileName, true) + err := BackupDatabase(dbConf, backupFileName, true, false, false) if err != nil { utils.Fatal("Error backing up database: %s", err) } diff --git a/pkg/remote.go b/pkg/remote.go index d01069d..2989b21 100644 --- a/pkg/remote.go +++ b/pkg/remote.go @@ -39,7 +39,7 @@ import ( func sshBackup(db *dbConfig, config *BackupConfig) { utils.Info("Backup database to Remote server") // Backup database - err := BackupDatabase(db, config.backupFileName, disableCompression) + err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne) if err != nil { recoverMode(err, "Error backing up database") return @@ -108,7 +108,7 @@ func sshBackup(db *dbConfig, config *BackupConfig) { }) // Delete temp deleteTemp() - utils.Info("Backup successfully completed in %s", duration) + utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration) } func remoteRestore(db *dbConfig, conf *RestoreConfig) { @@ -160,7 +160,7 @@ func ftpBackup(db *dbConfig, config *BackupConfig) { utils.Info("Backup database to the remote FTP server") // Backup database - err := BackupDatabase(db, config.backupFileName, disableCompression) + err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne) if err != nil { recoverMode(err, "Error backing up database") return @@ -224,5 +224,5 @@ func ftpBackup(db *dbConfig, config *BackupConfig) { }) // Delete temp deleteTemp() - utils.Info("Backup successfully completed in %s", duration) + utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration) } diff --git a/pkg/restore.go b/pkg/restore.go index 1dfba20..42633fc 100644 --- a/pkg/restore.go +++ b/pkg/restore.go @@ -25,6 +25,7 @@ SOFTWARE. package pkg import ( + "fmt" "github.com/jkaninda/encryptor" "github.com/jkaninda/go-storage/pkg/local" "github.com/jkaninda/mysql-bkup/utils" @@ -56,11 +57,17 @@ func StartRestore(cmd *cobra.Command) { } func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) { utils.Info("Restore database from local") + basePath := filepath.Dir(restoreConf.file) + fileName := filepath.Base(restoreConf.file) + restoreConf.file = fileName + if basePath == "" || basePath == "." 
{ + basePath = storagePath + } localStorage := local.NewStorage(local.Config{ - RemotePath: storagePath, + RemotePath: basePath, LocalPath: tmpPath, }) - err := localStorage.CopyFrom(restoreConf.file) + err := localStorage.CopyFrom(fileName) if err != nil { utils.Fatal("Error copying backup file: %s", err) } @@ -68,91 +75,79 @@ func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) { } -// RestoreDatabase restore database +// RestoreDatabase restores the database from a backup file func RestoreDatabase(db *dbConfig, conf *RestoreConfig) { if conf.file == "" { utils.Fatal("Error, file required") } - extension := filepath.Ext(filepath.Join(tmpPath, conf.file)) - rFile, err := os.ReadFile(filepath.Join(tmpPath, conf.file)) - outputFile := RemoveLastExtension(filepath.Join(tmpPath, conf.file)) + + filePath := filepath.Join(tmpPath, conf.file) + rFile, err := os.ReadFile(filePath) if err != nil { - utils.Fatal("Error reading backup file: %s ", err) + utils.Fatal("Error reading backup file: %v", err) } + extension := filepath.Ext(filePath) + outputFile := RemoveLastExtension(filePath) + if extension == ".gpg" { - - if conf.usingKey { - utils.Info("Decrypting backup using private key...") - utils.Warn("Backup decryption using a private key is not fully supported") - prKey, err := os.ReadFile(conf.privateKey) - if err != nil { - utils.Fatal("Error reading public key: %s ", err) - } - err = encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase) - if err != nil { - utils.Fatal("error during decrypting backup %v", err) - } - utils.Info("Decrypting backup using private key...done") - } else { - if conf.passphrase == "" { - utils.Error("Error, passphrase or private key required") - utils.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. 
GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.") - } else { - utils.Info("Decrypting backup using passphrase...") - // decryptWithGPG file - err := encryptor.Decrypt(rFile, outputFile, conf.passphrase) - if err != nil { - utils.Fatal("Error decrypting file %s %v", file, err) - } - utils.Info("Decrypting backup using passphrase...done") - // Update file name - conf.file = RemoveLastExtension(file) - } - } - + decryptBackup(conf, rFile, outputFile) } - if utils.FileExists(filepath.Join(tmpPath, conf.file)) { - err := os.Setenv("MYSQL_PWD", db.dbPassword) + restorationFile := filepath.Join(tmpPath, conf.file) + if !utils.FileExists(restorationFile) { + utils.Fatal("File not found: %s", restorationFile) + } + + if err := testDatabaseConnection(db); err != nil { + utils.Fatal("Error connecting to the database: %v", err) + } + + utils.Info("Restoring database...") + restoreDatabaseFile(db, restorationFile) +} + +func decryptBackup(conf *RestoreConfig, rFile []byte, outputFile string) { + if conf.usingKey { + utils.Info("Decrypting backup using private key...") + prKey, err := os.ReadFile(conf.privateKey) if err != nil { - return + utils.Fatal("Error reading private key: %v", err) } - err = testDatabaseConnection(db) - if err != nil { - utils.Fatal("Error connecting to the database %v", err) + if err := encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase); err != nil { + utils.Fatal("Error decrypting backup: %v", err) } - utils.Info("Restoring database...") - - extension := filepath.Ext(filepath.Join(tmpPath, conf.file)) - // Restore from compressed file / .sql.gz - if extension == ".gz" { - str := "zcat " + filepath.Join(tmpPath, conf.file) + " | mariadb -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName - _, err := exec.Command("sh", "-c", str).Output() - if err != nil { - utils.Fatal("Error, in restoring the database %v", err) - } - utils.Info("Restoring database... done") - utils.Info("Database has been restored") - // Delete temp - deleteTemp() - - } else if extension == ".sql" { - // Restore from sql file - str := "cat " + filepath.Join(tmpPath, conf.file) + " | mariadb -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName - _, err := exec.Command("sh", "-c", str).Output() - if err != nil { - utils.Fatal("Error in restoring the database %v", err) - } - utils.Info("Restoring database... 
done") - utils.Info("Database has been restored") - // Delete temp - deleteTemp() - } else { - utils.Fatal("Unknown file extension %s", extension) - } - } else { - utils.Fatal("File not found in %s", filepath.Join(tmpPath, conf.file)) + if conf.passphrase == "" { + utils.Fatal("Passphrase or private key required for GPG file.") + } + utils.Info("Decrypting backup using passphrase...") + if err := encryptor.Decrypt(rFile, outputFile, conf.passphrase); err != nil { + utils.Fatal("Error decrypting file: %v", err) + } + conf.file = RemoveLastExtension(conf.file) } } + +func restoreDatabaseFile(db *dbConfig, restorationFile string) { + extension := filepath.Ext(restorationFile) + var cmdStr string + + switch extension { + case ".gz": + cmdStr = fmt.Sprintf("zcat %s | mariadb --defaults-file=%s %s", restorationFile, mysqlClientConfig, db.dbName) + case ".sql": + cmdStr = fmt.Sprintf("cat %s | mariadb --defaults-file=%s %s", restorationFile, mysqlClientConfig, db.dbName) + default: + utils.Fatal("Unknown file extension: %s", extension) + } + + cmd := exec.Command("sh", "-c", cmdStr) + output, err := cmd.CombinedOutput() + if err != nil { + utils.Fatal("Error restoring database: %v\nOutput: %s", err, string(output)) + } + + utils.Info("Database has been restored successfully.") + deleteTemp() +} diff --git a/pkg/s3.go b/pkg/s3.go index 90822b9..be80481 100644 --- a/pkg/s3.go +++ b/pkg/s3.go @@ -39,7 +39,7 @@ func s3Backup(db *dbConfig, config *BackupConfig) { utils.Info("Backup database to s3 storage") // Backup database - err := BackupDatabase(db, config.backupFileName, disableCompression) + err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne) if err != nil { recoverMode(err, "Error backing up database") return @@ -107,7 +107,7 @@ func s3Backup(db *dbConfig, config *BackupConfig) { }) // Delete temp deleteTemp() - utils.Info("Backup successfully completed in %s", duration) + utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration) } func s3Restore(db *dbConfig, conf *RestoreConfig) { diff --git a/pkg/var.go b/pkg/var.go index 24a44df..a00ecd7 100644 --- a/pkg/var.go +++ b/pkg/var.go @@ -24,7 +24,10 @@ SOFTWARE. package pkg -import "time" +import ( + "path/filepath" + "time" +) const tmpPath = "/tmp/backup" const gpgHome = "/config/gnupg" @@ -43,6 +46,7 @@ var ( backupSize int64 = 0 startTime = time.Now() backupRescueMode = false + mysqlClientConfig = filepath.Join(tmpPath, "my.cnf") ) // dbHVars Required environment variables for database @@ -50,7 +54,6 @@ var dbHVars = []string{ "DB_HOST", "DB_PASSWORD", "DB_USERNAME", - "DB_NAME", } var tdbRVars = []string{ "TARGET_DB_HOST",