Merge pull request #86 from jkaninda/ftp

merge ftp
Committed by GitHub on 2024-09-30 00:25:14 +02:00
21 changed files with 271 additions and 79 deletions

View File

@@ -1,5 +1,5 @@
 # PostgreSQL Backup
-PostgreSQL Backup is a Docker container image that can be used to backup, restore and migrate Postgres database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
+PostgreSQL Backup is a Docker container image that can be used to backup, restore and migrate Postgres database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, FTP and SSH compatible storage.
 It also supports __encrypting__ your backups using GPG.
 The [jkaninda/pg-bkup](https://hub.docker.com/r/jkaninda/pg-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
@@ -167,7 +167,7 @@ While it may work against different implementations, there are no guarantees abo
 We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
-- The original image is based on `ubuntu` and requires additional tools, making it heavy.
+- The original image is based on `Alpine` and requires additional tools, making it heavy.
 - This image is written in Go.
 - `arm64` and `arm/v7` architectures are supported.
 - Docker in Swarm mode is supported.

View File

@@ -9,7 +9,7 @@ RUN go mod download
 # Build
 RUN CGO_ENABLED=0 GOOS=linux go build -o /app/pg-bkup
-FROM ubuntu:24.04
+FROM alpine:3.20.3
 ENV DB_HOST=""
 ENV DB_NAME=""
 ENV DB_USERNAME=""
@@ -25,17 +25,20 @@ ENV AWS_REGION="us-west-2"
 ENV AWS_DISABLE_SSL="false"
 ENV GPG_PASSPHRASE=""
 ENV SSH_USER=""
-ENV SSH_REMOTE_PATH=""
 ENV SSH_PASSWORD=""
 ENV SSH_HOST_NAME=""
 ENV SSH_IDENTIFY_FILE=""
-ENV SSH_PORT="22"
+ENV SSH_PORT=22
+ENV REMOTE_PATH=""
+ENV FTP_HOST_NAME=""
+ENV FTP_PORT=21
+ENV FTP_USER=""
+ENV FTP_PASSWORD=""
 ENV TARGET_DB_HOST=""
 ENV TARGET_DB_PORT=5432
 ENV TARGET_DB_NAME=""
 ENV TARGET_DB_USERNAME=""
 ENV TARGET_DB_PASSWORD=""
-ARG DEBIAN_FRONTEND=noninteractive
 ENV VERSION="v1.2.8"
 ENV BACKUP_CRON_EXPRESSION=""
 ENV TG_TOKEN=""
@@ -43,29 +46,16 @@ ENV TG_CHAT_ID=""
 ARG WORKDIR="/config"
 ARG BACKUPDIR="/backup"
 ARG BACKUP_TMP_DIR="/tmp/backup"
-ARG BACKUP_CRON="/etc/cron.d/backup_cron"
-ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
 LABEL author="Jonas Kaninda"
+RUN apk --update add postgresql-client gnupg
-RUN apt-get update -qq
-RUN apt install postgresql-client cron gnupg -y
-# Clear cache
-RUN apt-get clean && rm -rf /var/lib/apt/lists/*
 RUN mkdir $WORKDIR
 RUN mkdir $BACKUPDIR
 RUN mkdir -p $BACKUP_TMP_DIR
 RUN chmod 777 $WORKDIR
 RUN chmod 777 $BACKUPDIR
 RUN chmod 777 $BACKUP_TMP_DIR
-RUN touch $BACKUP_CRON && \
-    touch $BACKUP_CRON_SCRIPT && \
-    chmod 777 $WORKDIR && \
-    chmod 777 $BACKUP_CRON && \
-    chmod 777 $BACKUP_CRON_SCRIPT
+RUN chmod 777 $WORKDIR
 COPY --from=build /app/pg-bkup /usr/local/bin/pg-bkup
 RUN chmod +x /usr/local/bin/pg-bkup

View File

@@ -1,13 +0,0 @@
[supervisord]
nodaemon=true
user=root
logfile=/var/log/supervisor/supervisord.log
pidfile=/var/run/supervisord.pid
[program:cron]
command = /bin/bash -c "declare -p | grep -Ev '^declare -[[:alpha:]]*r' > /run/supervisord.env && /usr/sbin/cron -f -L 15"
autostart=true
autorestart=true
user = root
stderr_logfile=/var/log/cron.err.log
stdout_logfile=/var/log/cron.out.log

View File

@@ -0,0 +1,44 @@
---
title: Backup to FTP remote server
layout: default
parent: How Tos
nav_order: 4
---
# Backup to FTP remote server
As described in the S3 backup section, to change the backup storage to an FTP remote server you need to add `--storage ftp`.
You also need to set the full remote path by adding the `--path /home/jkaninda/backups` flag or using the `REMOTE_PATH` environment variable.
{: .note }
These environment variables are required for FTP backup: `FTP_HOST_NAME`, `FTP_USER`, `FTP_PASSWORD`, `FTP_PORT`, and `REMOTE_PATH`.
```yml
services:
  pg-bkup:
    # In production, it is advised to lock your image tag to a proper
    # release version instead of using `latest`.
    # Check https://github.com/jkaninda/pg-bkup/releases
    # for a list of available releases.
    image: jkaninda/pg-bkup
    container_name: pg-bkup
    command: backup --storage ftp -d database
    environment:
      - DB_PORT=5432
      - DB_HOST=postgres
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## FTP config
      - FTP_HOST_NAME="hostname"
      - FTP_PORT=21
      - FTP_USER=user
      - FTP_PASSWORD=password
      - REMOTE_PATH=/home/jkaninda/backups
    # pg-bkup container must be connected to the same network as your database
    networks:
      - web
networks:
  web:
```
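For a one-off run outside Compose, an equivalent `docker run` invocation might look like the sketch below (same image, flags, and variables as the Compose example; the network name and credentials are placeholders):

```sh
docker run --rm --network web \
  -e DB_HOST=postgres -e DB_PORT=5432 -e DB_NAME=database \
  -e DB_USERNAME=username -e DB_PASSWORD=password \
  -e FTP_HOST_NAME=hostname -e FTP_PORT=21 \
  -e FTP_USER=user -e FTP_PASSWORD=password \
  -e REMOTE_PATH=/home/jkaninda/backups \
  jkaninda/pg-bkup backup --storage ftp -d database
```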

View File

@@ -8,10 +8,10 @@ nav_order: 3
 As described in the S3 backup section, to change the backup storage to an SSH remote server you need to add `--storage ssh` or `--storage remote`.
-You need to add the full remote path by adding the `--path /home/jkaninda/backups` flag or using the `SSH_REMOTE_PATH` environment variable.
+You need to add the full remote path by adding the `--path /home/jkaninda/backups` flag or using the `REMOTE_PATH` environment variable.
 {: .note }
-These environment variables are required for SSH backup: `SSH_HOST_NAME`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, and `SSH_PORT`, or `SSH_PASSWORD` if you don't use a private key to access your server.
+These environment variables are required for SSH backup: `SSH_HOST_NAME`, `SSH_USER`, `REMOTE_PATH`, `SSH_IDENTIFY_FILE`, and `SSH_PORT`, or `SSH_PASSWORD` if you don't use a private key to access your server.
 Accessing the remote server with a password is not recommended; use a private key instead.
 ```yml
@@ -36,7 +36,7 @@ services:
       - SSH_HOST_NAME="hostname"
       - SSH_PORT=22
       - SSH_USER=user
-      - SSH_REMOTE_PATH=/home/jkaninda/backups
+      - REMOTE_PATH=/home/jkaninda/backups
       - SSH_IDENTIFY_FILE=/tmp/id_ed25519
       ## We advise you to use a private key instead of a password
       #- SSH_PASSWORD=password
@@ -76,7 +76,7 @@ services:
       - SSH_HOST_NAME="hostname"
       - SSH_PORT=22
       - SSH_USER=user
-      - SSH_REMOTE_PATH=/home/jkaninda/backups
+      - REMOTE_PATH=/home/jkaninda/backups
       - SSH_IDENTIFY_FILE=/tmp/id_ed25519
       ## We advise you to use a private key instead of a password
       #- SSH_PASSWORD=password
@@ -130,7 +130,7 @@ spec:
value: "22" value: "22"
- name: SSH_USER - name: SSH_USER
value: "xxx" value: "xxx"
- name: SSH_REMOTE_PATH - name: REMOTE_PATH
value: "/home/jkaninda/backups" value: "/home/jkaninda/backups"
- name: AWS_ACCESS_KEY - name: AWS_ACCESS_KEY
value: "xxxx" value: "xxxx"

View File

@@ -2,7 +2,7 @@
 title: Deploy on Kubernetes
 layout: default
 parent: How Tos
-nav_order: 8
+nav_order: 9
 ---
 ## Deploy on Kubernetes

View File

@@ -2,7 +2,7 @@
 title: Encrypt backups using GPG
 layout: default
 parent: How Tos
-nav_order: 7
+nav_order: 8
 ---
 # Encrypt backup

View File

@@ -2,7 +2,7 @@
 title: Migrate database
 layout: default
 parent: How Tos
-nav_order: 9
+nav_order: 10
 ---
 # Migrate database

View File

@@ -2,7 +2,7 @@
 title: Restore database from AWS S3
 layout: default
 parent: How Tos
-nav_order: 5
+nav_order: 6
 ---
 # Restore database from S3 storage

View File

@@ -2,7 +2,7 @@
 title: Restore database from SSH
 layout: default
 parent: How Tos
-nav_order: 6
+nav_order: 7
 ---
 # Restore database from SSH remote server

View File

@@ -2,7 +2,7 @@
 title: Restore database
 layout: default
 parent: How Tos
-nav_order: 4
+nav_order: 5
 ---
 # Restore database

View File

@@ -6,7 +6,7 @@ nav_order: 1
 # About pg-bkup
 {:.no_toc}
-PostgreSQL Backup is a Docker container image that can be used to backup, restore and migrate Postgres database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
+PostgreSQL Backup is a Docker container image that can be used to backup, restore and migrate Postgres database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, FTP and SSH compatible storage.
 It also supports database __encryption__ using GPG.
@@ -158,7 +158,7 @@ While it may work against different implementations, there are no guarantees abo
 We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
-- The original image is based on `ubuntu` and requires additional tools, making it heavy.
+- The original image is based on `Alpine` and requires additional tools, making it heavy.
 - This image is written in Go.
 - `arm64` and `arm/v7` architectures are supported.
 - Docker in Swarm mode is supported.

View File

@@ -55,7 +55,12 @@ Backup, restore and migrate targets, schedule and retention are configured using
 | SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
 | SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
 | SSH_PORT | Optional, required for SSH storage | ssh remote server port |
-| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
+| REMOTE_PATH | Optional, required for SSH or FTP storage | remote path (/home/toto/backup) |
+| FTP_HOST_NAME | Optional, required for FTP storage | FTP host name |
+| FTP_PORT | Optional, required for FTP storage | FTP server port number |
+| FTP_USER | Optional, required for FTP storage | FTP user |
+| FTP_PASSWORD | Optional, required for FTP storage | FTP user password |
 | TARGET_DB_HOST | Optional, required for database migration | Target database host |
 | TARGET_DB_PORT | Optional, required for database migration | Target database port |
 | TARGET_DB_NAME | Optional, required for database migration | Target database name |
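For quick testing, the FTP-related variables from this table could be exported as a shell snippet like the following (hostname, credentials, and path are placeholders):

```sh
export FTP_HOST_NAME=ftp.example.com
export FTP_PORT=21
export FTP_USER=user
export FTP_PASSWORD=password
export REMOTE_PATH=/home/jkaninda/backups
```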

go.mod
View File

@@ -10,7 +10,10 @@ require (
 require (
 	github.com/aws/aws-sdk-go v1.55.3 // indirect
 	github.com/bramvdbogaerde/go-scp v1.5.0 // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/hpcloud/tail v1.0.0 // indirect
+	github.com/jlaffaye/ftp v0.2.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/robfig/cron/v3 v3.0.1 // indirect
 	golang.org/x/crypto v0.18.0 // indirect
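If you were pulling in this dependency by hand rather than through the committed go.mod, the usual command would be something like the following (version pinned to the one recorded in go.sum):

```sh
go get github.com/jlaffaye/ftp@v0.2.0
```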

go.sum
View File

@@ -12,10 +12,17 @@ github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
 github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
+github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=

View File

@@ -81,7 +81,8 @@ func BackupTask(db *dbConfig, config *BackupConfig) {
case "ssh", "remote": case "ssh", "remote":
sshBackup(db, config) sshBackup(db, config)
case "ftp": case "ftp":
utils.Fatal("Not supported storage type: %s", config.storage) ftpBackup(db, config)
//utils.Fatal("Not supported storage type: %s", config.storage)
default: default:
localBackup(db, config) localBackup(db, config)
} }
@@ -196,7 +197,7 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
utils.Info("Uploading backup archive to remote storage S3 ... ") utils.Info("Uploading backup archive to remote storage S3 ... ")
utils.Info("Backup name is %s", finalFileName) utils.Info("Backup name is %s", finalFileName)
err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path) err := UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
if err != nil { if err != nil {
utils.Fatal("Error uploading backup archive to S3: %s ", err) utils.Fatal("Error uploading backup archive to S3: %s ", err)
@@ -210,7 +211,7 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
 	}
 	// Delete old backup
 	if config.prune {
-		err := utils.DeleteOldBackup(bucket, s3Path, config.backupRetention)
+		err := DeleteOldBackup(bucket, s3Path, config.backupRetention)
 		if err != nil {
 			utils.Fatal("Error deleting old backup from S3: %s ", err)
 		}
@@ -256,6 +257,41 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
 	//Delete temp
 	deleteTemp()
 }
+func ftpBackup(db *dbConfig, config *BackupConfig) {
+	utils.Info("Backup database to the remote FTP server")
+	//Backup database
+	BackupDatabase(db, config.backupFileName, disableCompression)
+	finalFileName := config.backupFileName
+	if config.encryption {
+		encryptBackup(config.backupFileName, config.passphrase)
+		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
+	}
+	utils.Info("Uploading backup archive to the remote FTP server ... ")
+	utils.Info("Backup name is %s", finalFileName)
+	err := CopyToFTP(finalFileName, config.remotePath)
+	if err != nil {
+		utils.Fatal("Error uploading file to the remote FTP server: %s ", err)
+	}
+	//Delete backup file from tmp folder
+	err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
+	if err != nil {
+		utils.Error("Error deleting file: %v", err)
+	}
+	if config.prune {
+		//TODO: Delete old backup from remote server
+		utils.Info("Deleting old backup from a remote server is not implemented yet")
+	}
+	utils.Done("Uploading backup archive to the remote FTP server ... done ")
+	//Send notification
+	utils.NotifySuccess(finalFileName)
+	//Delete temp
+	deleteTemp()
+}
 func encryptBackup(backupFileName, gpqPassphrase string) {
 	err := Encrypt(filepath.Join(tmpPath, backupFileName), gpqPassphrase)

View File

@@ -44,6 +44,29 @@ type BackupConfig struct {
 	storage        string
 	cronExpression string
 }
+type FTPConfig struct {
+	host       string
+	user       string
+	password   string
+	port       string
+	remotePath string
+}
+func initFtpConfig() *FTPConfig {
+	//Initialize backup configs
+	fConfig := FTPConfig{}
+	fConfig.host = os.Getenv("FTP_HOST_NAME")
+	fConfig.user = os.Getenv("FTP_USER")
+	fConfig.password = os.Getenv("FTP_PASSWORD")
+	fConfig.port = os.Getenv("FTP_PORT")
+	fConfig.remotePath = os.Getenv("REMOTE_PATH")
+	err := utils.CheckEnvVars(ftpVars)
+	if err != nil {
+		utils.Error("Please make sure all required environment variables for FTP are set")
+		utils.Fatal("Error checking environment variables: %s", err)
+	}
+	return &fConfig
+}
 func initDbConfig(cmd *cobra.Command) *dbConfig {
 	//Set env
@@ -66,9 +89,9 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig {
 	utils.SetEnv("STORAGE_PATH", storagePath)
 	utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
 	utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
+	utils.GetEnv(cmd, "path", "REMOTE_PATH")
 	//Get flag value and set env
-	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
+	remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
 	storage = utils.GetEnv(cmd, "storage", "STORAGE")
 	backupRetention, _ := cmd.Flags().GetInt("keep-last")
 	prune, _ := cmd.Flags().GetBool("prune")
@@ -106,10 +129,11 @@ type RestoreConfig struct {
 func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
 	utils.SetEnv("STORAGE_PATH", storagePath)
+	utils.GetEnv(cmd, "path", "REMOTE_PATH")
 	//Get flag value and set env
 	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
-	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
+	remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
 	storage = utils.GetEnv(cmd, "storage", "STORAGE")
 	file = utils.GetEnv(cmd, "file", "FILE_NAME")
 	_, _ = cmd.Flags().GetString("mode")

pkg/ftp.go (new file)
View File

@@ -0,0 +1,81 @@
package pkg

import (
	"fmt"
	"github.com/jlaffaye/ftp"
	"io"
	"os"
	"path/filepath"
	"time"
)

// initFtpClient initializes and authenticates an FTP client
func initFtpClient() (*ftp.ServerConn, error) {
	ftpConfig := initFtpConfig()
	ftpClient, err := ftp.Dial(fmt.Sprintf("%s:%s", ftpConfig.host, ftpConfig.port), ftp.DialWithTimeout(5*time.Second))
	if err != nil {
		return nil, fmt.Errorf("failed to connect to FTP: %w", err)
	}
	err = ftpClient.Login(ftpConfig.user, ftpConfig.password)
	if err != nil {
		return nil, fmt.Errorf("failed to log in to FTP: %w", err)
	}
	return ftpClient, nil
}

// CopyToFTP uploads a file to the remote FTP server
func CopyToFTP(fileName, remotePath string) (err error) {
	ftpConfig := initFtpConfig()
	ftpClient, err := initFtpClient()
	if err != nil {
		return err
	}
	defer ftpClient.Quit()
	filePath := filepath.Join(tmpPath, fileName)
	file, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("failed to open file %s: %w", fileName, err)
	}
	defer file.Close()
	remoteFilePath := filepath.Join(ftpConfig.remotePath, fileName)
	err = ftpClient.Stor(remoteFilePath, file)
	if err != nil {
		return fmt.Errorf("failed to upload file %s: %w", fileName, err)
	}
	return nil
}

// CopyFromFTP downloads a file from the remote FTP server
func CopyFromFTP(fileName, remotePath string) (err error) {
	ftpClient, err := initFtpClient()
	if err != nil {
		return err
	}
	defer ftpClient.Quit()
	remoteFilePath := filepath.Join(remotePath, fileName)
	r, err := ftpClient.Retr(remoteFilePath)
	if err != nil {
		return fmt.Errorf("failed to retrieve file %s: %w", fileName, err)
	}
	defer r.Close()
	localFilePath := filepath.Join(tmpPath, fileName)
	outFile, err := os.Create(localFilePath)
	if err != nil {
		return fmt.Errorf("failed to create local file %s: %w", fileName, err)
	}
	defer outFile.Close()
	_, err = io.Copy(outFile, r)
	if err != nil {
		return fmt.Errorf("failed to copy data to local file %s: %w", fileName, err)
	}
	return nil
}
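A minimal sketch of how these helpers could be exercised from within the same package (hypothetical; note that `CopyToFTP` ignores its `remotePath` parameter and takes the destination from `initFtpConfig`, i.e. the `REMOTE_PATH` environment variable):

```go
//go:build ignore

package pkg

import "os"

// ftpRoundTrip is a hypothetical round trip: upload a dump from tmpPath,
// then fetch it back into tmpPath.
func ftpRoundTrip() error {
	remote := os.Getenv("REMOTE_PATH") // e.g. /home/jkaninda/backups
	if err := CopyToFTP("backup.sql.gz", remote); err != nil {
		return err
	}
	return CopyFromFTP("backup.sql.gz", remote)
}
```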

View File

@@ -30,7 +30,7 @@ func StartRestore(cmd *cobra.Command) {
case "ssh": case "ssh":
restoreFromRemote(dbConf, restoreConf.file, restoreConf.remotePath) restoreFromRemote(dbConf, restoreConf.file, restoreConf.remotePath)
case "ftp": case "ftp":
utils.Fatal("Restore from FTP is not yet supported") restoreFromFTP(dbConf, restoreConf.file, restoreConf.remotePath)
default: default:
utils.Info("Restore database from local") utils.Info("Restore database from local")
copyToTmp(storagePath, restoreConf.file) copyToTmp(storagePath, restoreConf.file)
@@ -40,7 +40,7 @@ func StartRestore(cmd *cobra.Command) {
 func restoreFromS3(db *dbConfig, file, bucket, s3Path string) {
 	utils.Info("Restore database from s3")
-	err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
+	err := DownloadFile(tmpPath, file, bucket, s3Path)
 	if err != nil {
 		utils.Fatal("Error download file from s3 %s %v ", file, err)
 	}
@@ -54,6 +54,14 @@ func restoreFromRemote(db *dbConfig, file, remotePath string) {
 	}
 	RestoreDatabase(db, file)
 }
+func restoreFromFTP(db *dbConfig, file, remotePath string) {
+	utils.Info("Restore database from FTP server")
+	err := CopyFromFTP(file, remotePath)
+	if err != nil {
+		utils.Fatal("Error download file from FTP server: %s %v", filepath.Join(remotePath, file), err)
+	}
+	RestoreDatabase(db, file)
+}
 // RestoreDatabase restore database
 func RestoreDatabase(db *dbConfig, file string) {
@@ -93,11 +101,11 @@ func RestoreDatabase(db *dbConfig, file string) {
 	testDatabaseConnection(db)
 	utils.Info("Restoring database...")
-	extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
+	extension := filepath.Ext(file)
 	// Restore from compressed file / .sql.gz
 	if extension == ".gz" {
-		str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | psql -h " + db.dbHost + " -p " + db.dbPort + " -U " + db.dbUserName + " -v -d " + db.dbName
-		_, err := exec.Command("bash", "-c", str).Output()
+		str := "zcat " + filepath.Join(tmpPath, file) + " | psql -h " + db.dbHost + " -p " + db.dbPort + " -U " + db.dbUserName + " -v -d " + db.dbName
+		_, err := exec.Command("sh", "-c", str).Output()
 		if err != nil {
 			utils.Fatal("Error, in restoring the database %v", err)
 		}
@@ -108,8 +116,8 @@ func RestoreDatabase(db *dbConfig, file string) {
 	} else if extension == ".sql" {
 		//Restore from sql file
-		str := "cat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | psql -h " + db.dbHost + " -p " + db.dbPort + " -U " + db.dbUserName + " -v -d " + db.dbName
-		_, err := exec.Command("bash", "-c", str).Output()
+		str := "cat " + filepath.Join(tmpPath, file) + " | psql -h " + db.dbHost + " -p " + db.dbPort + " -U " + db.dbUserName + " -v -d " + db.dbName
+		_, err := exec.Command("sh", "-c", str).Output()
 		if err != nil {
 			utils.Fatal("Error in restoring the database %v", err)
 		}
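With `restoreFromFTP` wired in, a Compose service restoring over FTP could look roughly like this sketch (it mirrors the FTP backup example above; the `--file` flag follows the `file`/`FILE_NAME` handling visible in the config diff, and the file name is a placeholder):

```yml
services:
  pg-bkup:
    image: jkaninda/pg-bkup
    container_name: pg-bkup
    command: restore --storage ftp --file backup.sql.gz -d database
    environment:
      - DB_PORT=5432
      - DB_HOST=postgres
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      - FTP_HOST_NAME=hostname
      - FTP_PORT=21
      - FTP_USER=user
      - FTP_PASSWORD=password
      - REMOTE_PATH=/home/jkaninda/backups
    networks:
      - web
networks:
  web:
```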

View File

@@ -1,10 +1,10 @@
-// Package utils
+// Package pkg
 /*****
 @author Jonas Kaninda
 @license MIT License <https://opensource.org/licenses/MIT>
 @Copyright © 2024 Jonas Kaninda
 **/
-package utils
+package pkg
 import (
 	"bytes"
@@ -13,6 +13,7 @@ import (
"github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/jkaninda/pg-bkup/utils"
"net/http" "net/http"
"os" "os"
"path/filepath" "path/filepath"
@@ -32,20 +33,20 @@ func CreateSession() (*session.Session, error) {
"AWS_REGION", "AWS_REGION",
} }
endPoint := GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT") endPoint := utils.GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
accessKey := GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY") accessKey := utils.GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
secretKey := GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY") secretKey := utils.GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
_ = GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME") _ = utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
region := os.Getenv("AWS_REGION") region := os.Getenv("AWS_REGION")
awsDisableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL")) awsDisableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
if err != nil { if err != nil {
Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err) utils.Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
} }
err = CheckEnvVars(awsVars) err = utils.CheckEnvVars(awsVars)
if err != nil { if err != nil {
Fatal("Error checking environment variables\n: %s", err) utils.Fatal("Error checking environment variables\n: %s", err)
} }
// Configure to use MinIO Server // Configure to use MinIO Server
s3Config := &aws.Config{ s3Config := &aws.Config{
@@ -105,10 +106,10 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
 	if err != nil {
 		return err
 	}
-	Info("Download backup from S3 storage...")
+	utils.Info("Download backup from S3 storage...")
 	file, err := os.Create(filepath.Join(destinationPath, key))
 	if err != nil {
-		Error("Failed to create file", err)
+		utils.Error("Failed to create file", err)
 		return err
 	}
 	defer file.Close()
@@ -122,10 +123,10 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
 		Key: aws.String(objectKey),
 	})
 	if err != nil {
-		Error("Failed to download file", err)
+		utils.Error("Failed to download file %s", key)
 		return err
 	}
-	Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)
+	utils.Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)
 	return nil
 }
@@ -155,18 +156,18 @@ func DeleteOldBackup(bucket, prefix string, retention int) error {
 				Key: object.Key,
 			})
 			if err != nil {
-				Info("Failed to delete object %s: %v", *object.Key, err)
+				utils.Info("Failed to delete object %s: %v", *object.Key, err)
 			} else {
-				Info("Deleted object %s\n", *object.Key)
+				utils.Info("Deleted object %s\n", *object.Key)
 			}
 		}
 	}
 	return !lastPage
 	})
 	if err != nil {
-		Error("Failed to list objects: %v", err)
+		utils.Error("Failed to list objects: %v", err)
 	}
-	Info("Finished deleting old files.")
+	utils.Info("Finished deleting old files.")
 	return nil
 }

View File

@@ -41,9 +41,15 @@ var targetDbConf *targetDbConfig
 // sshVars Required environment variables for SSH remote server storage
 var sshVars = []string{
 	"SSH_USER",
-	"SSH_REMOTE_PATH",
 	"SSH_HOST_NAME",
 	"SSH_PORT",
+	"REMOTE_PATH",
+}
+var ftpVars = []string{
+	"FTP_HOST_NAME",
+	"FTP_USER",
+	"FTP_PASSWORD",
+	"FTP_PORT",
 }
 // AwsVars Required environment variables for AWS S3 storage