Migrate from s3fs to go aws s3 client

This commit is contained in:
2024-07-29 07:33:26 +02:00
parent 0aa38eec20
commit a8fa58dacb
11 changed files with 359 additions and 41 deletions

55
.github/workflows/deploy-docs.yml vendored Normal file
View File

@@ -0,0 +1,55 @@
name: Deploy Documentation site to GitHub Pages
on:
push:
branches: ['main']
paths:
- 'docs/**'
- '.github/workflows/deploy-docs.yml'
workflow_dispatch:
permissions:
contents: read
pages: write
id-token: write
concurrency:
group: 'pages'
cancel-in-progress: true
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Ruby
uses: ruby/setup-ruby@v1
with:
ruby-version: '3.2'
bundler-cache: true
cache-version: 0
working-directory: docs
- name: Setup Pages
id: pages
uses: actions/configure-pages@v2
- name: Build with Jekyll
working-directory: docs
run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
env:
JEKYLL_ENV: production
- name: Upload artifact
uses: actions/upload-pages-artifact@v1
with:
path: 'docs/_site/'
deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
needs: build
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v1

View File

@@ -17,7 +17,7 @@ docker-build:
docker build -f docker/Dockerfile -t jkaninda/pg-bkup:latest . docker build -f docker/Dockerfile -t jkaninda/pg-bkup:latest .
docker-run: docker-build docker-run: docker-build
docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" jkaninda/pg-bkup bkup backup --prune --keep-last 2 docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" jkaninda/pg-bkup bkup backup --prune --keep-last 2
docker-run-scheduled: docker-build docker-run-scheduled: docker-build
@@ -27,6 +27,10 @@ docker-run-scheduled: docker-build
docker-run-scheduled-s3: docker-build docker-run-scheduled-s3: docker-build
docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *" docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
docker-restore-s3: docker-build docker-run-s3: docker-build
docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "FILE_NAME=${FILE_NAME}" jkaninda/pg-bkup bkup restore --storage s3 --path /custom-path docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "REGION=eu2" jkaninda/pg-bkup bkup backup --storage s3 --path /custom-path
docker-restore-s3: docker-build
docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "REGION=eu2" -e "FILE_NAME=${FILE_NAME}" jkaninda/pg-bkup bkup restore --storage s3 --path /custom-path

View File

@@ -19,6 +19,13 @@ ENV STORAGE=local
ENV BUCKET_NAME="" ENV BUCKET_NAME=""
ENV ACCESS_KEY="" ENV ACCESS_KEY=""
ENV SECRET_KEY="" ENV SECRET_KEY=""
ENV REGION=""
ENV SSH_USER=""
ENV SSH_PASSWORD=""
ENV SSH_HOST_NAME=""
ENV SSH_IDENTIFY_FILE="/root/.ssh/id_rsa"
ENV GPG_PASS_PHRASE=""
ENV SSH_PORT="22"
ENV S3_ENDPOINT=https://s3.amazonaws.com ENV S3_ENDPOINT=https://s3.amazonaws.com
ARG DEBIAN_FRONTEND=noninteractive ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="v0.6" ENV VERSION="v0.6"
@@ -26,7 +33,7 @@ LABEL authors="Jonas Kaninda"
RUN apt-get update -qq RUN apt-get update -qq
RUN apt install s3fs postgresql-client postgresql-client-common libpq-dev supervisor cron -y RUN apt install postgresql-client postgresql-client-common supervisor cron openssh-client -y
# Clear cache # Clear cache
RUN apt-get clean && rm -rf /var/lib/apt/lists/* RUN apt-get clean && rm -rf /var/lib/apt/lists/*
@@ -45,4 +52,5 @@ ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
RUN mkdir /backup RUN mkdir /backup
WORKDIR /backup RUN mkdir /tmp/pg-bkup
WORKDIR /root

3
go.mod
View File

@@ -8,8 +8,9 @@ require (
) )
require ( require (
github.com/aws/aws-sdk-go v1.55.3 // indirect
github.com/hpcloud/tail v1.0.0 // indirect github.com/hpcloud/tail v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect
golang.org/x/sys v0.22.0 // indirect golang.org/x/sys v0.22.0 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect

35
go.sum
View File

@@ -1,18 +1,53 @@
github.com/aws/aws-sdk-go v1.55.3 h1:0B5hOX+mIx7I5XPOrjrHlKSDQV/+ypFZpIHOx5LOk3E=
github.com/aws/aws-sdk-go v1.55.3/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.74 h1:fTo/XlPBTSpo3BAMshlwKL5RspXRv9us5UeHEGYCFe0=
github.com/minio/minio-go/v7 v7.0.74/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -33,16 +33,35 @@ func StartBackup(cmd *cobra.Command) {
prune, _ := cmd.Flags().GetBool("prune") prune, _ := cmd.Flags().GetBool("prune")
disableCompression, _ = cmd.Flags().GetBool("disable-compression") disableCompression, _ = cmd.Flags().GetBool("disable-compression")
executionMode, _ = cmd.Flags().GetString("mode") executionMode, _ = cmd.Flags().GetString("mode")
dbName = os.Getenv("DB_NAME")
storagePath = os.Getenv("STORAGE_PATH")
//Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
if disableCompression {
backupFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
}
if executionMode == "default" { if executionMode == "default" {
if storage == "s3" { switch storage {
case "s3":
utils.Info("Backup database to s3 storage") utils.Info("Backup database to s3 storage")
s3Backup(disableCompression, s3Path, prune, keepLast) BackupDatabase(backupFileName, disableCompression, prune, keepLast)
} else { s3Upload(backupFileName, s3Path)
case "local":
utils.Info("Backup database to local storage") utils.Info("Backup database to local storage")
BackupDatabase(disableCompression, prune, keepLast) BackupDatabase(backupFileName, disableCompression, prune, keepLast)
moveToBackup(backupFileName, storagePath)
case "ssh":
fmt.Println("x is 2")
case "ftp":
fmt.Println("x is 3")
default:
utils.Info("Backup database to local storage")
BackupDatabase(backupFileName, disableCompression, prune, keepLast)
moveToBackup(backupFileName, storagePath)
} }
} else if executionMode == "scheduled" { } else if executionMode == "scheduled" {
scheduledMode() scheduledMode()
} else { } else {
@@ -98,7 +117,7 @@ func scheduledMode() {
} }
// BackupDatabase backup database // BackupDatabase backup database
func BackupDatabase(disableCompression bool, prune bool, keepLast int) { func BackupDatabase(backupFileName string, disableCompression bool, prune bool, keepLast int) {
dbHost = os.Getenv("DB_HOST") dbHost = os.Getenv("DB_HOST")
dbPassword = os.Getenv("DB_PASSWORD") dbPassword = os.Getenv("DB_PASSWORD")
dbUserName = os.Getenv("DB_USERNAME") dbUserName = os.Getenv("DB_USERNAME")
@@ -117,12 +136,9 @@ func BackupDatabase(disableCompression bool, prune bool, keepLast int) {
utils.TestDatabaseConnection() utils.TestDatabaseConnection()
// Backup Database database // Backup Database database
utils.Info("Backing up database...") utils.Info("Backing up database...")
//Generate file name
bkFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
// Verify is compression is disabled // Verify is compression is disabled
if disableCompression { if disableCompression {
bkFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
// Execute pg_dump // Execute pg_dump
cmd := exec.Command("pg_dump", cmd := exec.Command("pg_dump",
"-h", dbHost, "-h", dbHost,
@@ -135,7 +151,7 @@ func BackupDatabase(disableCompression bool, prune bool, keepLast int) {
log.Fatal(err) log.Fatal(err)
} }
// save output // save output
file, err := os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName)) file, err := os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@@ -145,7 +161,6 @@ func BackupDatabase(disableCompression bool, prune bool, keepLast int) {
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
utils.Done("Database has been backed up")
} else { } else {
// Execute pg_dump // Execute pg_dump
@@ -162,7 +177,7 @@ func BackupDatabase(disableCompression bool, prune bool, keepLast int) {
gzipCmd := exec.Command("gzip") gzipCmd := exec.Command("gzip")
gzipCmd.Stdin = stdout gzipCmd.Stdin = stdout
// save output // save output
gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName)) gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
gzipCmd.Start() gzipCmd.Start()
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
@@ -173,30 +188,62 @@ func BackupDatabase(disableCompression bool, prune bool, keepLast int) {
if err := gzipCmd.Wait(); err != nil { if err := gzipCmd.Wait(); err != nil {
log.Fatal(err) log.Fatal(err)
} }
utils.Done("Database has been backed up")
} }
utils.Done("Database has been backed up")
//Delete old backup //Delete old backup
if prune { //if prune {
deleteOldBackup(keepLast) // deleteOldBackup(keepLast)
} //}
historyFile, err := os.OpenFile(fmt.Sprintf("%s/history.txt", storagePath), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) historyFile, err := os.OpenFile(fmt.Sprintf("%s/history.txt", tmpPath), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
defer historyFile.Close() defer historyFile.Close()
if _, err := historyFile.WriteString(bkFileName + "\n"); err != nil { if _, err := historyFile.WriteString(backupFileName + "\n"); err != nil {
log.Fatal(err) log.Fatal(err)
} }
} }
} }
func moveToBackup(backupFileName string, destinationPath string) {
//Copy backup from tmp folder to storage destination
err := utils.CopyFile(filepath.Join(tmpPath, backupFileName), filepath.Join(destinationPath, backupFileName))
if err != nil {
utils.Fatal("Error copying file ", backupFileName, err)
func s3Backup(disableCompression bool, s3Path string, prune bool, keepLast int) { }
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
if err != nil {
fmt.Println("Error deleting file:", err)
}
utils.Done("Database has been backed up and copied to destination ")
}
func s3Upload(backupFileName string, s3Path string) {
bucket := os.Getenv("BUCKET_NAME")
utils.Info("Uploading file to S3 storage")
err := utils.UploadFileToS3(tmpPath, backupFileName, bucket, s3Path)
if err != nil {
utils.Fatalf("Error uploading file to S3: %s ", err)
}
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
if err != nil {
fmt.Println("Error deleting file:", err)
}
utils.Done("Database has been backed up and uploaded to s3 ")
}
func s3Backup(backupFileName string, disableCompression bool, s3Path string, prune bool, keepLast int) {
// Backup Database to S3 storage // Backup Database to S3 storage
MountS3Storage(s3Path) //MountS3Storage(s3Path)
BackupDatabase(disableCompression, prune, keepLast) //BackupDatabase(backupFileName, disableCompression, prune, keepLast)
} }
func deleteOldBackup(keepLast int) { func deleteOldBackup(keepLast int) {
utils.Info("Deleting old backups...") utils.Info("Deleting old backups...")

1
pkg/encrypt_archive.go Normal file
View File

@@ -0,0 +1 @@
package pkg

View File

@@ -21,13 +21,34 @@ func StartRestore(cmd *cobra.Command) {
storage = utils.GetEnv(cmd, "storage", "STORAGE") storage = utils.GetEnv(cmd, "storage", "STORAGE")
file = utils.GetEnv(cmd, "file", "FILE_NAME") file = utils.GetEnv(cmd, "file", "FILE_NAME")
executionMode, _ = cmd.Flags().GetString("mode") executionMode, _ = cmd.Flags().GetString("mode")
bucket := os.Getenv("BUCKET_NAME")
if storage == "s3" { switch storage {
case "s3":
utils.Info("Restore database from s3") utils.Info("Restore database from s3")
s3Restore(file, s3Path) err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
} else { if err != nil {
utils.Fatal("Error download file from s3 ", file, err)
}
RestoreDatabase(file)
case "local":
utils.Info("Restore database from local")
copyTmp(storagePath, file)
RestoreDatabase(file)
case "ssh":
fmt.Println("x is 2")
case "ftp":
fmt.Println("x is 3")
default:
utils.Info("Restore database from local") utils.Info("Restore database from local")
RestoreDatabase(file) RestoreDatabase(file)
}
}
func copyTmp(sourcePath string, backupFileName string) {
//Copy backup from tmp folder to storage destination
err := utils.CopyFile(filepath.Join(sourcePath, backupFileName), filepath.Join(tmpPath, backupFileName))
if err != nil {
utils.Fatal("Error copying file ", backupFileName, err)
} }
} }
@@ -39,7 +60,7 @@ func RestoreDatabase(file string) {
dbUserName = os.Getenv("DB_USERNAME") dbUserName = os.Getenv("DB_USERNAME")
dbName = os.Getenv("DB_NAME") dbName = os.Getenv("DB_NAME")
dbPort = os.Getenv("DB_PORT") dbPort = os.Getenv("DB_PORT")
storagePath = os.Getenv("STORAGE_PATH") //storagePath = os.Getenv("STORAGE_PATH")
if file == "" { if file == "" {
utils.Fatal("Error, file required") utils.Fatal("Error, file required")
} }
@@ -48,7 +69,7 @@ func RestoreDatabase(file string) {
utils.Fatal("Please make sure all required environment variables are set") utils.Fatal("Please make sure all required environment variables are set")
} else { } else {
if utils.FileExists(fmt.Sprintf("%s/%s", storagePath, file)) { if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
err := os.Setenv("PGPASSWORD", dbPassword) err := os.Setenv("PGPASSWORD", dbPassword)
if err != nil { if err != nil {
@@ -56,10 +77,10 @@ func RestoreDatabase(file string) {
} }
utils.TestDatabaseConnection() utils.TestDatabaseConnection()
extension := filepath.Ext(fmt.Sprintf("%s/%s", storagePath, file)) extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
// Restore from compressed file / .sql.gz // Restore from compressed file / .sql.gz
if extension == ".gz" { if extension == ".gz" {
str := "zcat " + fmt.Sprintf("%s/%s", storagePath, file) + " | psql -h " + os.Getenv("DB_HOST") + " -p " + os.Getenv("DB_PORT") + " -U " + os.Getenv("DB_USERNAME") + " -v -d " + os.Getenv("DB_NAME") str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | psql -h " + os.Getenv("DB_HOST") + " -p " + os.Getenv("DB_PORT") + " -U " + os.Getenv("DB_USERNAME") + " -v -d " + os.Getenv("DB_NAME")
_, err := exec.Command("bash", "-c", str).Output() _, err := exec.Command("bash", "-c", str).Output()
if err != nil { if err != nil {
utils.Fatal("Error, in restoring the database") utils.Fatal("Error, in restoring the database")
@@ -68,7 +89,7 @@ func RestoreDatabase(file string) {
} else if extension == ".sql" { } else if extension == ".sql" {
//Restore from sql file //Restore from sql file
str := "cat " + fmt.Sprintf("%s/%s", storagePath, file) + " | psql -h " + os.Getenv("DB_HOST") + " -p " + os.Getenv("DB_PORT") + " -U " + os.Getenv("DB_USERNAME") + " -v -d " + os.Getenv("DB_NAME") str := "cat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | psql -h " + os.Getenv("DB_HOST") + " -p " + os.Getenv("DB_PORT") + " -U " + os.Getenv("DB_USERNAME") + " -v -d " + os.Getenv("DB_NAME")
_, err := exec.Command("bash", "-c", str).Output() _, err := exec.Command("bash", "-c", str).Output()
if err != nil { if err != nil {
utils.Fatal("Error in restoring the database", err) utils.Fatal("Error in restoring the database", err)
@@ -79,12 +100,13 @@ func RestoreDatabase(file string) {
} }
} else { } else {
utils.Fatal("File not found in ", fmt.Sprintf("%s/%s", storagePath, file)) utils.Fatal("File not found in ", fmt.Sprintf("%s/%s", tmpPath, file))
} }
} }
} }
func s3Restore(file, s3Path string) {
// Restore database from S3 //func s3Restore(file, s3Path string) {
MountS3Storage(s3Path) // // Restore database from S3
RestoreDatabase(file) // MountS3Storage(s3Path)
} // RestoreDatabase(file)
//}

View File

@@ -3,6 +3,7 @@ package pkg
const s3MountPath string = "/s3mnt" const s3MountPath string = "/s3mnt"
const s3fsPasswdFile string = "/etc/passwd-s3fs" const s3fsPasswdFile string = "/etc/passwd-s3fs"
const cronLogFile = "/var/log/pg-bkup.log" const cronLogFile = "/var/log/pg-bkup.log"
const tmpPath = "/tmp/pg-bkup"
const backupCronFile = "/usr/local/bin/backup_cron.sh" const backupCronFile = "/usr/local/bin/backup_cron.sh"
var ( var (

107
utils/s3.go Normal file
View File

@@ -0,0 +1,107 @@
package utils
import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// CreateSession builds an AWS session from the environment variables
// S3_ENDPOINT, REGION, ACCESS_KEY and SECRET_KEY.
// Path-style addressing is forced so MinIO-compatible endpoints work.
func CreateSession() (*session.Session, error) {
	cfg := aws.Config{
		Credentials:      credentials.NewStaticCredentials(os.Getenv("ACCESS_KEY"), os.Getenv("SECRET_KEY"), ""),
		Endpoint:         aws.String(os.Getenv("S3_ENDPOINT")),
		Region:           aws.String(os.Getenv("REGION")),
		DisableSSL:       aws.Bool(false),
		S3ForcePathStyle: aws.Bool(true),
	}
	return session.NewSession(&cfg)
}
// UploadFileToS3 uploads the file at filePath/key to the given bucket
// under the object key prefix/key. The Content-Type is sniffed from
// the file's leading bytes.
func UploadFileToS3(filePath, key, bucket, prefix string) error {
	sess, err := CreateSession()
	if err != nil {
		return err
	}
	svc := s3.New(sess)

	file, err := os.Open(filepath.Join(filePath, key))
	if err != nil {
		return err
	}
	defer file.Close()

	fileInfo, err := file.Stat()
	if err != nil {
		return err
	}

	// Read the whole file with io.ReadAll: a single file.Read call is
	// allowed to return fewer bytes than requested (short read) and its
	// error was previously ignored, which could silently upload a
	// truncated / zero-filled object.
	buffer, err := io.ReadAll(file)
	if err != nil {
		return err
	}

	objectKey := fmt.Sprintf("%s/%s", prefix, key)
	_, err = svc.PutObject(&s3.PutObjectInput{
		Bucket:        aws.String(bucket),
		Key:           aws.String(objectKey),
		Body:          bytes.NewReader(buffer),
		ContentLength: aws.Int64(fileInfo.Size()),
		ContentType:   aws.String(http.DetectContentType(buffer)),
	})
	return err
}
// DownloadFile downloads the object bucket/prefix/key from S3 into
// destinationPath/key. If the download fails, the partially written
// local file is removed so no truncated backup is left behind.
func DownloadFile(destinationPath, key, bucket, prefix string) error {
	sess, err := CreateSession()
	if err != nil {
		return err
	}
	file, err := os.Create(filepath.Join(destinationPath, key))
	if err != nil {
		fmt.Println("Failed to create file", err)
		return err
	}
	defer file.Close()
	objectKey := fmt.Sprintf("%s/%s", prefix, key)
	downloader := s3manager.NewDownloader(sess)
	numBytes, err := downloader.Download(file,
		&s3.GetObjectInput{
			Bucket: aws.String(bucket),
			Key:    aws.String(objectKey),
		})
	if err != nil {
		fmt.Println("Failed to download file", err)
		// Don't leave a truncated/partial file on disk after a failure.
		os.Remove(file.Name())
		return err
	}
	fmt.Println("Bytes size", numBytes)
	Info("Backup downloaded to ", file.Name())
	return nil
}

View File

@@ -9,6 +9,7 @@ package utils
import ( import (
"fmt" "fmt"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"io"
"io/fs" "io/fs"
"os" "os"
) )
@@ -46,6 +47,42 @@ func WriteToFile(filePath, content string) error {
_, err = file.WriteString(content) _, err = file.WriteString(content)
return err return err
} }
func DeleteFile(filePath string) error {
err := os.Remove(filePath)
if err != nil {
return fmt.Errorf("failed to delete file: %v", err)
}
return nil
}
func CopyFile(src, dst string) error {
// Open the source file for reading
sourceFile, err := os.Open(src)
if err != nil {
return fmt.Errorf("failed to open source file: %v", err)
}
defer sourceFile.Close()
// Create the destination file
destinationFile, err := os.Create(dst)
if err != nil {
return fmt.Errorf("failed to create destination file: %v", err)
}
defer destinationFile.Close()
// Copy the content from source to destination
_, err = io.Copy(destinationFile, sourceFile)
if err != nil {
return fmt.Errorf("failed to copy file: %v", err)
}
// Flush the buffer to ensure all data is written
err = destinationFile.Sync()
if err != nil {
return fmt.Errorf("failed to sync destination file: %v", err)
}
return nil
}
func ChangePermission(filePath string, mod int) { func ChangePermission(filePath string, mod int) {
if err := os.Chmod(filePath, fs.FileMode(mod)); err != nil { if err := os.Chmod(filePath, fs.FileMode(mod)); err != nil {
Fatalf("Error changing permissions of %s: %v\n", filePath, err) Fatalf("Error changing permissions of %s: %v\n", filePath, err)