From 9ef4afd7b3a4dd340281c6f5aa4dd74d470bafd6 Mon Sep 17 00:00:00 2001 From: Andrew Repp Date: Tue, 18 Apr 2023 20:09:55 -0500 Subject: [PATCH 01/45] Under certain flags, place timestamp+pid lockfile in backup-dir To support specific use cases, currently mainly gpcopy, if the correct flags are provided gpbackup should place its timestamp+pid lockfile in the backupdir, instead of in /tmp. This is to facilitate the highly-parallel metadata-only backups that gpcopy uses when dumping metadata. The flags required to elicit this behavior are: metadata-only, no-history, and backup-dir. --- backup/queries_globals.go | 2 +- backup/wrappers.go | 10 ++++- end_to_end/end_to_end_suite_test.go | 69 +++++++++++++++++++++++++++++ 3 files changed, 79 insertions(+), 2 deletions(-) diff --git a/backup/queries_globals.go b/backup/queries_globals.go index 3ed13d7eb..0d0a6b694 100644 --- a/backup/queries_globals.go +++ b/backup/queries_globals.go @@ -295,7 +295,7 @@ func GetResourceGroups[T ResourceGroupBefore7 | ResourceGroupAtLeast7](connectio } results := make([]T, 0) - err := connectionPool.Select(&results, query) // AJR TODO -- not sure this is smart enough to deserialize into a generic struct. let's find out! + err := connectionPool.Select(&results, query) gplog.FatalOnError(err) return results } diff --git a/backup/wrappers.go b/backup/wrappers.go index 456f35393..dbfcb28e5 100644 --- a/backup/wrappers.go +++ b/backup/wrappers.go @@ -166,7 +166,15 @@ func initializeBackupReport(opts options.Options) { func createBackupLockFile(timestamp string) { var err error - timestampLockFile := fmt.Sprintf("/tmp/%s.lck", timestamp) + var timestampLockFile string + metadataOnly := MustGetFlagBool(options.METADATA_ONLY) + backupDir := MustGetFlagString(options.BACKUP_DIR) + noHistory := MustGetFlagBool(options.NO_HISTORY) + if metadataOnly && noHistory && backupDir != "" { + timestampLockFile = fmt.Sprintf("%s/%s.lck", backupDir, timestamp) + } else { + timestampLockFile = fmt.Sprintf("/tmp/%s.lck", timestamp) + } backupLockFile, err = lockfile.New(timestampLockFile) gplog.FatalOnError(err) err = backupLockFile.TryLock() diff --git a/end_to_end/end_to_end_suite_test.go b/end_to_end/end_to_end_suite_test.go index 0852354e3..36ed048b8 100644 --- a/end_to_end/end_to_end_suite_test.go +++ b/end_to_end/end_to_end_suite_test.go @@ -14,6 +14,7 @@ import ( "sort" "strconv" "strings" + "sync" "testing" "time" @@ -2133,4 +2134,72 @@ LANGUAGE plpgsql NO SQL;`) Entry("Will correctly handle filtering on child table", "schemaone.measurement", "schemaone.measurement_peaktemp_catchall", "9", "8", "3"), ) }) + Describe("Concurrent backups will only work if given unique backup directories and the flags: metadata-only, backup-dir, and no-history", func() { + var backupDir1 string + var backupDir2 string + var backupDir3 string + BeforeEach(func() { + backupDir1 = path.Join(backupDir, "conc_test1") + backupDir2 = path.Join(backupDir, "conc_test2") + backupDir3 = path.Join(backupDir, "conc_test3") + os.Mkdir(backupDir1, 0777) + os.Mkdir(backupDir2, 0777) + os.Mkdir(backupDir3, 0777) + }) + AfterEach(func() { + os.RemoveAll(backupDir1) + os.RemoveAll(backupDir2) + os.RemoveAll(backupDir3) + }) + It("backs up successfully with the correct flags", func() { + command1 := exec.Command(gpbackupPath, "--dbname", "testdb", "--backup-dir", backupDir1, "--no-history", "--metadata-only") + command2 := exec.Command(gpbackupPath, "--dbname", "testdb", "--backup-dir", backupDir2, "--no-history", "--metadata-only") + command3 := 
exec.Command(gpbackupPath, "--dbname", "testdb", "--backup-dir", backupDir3, "--no-history", "--metadata-only") + commands := []*exec.Cmd{command1, command2, command3} + + var backWg sync.WaitGroup + errchan := make(chan error, len(commands)) + for _, cmd := range commands { + backWg.Add(1) + go func(command *exec.Cmd) { + defer backWg.Done() + _, err := command.CombinedOutput() + errchan <- err + }(cmd) + } + backWg.Wait() + close(errchan) + + for err := range errchan { + Expect(err).ToNot(HaveOccurred()) + } + }) + It("fails without the correct flags", func() { + command1 := exec.Command(gpbackupPath, "--dbname", "testdb", "--backup-dir", backupDir1) + command2 := exec.Command(gpbackupPath, "--dbname", "testdb", "--backup-dir", backupDir1) + command3 := exec.Command(gpbackupPath, "--dbname", "testdb", "--backup-dir", backupDir1) + commands := []*exec.Cmd{command1, command2, command3} + + var backWg sync.WaitGroup + errchan := make(chan error, len(commands)) + for _, cmd := range commands { + backWg.Add(1) + go func(command *exec.Cmd) { + defer backWg.Done() + _, err := command.CombinedOutput() + errchan <- err + }(cmd) + } + backWg.Wait() + close(errchan) + + errcounter := 0 + for err := range errchan { + if err != nil { + errcounter++ + } + } + Expect(errcounter > 0).To(BeTrue()) + }) + }) }) From 78bfb340ae58c4cc203092296a7246f459a39825 Mon Sep 17 00:00:00 2001 From: Andrew Repp Date: Mon, 17 Apr 2023 15:43:50 -0500 Subject: [PATCH 02/45] Update datadomain Vault vars We're updating our CI to validate against the newest version of datadomain. Update vault vars to new point to the new instances. --- ci/gpbackup-generated.yml | 10 +++++----- ci/gpbackup-release-generated.yml | 10 +++++----- ci/templates/gpbackup-tpl.yml | 10 +++++----- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/ci/gpbackup-generated.yml b/ci/gpbackup-generated.yml index 4bdecaa46..93d2955af 100644 --- a/ci/gpbackup-generated.yml +++ b/ci/gpbackup-generated.yml @@ -12,7 +12,7 @@ ## file (example: templates/gpbackup-tpl.yml) and regenerate the pipeline ## using appropriate tool (example: gen_pipeline.py -p gpbackup-release). ## ---------------------------------------------------------------------- -## Generated by gen_pipeline.py at: 2023-04-11 14:51:34.942939 +## Generated by gen_pipeline.py at: 2023-04-24 16:47:47.148063 ## Template file: gpbackup-tpl.yml ## Pipeline Name: gpbackup ## Nightly Trigger: True @@ -163,11 +163,11 @@ anchors: bucket_path: clusters-google/ - &ddboost_params - DD_SOURCE_HOST: ((dp/prod/datadomain_source_host_gcp)) + DD_SOURCE_HOST: ((dp/prod/datadomain_711_source_host_gcp)) DD_USER: ((dp/prod/datadomain_user)) - DD_PW: ((dp/prod/datadomain_password_gcp)) - DD_DEST_HOST: ((dp/prod/datadomain_dest_host_gcp)) - DD_ENCRYPTED_PW: ((dp/prod/encrypted_datadomain_password_gcp)) + DD_PW: ((dp/prod/datadomain_711_password_gcp)) + DD_DEST_HOST: ((dp/prod/datadomain_711_dest_host_gcp)) + DD_ENCRYPTED_PW: ((dp/prod/encrypted_datadomain_711_password_gcp)) - &slack_alert put: slack-alert diff --git a/ci/gpbackup-release-generated.yml b/ci/gpbackup-release-generated.yml index f0fce4961..b2a339ef7 100644 --- a/ci/gpbackup-release-generated.yml +++ b/ci/gpbackup-release-generated.yml @@ -12,7 +12,7 @@ ## file (example: templates/gpbackup-tpl.yml) and regenerate the pipeline ## using appropriate tool (example: gen_pipeline.py -p gpbackup-release). 
## ---------------------------------------------------------------------- -## Generated by gen_pipeline.py at: 2023-04-14 15:09:22.178313 +## Generated by gen_pipeline.py at: 2023-04-24 16:47:47.164907 ## Template file: gpbackup-tpl.yml ## Pipeline Name: gpbackup-release ## Nightly Trigger: True @@ -156,11 +156,11 @@ anchors: bucket_path: clusters-google/ - &ddboost_params - DD_SOURCE_HOST: ((dp/prod/datadomain_source_host_gcp)) + DD_SOURCE_HOST: ((dp/prod/datadomain_711_source_host_gcp)) DD_USER: ((dp/prod/datadomain_user)) - DD_PW: ((dp/prod/datadomain_password_gcp)) - DD_DEST_HOST: ((dp/prod/datadomain_dest_host_gcp)) - DD_ENCRYPTED_PW: ((dp/prod/encrypted_datadomain_password_gcp)) + DD_PW: ((dp/prod/datadomain_711_password_gcp)) + DD_DEST_HOST: ((dp/prod/datadomain_711_dest_host_gcp)) + DD_ENCRYPTED_PW: ((dp/prod/encrypted_datadomain_711_password_gcp)) - &slack_alert put: slack-alert diff --git a/ci/templates/gpbackup-tpl.yml b/ci/templates/gpbackup-tpl.yml index 9c04f52fb..05f9bb92a 100644 --- a/ci/templates/gpbackup-tpl.yml +++ b/ci/templates/gpbackup-tpl.yml @@ -176,11 +176,11 @@ anchors: bucket_path: clusters-google/ - &ddboost_params - DD_SOURCE_HOST: ((dp/$$DEV_PROD$$/datadomain_source_host_gcp)) - DD_USER: ((dp/$$DEV_PROD$$/datadomain_user)) - DD_PW: ((dp/$$DEV_PROD$$/datadomain_password_gcp)) - DD_DEST_HOST: ((dp/$$DEV_PROD$$/datadomain_dest_host_gcp)) - DD_ENCRYPTED_PW: ((dp/$$DEV_PROD$$/encrypted_datadomain_password_gcp)) + DD_SOURCE_HOST: ((dp/$$DEV_PROD$$/datadomain_711_source_host_gcp)) + DD_USER: ((dp/prod/datadomain_user)) + DD_PW: ((dp/prod/datadomain_711_password_gcp)) + DD_DEST_HOST: ((dp/$$DEV_PROD$$/datadomain_711_dest_host_gcp)) + DD_ENCRYPTED_PW: ((dp/prod/encrypted_datadomain_711_password_gcp)) {% if is_prod or "gpbackup-release" == pipeline_name %} - &slack_alert From 4f30f34bc905c8262e6da893053001e4ef1a1291 Mon Sep 17 00:00:00 2001 From: Andrew Repp Date: Tue, 25 Apr 2023 09:11:52 -0500 Subject: [PATCH 03/45] Add skip to an end to end test This test relies on a flag that was only added to gpbackup 1.28.0, so skip if we're testing with an older version. --- end_to_end/end_to_end_suite_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/end_to_end/end_to_end_suite_test.go b/end_to_end/end_to_end_suite_test.go index 36ed048b8..cbe56add4 100644 --- a/end_to_end/end_to_end_suite_test.go +++ b/end_to_end/end_to_end_suite_test.go @@ -2152,6 +2152,8 @@ LANGUAGE plpgsql NO SQL;`) os.RemoveAll(backupDir3) }) It("backs up successfully with the correct flags", func() { + // --no-history flag was added in 1.28.0 + skipIfOldBackupVersionBefore("1.28.0") command1 := exec.Command(gpbackupPath, "--dbname", "testdb", "--backup-dir", backupDir1, "--no-history", "--metadata-only") command2 := exec.Command(gpbackupPath, "--dbname", "testdb", "--backup-dir", backupDir2, "--no-history", "--metadata-only") command3 := exec.Command(gpbackupPath, "--dbname", "testdb", "--backup-dir", backupDir3, "--no-history", "--metadata-only") From fcd0c6c0ba0dc2a029815efd86ac537eb62db243 Mon Sep 17 00:00:00 2001 From: Andrew Repp Date: Tue, 2 May 2023 11:20:54 -0500 Subject: [PATCH 04/45] Update CI artifact names Releng team changed some "rhel" artifacts to "el" to reflect more general approach to RHEL binary compatibility. Update our CI to pull from the correct locations. 
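If needed, the renamed artifacts can be spot-checked in the CI bucket before flying the
updated pipelines. A rough sketch only, assuming gsutil access; the bucket name is a
placeholder, not something taken from this change:

    # list server release candidates published under the new "el" naming
    gsutil ls "gs://<ci-bucket>/server/published/main/" | grep -E 'server-rc-.*-el8_x86_64\.tar\.gz'
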
--- ci/gpbackup-generated.yml | 4 ++-- ci/gpbackup-release-generated.yml | 4 ++-- ci/regression/regression_pipeline.yml | 2 +- ci/templates/gpbackup-tpl.yml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ci/gpbackup-generated.yml b/ci/gpbackup-generated.yml index 93d2955af..10cec1121 100644 --- a/ci/gpbackup-generated.yml +++ b/ci/gpbackup-generated.yml @@ -12,7 +12,7 @@ ## file (example: templates/gpbackup-tpl.yml) and regenerate the pipeline ## using appropriate tool (example: gen_pipeline.py -p gpbackup-release). ## ---------------------------------------------------------------------- -## Generated by gen_pipeline.py at: 2023-04-24 16:47:47.148063 +## Generated by gen_pipeline.py at: 2023-05-02 11:19:59.432083 ## Template file: gpbackup-tpl.yml ## Pipeline Name: gpbackup ## Nightly Trigger: True @@ -332,7 +332,7 @@ resources: source: bucket: ((dp/prod/gcs-ci-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/main/server-rc-(.*)-rhel8_x86_64.tar.gz + regexp: server/published/main/server-rc-(.*)-el8_x86_64.tar.gz - name: bin_gpdb_5x_stable_centos6 type: s3 diff --git a/ci/gpbackup-release-generated.yml b/ci/gpbackup-release-generated.yml index b2a339ef7..5bfbb2437 100644 --- a/ci/gpbackup-release-generated.yml +++ b/ci/gpbackup-release-generated.yml @@ -12,7 +12,7 @@ ## file (example: templates/gpbackup-tpl.yml) and regenerate the pipeline ## using appropriate tool (example: gen_pipeline.py -p gpbackup-release). ## ---------------------------------------------------------------------- -## Generated by gen_pipeline.py at: 2023-04-24 16:47:47.164907 +## Generated by gen_pipeline.py at: 2023-05-02 11:19:59.448810 ## Template file: gpbackup-tpl.yml ## Pipeline Name: gpbackup-release ## Nightly Trigger: True @@ -331,7 +331,7 @@ resources: source: bucket: ((dp/prod/gcs-ci-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/main/server-rc-(.*)-rhel8_x86_64.tar.gz + regexp: server/published/main/server-rc-(.*)-el8_x86_64.tar.gz - name: bin_gpdb_5x_stable_centos6 type: s3 diff --git a/ci/regression/regression_pipeline.yml b/ci/regression/regression_pipeline.yml index f091c2994..9655931ef 100644 --- a/ci/regression/regression_pipeline.yml +++ b/ci/regression/regression_pipeline.yml @@ -119,7 +119,7 @@ resources: source: bucket: ((dp/prod/gcs-ci-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/main/server-rc-(.*)-rhel8_x86_64.tar.gz + regexp: server/published/main/server-rc-(.*)-el8_x86_64.tar.gz #### Misc Other #### - name: weekly-trigger diff --git a/ci/templates/gpbackup-tpl.yml b/ci/templates/gpbackup-tpl.yml index 05f9bb92a..7d7e74e66 100644 --- a/ci/templates/gpbackup-tpl.yml +++ b/ci/templates/gpbackup-tpl.yml @@ -369,7 +369,7 @@ resources: source: bucket: ((dp/prod/gcs-ci-bucket)) json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/main/server-rc-(.*)-rhel8_x86_64.tar.gz + regexp: server/published/main/server-rc-(.*)-el8_x86_64.tar.gz - name: bin_gpdb_5x_stable_centos6 type: s3 From fada3b688947f9f8f6561dddfd8519fb3b030763 Mon Sep 17 00:00:00 2001 From: Andrew Repp Date: Wed, 3 May 2023 08:49:07 -0500 Subject: [PATCH 05/45] Add test for delete_replica We test most of our plugin functionality in the gpbackup CI, so add a test to our plugin suite to cover the delete_replica function added to our ddboost plugin. 
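For reference, the call shape the new test exercises is the plugin executable invoked with a
plugin config file and a backup timestamp. A minimal sketch; the config path and timestamp
below are placeholder values, not taken from this change:

    # delete the replicated copy of the backup identified by the given timestamp
    $GPHOME/bin/gpbackup_ddboost_plugin delete_replica /home/gpadmin/ddboost_config_replication.yaml 20230503123456
    # running it a second time should only emit an "already deleted" warning
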
--- plugins/plugin_test.sh | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/plugins/plugin_test.sh b/plugins/plugin_test.sh index 7f4aabc20..c793bcf0c 100755 --- a/plugins/plugin_test.sh +++ b/plugins/plugin_test.sh @@ -5,7 +5,7 @@ set -o pipefail plugin=$1 plugin_config=$2 secondary_plugin_config=$3 -MINIMUM_API_VERSION="0.3.0" +MINIMUM_API_VERSION="0.4.0" # ---------------------------------------------- # Test suite setup @@ -394,6 +394,44 @@ fi echo "[PASSED] fails with unknown command" set -e +# ---------------------------------------------- +# Delete replica function +# ---------------------------------------------- +if [[ "$plugin" == *gpbackup_ddboost_plugin ]] && [[ "$plugin_config" == *ddboost_config_replication.yaml ]]; then + time_second_for_repl=$(expr 99999999999999 - $(od -vAn -N5 -tu < /dev/urandom | tr -d ' \n')) + current_date_for_repl=$(echo $time_second_for_repl | cut -c 1-8) + + testdir_for_repl="/tmp/testseg/backups/${current_date_for_repl}/${time_second_for_repl}" + testdata_for_repl="$testdir_for_repl/testdata_$time_second_for_repl.txt" + + mkdir -p $testdir_for_repl + + echo "[RUNNING] backup_data with replica" + $plugin setup_plugin_for_backup $plugin_config $testdir_for_repl coordinator \"-1\" + $plugin setup_plugin_for_backup $plugin_config $testdir_for_repl segment_host + $plugin setup_plugin_for_backup $plugin_config $testdir_for_repl segment \"0\" + + + echo $data | $plugin backup_data $plugin_config $testdata_for_repl + echo "[PASSED] backup_data with replica" + + echo "[RUNNING] delete_replica" + $plugin delete_replica $plugin_config $time_second_for_repl + echo "[PASSED] delete_replica" + + set +e + echo "[RUNNING] delete_replica again to verify warning" + output=`$plugin delete_replica $plugin_config $time_second_for_repl` + + if [[ "$output" != *"already deleted"* ]]; then + echo "Failed to delete replica using plugin" + exit 1 + fi + echo "[PASSED] delete_replica again to verify warning" + set -e + +fi + # ---------------------------------------------- # Run test gpbackup and gprestore with plugin # ---------------------------------------------- From 8addcac3072f405576ae770629594c5fcfdd45ba Mon Sep 17 00:00:00 2001 From: Andrew Repp Date: Mon, 8 May 2023 16:48:48 -0500 Subject: [PATCH 06/45] Reorganize writes to history file We have historically written out history records only at the end of the process, and in many failure cases this means no record at all is available. 
This commit restructures to write out the full history entry very early on, but leaves Status as "In progress" until the cleanup phase, where it will be updated to show whether the backup was successful or a failure --- backup/backup.go | 88 +++++++++++++++++++++++++------------- backup/incremental.go | 5 ++- backup/incremental_test.go | 3 ++ backup/wrappers.go | 4 +- history/history.go | 1 + report/report_test.go | 2 +- 6 files changed, 69 insertions(+), 34 deletions(-) diff --git a/backup/backup.go b/backup/backup.go index 56d266e09..2ddd423a4 100644 --- a/backup/backup.go +++ b/backup/backup.go @@ -141,26 +141,13 @@ func DoBackup() { gplog.Info("Metadata will be written to %s", metadataFilename) metadataFile := utils.NewFileWithByteCountFromFile(metadataFilename) - backupSessionGUC(metadataFile) - if !MustGetFlagBool(options.DATA_ONLY) { - isFullBackup := len(MustGetFlagStringArray(options.INCLUDE_RELATION)) == 0 - if isFullBackup && !MustGetFlagBool(options.WITHOUT_GLOBALS) { - backupGlobals(metadataFile) - } - - isFilteredBackup := !isFullBackup - backupPredata(metadataFile, metadataTables, isFilteredBackup) - backupPostdata(metadataFile) - } - /* * We check this in the backup report rather than the flag because we * perform a metadata only backup if the database contains no tables * or only external tables */ + backupSetTables := dataTables if !backupReport.MetadataOnly { - backupSetTables := dataTables - targetBackupRestorePlan := make([]history.RestorePlanEntry, 0) if targetBackupTimestamp != "" { gplog.Info("Basing incremental backup off of backup with timestamp = %s", targetBackupTimestamp) @@ -171,8 +158,37 @@ func DoBackup() { } backupReport.RestorePlan = PopulateRestorePlan(backupSetTables, targetBackupRestorePlan, dataTables) + } + + // As soon as all necessary data is available, capture the backup into history database + if !MustGetFlagBool(options.NO_HISTORY) { + historyDBName := globalFPInfo.GetBackupHistoryDatabasePath() + historyDB, err := history.InitializeHistoryDatabase(historyDBName) + if err != nil { + gplog.FatalOnError(err) + } else { + err = history.StoreBackupHistory(historyDB, &backupReport.BackupConfig) + historyDB.Close() + gplog.FatalOnError(err) + } + } + + backupSessionGUC(metadataFile) + if !MustGetFlagBool(options.DATA_ONLY) { + isFullBackup := len(MustGetFlagStringArray(options.INCLUDE_RELATION)) == 0 + if isFullBackup && !MustGetFlagBool(options.WITHOUT_GLOBALS) { + backupGlobals(metadataFile) + } + + isFilteredBackup := !isFullBackup + backupPredata(metadataFile, metadataTables, isFilteredBackup) + backupPostdata(metadataFile) + } + + if !backupReport.MetadataOnly { backupData(backupSetTables) } + printDataBackupWarnings(numExtOrForeignTables) if MustGetFlagBool(options.WITH_STATS) { backupStatistics(metadataTables) @@ -382,7 +398,6 @@ func DoTeardown() { if statErr != nil { // Even if this isn't os.IsNotExist, don't try to write a report file in case of further errors return } - historyDBName := globalFPInfo.GetBackupHistoryDatabasePath() historyFileLegacyName := globalFPInfo.GetBackupHistoryFilePath() reportFilename := globalFPInfo.GetBackupReportFilePath() configFilename := globalFPInfo.GetConfigFilePath() @@ -396,24 +411,12 @@ func DoTeardown() { } if backupReport != nil { - if !backupFailed { + if backupFailed { + backupReport.BackupConfig.Status = history.BackupStatusFailed + } else { backupReport.BackupConfig.Status = history.BackupStatusSucceed } backupReport.ConstructBackupParamsString() - backupReport.BackupConfig.SegmentCount = 
len(globalCluster.ContentIDs) - 1 - - if !MustGetFlagBool(options.NO_HISTORY) { - historyDB, err := history.InitializeHistoryDatabase(historyDBName) - if err != nil { - gplog.Error(fmt.Sprintf("%v", err)) - } else { - err = history.StoreBackupHistory(historyDB, &backupReport.BackupConfig) - historyDB.Close() - if err != nil { - gplog.Error(fmt.Sprintf("%v", err)) - } - } - } history.WriteConfigFile(&backupReport.BackupConfig, configFilename) if backupReport.BackupConfig.EndTime == "" { @@ -473,6 +476,31 @@ func DoCleanup(backupFailed bool) { } utils.CleanUpHelperFilesOnAllHosts(globalCluster, globalFPInfo) } + + // The gpbackup_history entry is written to the DB with an "In Progress" status very early + // on. If we get to cleanup and the backup succeeded, mark it as a success, otherwise mark + // it as a failure. Between our signal handler and recovering panics, there should be no + // way for gpbackup to exit that leaves the entry in the initial status. + + if !MustGetFlagBool(options.NO_HISTORY) { + var statusString string + if backupFailed { + statusString = history.BackupStatusFailed + } else { + statusString = history.BackupStatusSucceed + } + historyDBName := globalFPInfo.GetBackupHistoryDatabasePath() + historyDB, err := history.InitializeHistoryDatabase(historyDBName) + if err != nil { + gplog.Error(fmt.Sprintf("Unable to update history database. Error: %v", err)) + } else { + _, err := historyDB.Exec(fmt.Sprintf("UPDATE backups SET status='%s' WHERE timestamp='%s'", statusString, globalFPInfo.Timestamp)) + historyDB.Close() + if err != nil { + gplog.Error(fmt.Sprintf("Unable to update history database. Error: %v", err)) + } + } + } } err := backupLockFile.Unlock() if err != nil && backupLockFile != "" { diff --git a/backup/incremental.go b/backup/incremental.go index 9aa6ab8e1..c99856650 100644 --- a/backup/incremental.go +++ b/backup/incremental.go @@ -70,13 +70,14 @@ func GetLatestMatchingBackupConfig(historyDBPath string, currentBackupConfig *hi historyDB, _ := history.InitializeHistoryDatabase(historyDBPath) whereClause := fmt.Sprintf(`backup_dir = '%s' AND database_name = '%s' AND leaf_partition_data = %v - AND plugin = '%s' AND single_data_file = %v AND compressed = %v AND date_deleted = ''`, + AND plugin = '%s' AND single_data_file = %v AND compressed = %v AND date_deleted = '' AND status = '%s'`, MustGetFlagString(options.BACKUP_DIR), currentBackupConfig.DatabaseName, MustGetFlagBool(options.LEAF_PARTITION_DATA), currentBackupConfig.Plugin, MustGetFlagBool(options.SINGLE_DATA_FILE), - currentBackupConfig.Compressed) + currentBackupConfig.Compressed, + history.BackupStatusSucceed) getBackupTimetampsQuery := fmt.Sprintf(` SELECT timestamp diff --git a/backup/incremental_test.go b/backup/incremental_test.go index 76d01762d..ac04c2a70 100644 --- a/backup/incremental_test.go +++ b/backup/incremental_test.go @@ -96,6 +96,7 @@ var _ = Describe("backup/incremental tests", func() { { DatabaseName: "test1", Timestamp: "timestamp3", + Status: history.BackupStatusSucceed, ExcludeRelations: []string{}, ExcludeSchemas: []string{}, IncludeRelations: []string{}, @@ -104,6 +105,7 @@ var _ = Describe("backup/incremental tests", func() { { DatabaseName: "test2", Timestamp: "timestamp2", + Status: history.BackupStatusSucceed, ExcludeRelations: []string{}, ExcludeSchemas: []string{}, IncludeRelations: []string{}, @@ -113,6 +115,7 @@ var _ = Describe("backup/incremental tests", func() { { DatabaseName: "test1", Timestamp: "timestamp1", + Status: history.BackupStatusSucceed, ExcludeRelations: 
[]string{}, ExcludeSchemas: []string{}, IncludeRelations: []string{}, diff --git a/backup/wrappers.go b/backup/wrappers.go index dbfcb28e5..1a541f643 100644 --- a/backup/wrappers.go +++ b/backup/wrappers.go @@ -133,7 +133,7 @@ func NewBackupConfig(dbName string, dbVersion string, backupVersion string, plug Timestamp: timestamp, WithoutGlobals: MustGetFlagBool(options.WITHOUT_GLOBALS), WithStatistics: MustGetFlagBool(options.WITH_STATS), - Status: history.BackupStatusFailed, + Status: history.BackupStatusInProgress, } return &backupConfig @@ -157,6 +157,8 @@ func initializeBackupReport(opts options.Options) { dbSize = GetDBSize(connectionPool) } + config.SegmentCount = len(globalCluster.ContentIDs) - 1 + backupReport = &report.Report{ DatabaseSize: dbSize, BackupConfig: *config, diff --git a/history/history.go b/history/history.go index a3d83db20..6d2885710 100644 --- a/history/history.go +++ b/history/history.go @@ -20,6 +20,7 @@ type RestorePlanEntry struct { } const ( + BackupStatusInProgress = "In Progress" BackupStatusSucceed = "Success" BackupStatusFailed = "Failure" ) diff --git a/report/report_test.go b/report/report_test.go index d238e7a94..c9abe857d 100644 --- a/report/report_test.go +++ b/report/report_test.go @@ -336,7 +336,7 @@ restore status: Success but non-fatal errors occurred. See log file .+ Plugin: "/tmp/plugin.sh", Timestamp: "timestamp1", IncludeTableFiltered: true, - Status: history.BackupStatusFailed, + Status: history.BackupStatusInProgress, }, backupConfig) }) }) From 5b570d749cd223937a57004056eb4b691594d0f7 Mon Sep 17 00:00:00 2001 From: Andrew Repp Date: Tue, 9 May 2023 09:59:46 -0500 Subject: [PATCH 07/45] Handle panics from goroutines Our current cleanup mechanisms rely on a recover() call to pick up FatalOnError. However, this mechanism does not work if the panic is called from a separate goroutine. As such, our cleanup has some gaps in it that need to be plugged up. We identify four cases where a panic can be invoked from a goroutine. In these cases we set up a channel for bubbling the panics up to the main process, and invoke gplog.Fatal() at the earliest opportunity. We structure the defers such that waitgroups are still marked Done() even in case of a panic, so that we can keep the deadlock protection they offer. In the future any we will avoid invoking panic() from within goroutines, so that this handling will not be necessary. --- backup/data.go | 22 ++++++++++++++++++++++ restore/data.go | 13 +++++++++++++ restore/parallel.go | 13 +++++++++++++ 3 files changed, 48 insertions(+) diff --git a/backup/data.go b/backup/data.go index 19262eed2..ff7696653 100644 --- a/backup/data.go +++ b/backup/data.go @@ -165,11 +165,17 @@ func backupDataForAllTables(tables []Table) []map[uint32]int64 { * 3) Processes tables only in the event that the other workers encounter locking issues. * Worker 0 already has all locks on the tables so it will not run into locking issues. */ + panicChan := make(chan error) rowsCopiedMaps[0] = make(map[uint32]int64) for connNum := 1; connNum < connectionPool.NumConns; connNum++ { rowsCopiedMaps[connNum] = make(map[uint32]int64) workerPool.Add(1) go func(whichConn int) { + defer func() { + if panicErr := recover(); panicErr != nil { + panicChan <- fmt.Errorf("%v", panicErr) + } + }() defer workerPool.Done() /* If the --leaf-partition-data flag is not set, the parent and all leaf * partition data are treated as a single table and will be assigned to a single worker. 
@@ -243,12 +249,18 @@ func backupDataForAllTables(tables []Table) []map[uint32]int64 { } }(connNum) } + // Special goroutine to handle deferred tables // Handle all tables deferred by the deadlock detection. This can only be // done with the main worker thread, worker 0, because it has // AccessShareLocks on all the tables already. deferredWorkerDone := make(chan bool) go func() { + defer func() { + if panicErr := recover(); panicErr != nil { + panicChan <- fmt.Errorf("%v", panicErr) + } + }() for _, table := range tables { for { state, _ := oidMap.Load(table.Oid) @@ -270,8 +282,18 @@ func backupDataForAllTables(tables []Table) []map[uint32]int64 { } deferredWorkerDone <- true }() + close(tasks) workerPool.Wait() + + // Allow panics to crash from the main process, invoking DoCleanup + select { + case err := <-panicChan: + gplog.Fatal(err, "") + default: + // no panic, nothing to do + } + // If not using synchronized snapshots, // check if all workers were terminated due to lock issues. if backupSnapshot == "" { diff --git a/restore/data.go b/restore/data.go index 0c7a968a7..be2a8fd5d 100644 --- a/restore/data.go +++ b/restore/data.go @@ -213,10 +213,16 @@ func restoreDataFromTimestamp(fpInfo filepath.FilePathInfo, dataEntries []toc.Co var workerPool sync.WaitGroup var numErrors int32 var mutex = &sync.Mutex{} + panicChan := make(chan error) for i := 0; i < connectionPool.NumConns; i++ { workerPool.Add(1) go func(whichConn int) { + defer func() { + if panicErr := recover(); panicErr != nil { + panicChan <- fmt.Errorf("%v", panicErr) + } + }() defer workerPool.Done() setGUCsForConnection(gucStatements, whichConn) @@ -277,6 +283,13 @@ func restoreDataFromTimestamp(fpInfo filepath.FilePathInfo, dataEntries []toc.Co } close(tasks) workerPool.Wait() + // Allow panics to crash from the main process, invoking DoCleanup + select { + case err := <-panicChan: + gplog.Fatal(err, "") + default: + // no panic, nothing to do + } if numErrors > 0 { fmt.Println("") diff --git a/restore/parallel.go b/restore/parallel.go index 1f990a60f..253bde1a7 100644 --- a/restore/parallel.go +++ b/restore/parallel.go @@ -64,15 +64,28 @@ func ExecuteStatements(statements []toc.StatementWithType, progressBar utils.Pro connNum := connectionPool.ValidateConnNum(whichConn...) executeStatementsForConn(tasks, &fatalErr, &numErrors, progressBar, connNum, executeInParallel) } else { + panicChan := make(chan error) for i := 0; i < connectionPool.NumConns; i++ { workerPool.Add(1) go func(connNum int) { + defer func() { + if panicErr := recover(); panicErr != nil { + panicChan <- fmt.Errorf("%v", panicErr) + } + }() defer workerPool.Done() connNum = connectionPool.ValidateConnNum(connNum) executeStatementsForConn(tasks, &fatalErr, &numErrors, progressBar, connNum, executeInParallel) }(i) } workerPool.Wait() + // Allow panics to crash from the main process, invoking DoCleanup + select { + case err := <-panicChan: + gplog.Fatal(err, "") + default: + // no panic, nothing to do + } } if fatalErr != nil { fmt.Println("") From 60430ba2001b8026e23138503dd95541463e4e58 Mon Sep 17 00:00:00 2001 From: Andrew Repp Date: Thu, 11 May 2023 10:24:24 -0500 Subject: [PATCH 08/45] Set gp_quicklz_fallback GUC for GPDB7+ GPDB7 has removed support for QuickLZ. Any tables backed up with QuickLZ will cause an error on restore, unless this GUC is set. This GUC will allow the server to silently fall back to using zstd, and allow for successful restores. 
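For illustration, the effect of the GUC on a restore session looks roughly like the following.
This is a sketch only; the table storage options are an example of a QuickLZ-compressed table
definition from an older cluster, not something taken from this change:

    # on GPDB7+, gprestore's session setup now includes the fallback GUC, so a
    # definition like this no longer errors; the server falls back to zstd
    psql -d testdb -c "SET gp_quicklz_fallback = on;
    CREATE TABLE ao_quicklz (a int)
    WITH (appendonly=true, compresstype=quicklz, compresslevel=1);"
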
--- restore/wrappers.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/restore/wrappers.go b/restore/wrappers.go index 07e658e52..292d7a44b 100644 --- a/restore/wrappers.go +++ b/restore/wrappers.go @@ -129,6 +129,13 @@ SET default_with_oids = off; // during COPY FROM SEGMENT. ANALYZE should be run separately. setupQuery += "SET gp_autostats_mode = 'none';\n" + // GPDB7 removed support for QuickLZ. To support creating tables + // from backups done with QuickLZ, a GUC was added to allow silent + // fallback to zstd + if connectionPool.Version.AtLeast("7") { + setupQuery += "SET gp_quicklz_fallback = on;\n" + } + for i := 0; i < connectionPool.NumConns; i++ { connectionPool.MustExec(setupQuery, i) } From fc807b0cceb4b3e98599442c49ce53f998732d05 Mon Sep 17 00:00:00 2001 From: Andrew Repp Date: Fri, 5 May 2023 13:09:01 -0500 Subject: [PATCH 09/45] Fix cleanup calls in plugin_test.sh Previously our CI plugin tests had been calling noops for the cleanup step. Replace these with calls to delete_directory where possible. An additional cleanup job will be scheduled to run occasionally to handle things that are not easily cleaned up from plugin storage in this manner. --- ci/scripts/ddboost-plugin-tests.bash | 4 +-- ci/scripts/s3-plugin-tests.bash | 2 +- end_to_end/plugin_test.go | 2 +- plugins/example_plugin.bash | 14 +++++++-- plugins/plugin_test.sh | 46 +++++++++++++++++----------- 5 files changed, 44 insertions(+), 24 deletions(-) diff --git a/ci/scripts/ddboost-plugin-tests.bash b/ci/scripts/ddboost-plugin-tests.bash index 78270c36d..396053709 100755 --- a/ci/scripts/ddboost-plugin-tests.bash +++ b/ci/scripts/ddboost-plugin-tests.bash @@ -84,9 +84,9 @@ CONFIG pushd \${GOPATH}/src/github.com/greenplum-db/gpbackup/plugins -./plugin_test.sh \${GPHOME}/bin/gpbackup_ddboost_plugin \${HOME}/ddboost_config_replication.yaml \${HOME}/ddboost_config_replication_restore.yaml +./plugin_test.sh \${GPHOME}/bin/gpbackup_ddboost_plugin \${HOME}/ddboost_config_replication.yaml gpbackup_tests${GPDB_VERSION} \${HOME}/ddboost_config_replication_restore.yaml -./plugin_test.sh \${GPHOME}/bin/gpbackup_ddboost_plugin \${HOME}/ddboost_config.yaml \${HOME}/ddboost_config_replication_restore.yaml +./plugin_test.sh \${GPHOME}/bin/gpbackup_ddboost_plugin \${HOME}/ddboost_config.yaml gpbackup_tests${GPDB_VERSION} \${HOME}/ddboost_config_replication_restore.yaml # exercise boostfs, which is mounted at /data/gpdata/dd_dir pushd \${GOPATH}/src/github.com/greenplum-db/gpbackup diff --git a/ci/scripts/s3-plugin-tests.bash b/ci/scripts/s3-plugin-tests.bash index 339d4eb16..dd967cefc 100755 --- a/ci/scripts/s3-plugin-tests.bash +++ b/ci/scripts/s3-plugin-tests.bash @@ -30,7 +30,7 @@ options: CONFIG pushd ~/go/src/github.com/greenplum-db/gpbackup/plugins - ./plugin_test.sh \${GPHOME}/bin/gpbackup_s3_plugin \${HOME}/s3_config.yaml + ./plugin_test.sh \${GPHOME}/bin/gpbackup_s3_plugin \${HOME}/s3_config.yaml test/backup popd SCRIPT diff --git a/end_to_end/plugin_test.go b/end_to_end/plugin_test.go index 4c537469b..6dda05407 100644 --- a/end_to_end/plugin_test.go +++ b/end_to_end/plugin_test.go @@ -456,7 +456,7 @@ var _ = Describe("End to End plugin tests", func() { } pluginsDir := fmt.Sprintf("%s/src/github.com/greenplum-db/gpbackup/plugins", os.Getenv("GOPATH")) copyPluginToAllHosts(backupConn, fmt.Sprintf("%s/example_plugin.bash", pluginsDir)) - command := exec.Command("bash", "-c", fmt.Sprintf("%s/plugin_test.sh %s/example_plugin.bash %s/example_plugin_config.yaml", pluginsDir, pluginsDir, pluginsDir)) + command := 
exec.Command("bash", "-c", fmt.Sprintf("%s/plugin_test.sh %s/example_plugin.bash %s/example_plugin_config.yaml /tmp/plugin_dest", pluginsDir, pluginsDir, pluginsDir)) mustRunCommand(command) _ = os.RemoveAll("/tmp/plugin_dest") diff --git a/plugins/example_plugin.bash b/plugins/example_plugin.bash index 9f6498e58..fc594e28f 100755 --- a/plugins/example_plugin.bash +++ b/plugins/example_plugin.bash @@ -90,9 +90,19 @@ delete_backup() { } +list_directory() { + echo "list_directory $1 /tmp/plugin_dest" >> /tmp/plugin_out.txt + ls /tmp/plugin_dest +} + +delete_directory() { + echo "delete_directory $1 /tmp/plugin_dest" >> /tmp/plugin_out.txt + rm -rf /tmp/plugin_dest +} + plugin_api_version(){ - echo "0.4.0" - echo "0.4.0" >> /tmp/plugin_out.txt + echo "0.5.0" + echo "0.5.0" >> /tmp/plugin_out.txt } --version(){ diff --git a/plugins/plugin_test.sh b/plugins/plugin_test.sh index c793bcf0c..ad839cd8f 100755 --- a/plugins/plugin_test.sh +++ b/plugins/plugin_test.sh @@ -4,7 +4,8 @@ set -o pipefail plugin=$1 plugin_config=$2 -secondary_plugin_config=$3 +plugin_dir=$3 +secondary_plugin_config=$4 MINIMUM_API_VERSION="0.4.0" # ---------------------------------------------- @@ -12,9 +13,9 @@ MINIMUM_API_VERSION="0.4.0" # This will put small amounts of data in the # plugin destination location # ---------------------------------------------- -if [ $# -lt 2 ] || [ $# -gt 3 ] +if [ $# -lt 3 ] || [ $# -gt 4 ] then - echo "Usage: plugin_test.sh [path_to_executable] [plugin_config] [optional_config_for_secondary_destination]" + echo "Usage: plugin_test.sh [path_to_executable] [plugin_config] [plugin_testdir] [optional_config_for_secondary_destination]" exit 1 fi @@ -36,9 +37,10 @@ testdata="$testdir/testdata_$time_second.txt" test_no_data="$testdir/test_no_data_$time_second.txt" testdatasmall="$testdir/testdatasmall_$time_second.txt" testdatalarge="$testdir/testdatalarge_$time_second.txt" - logdir="/tmp/test_bench_logs" +plugin_testdir="$plugin_dir/${current_date}/${time_second}" + text="this is some text" data=`LC_ALL=C tr -dc 'A-Za-z0-9' Date: Sat, 6 May 2023 14:48:24 -0500 Subject: [PATCH 10/45] Add a plugin cleanup job to our CI Currently our plugin jobs leave behind a lot of data and don't clean it up at all. Prior commit added some in-test cleanup, but it still leaves behind a fair bit. Add a CI job that will run weekly and drop all contents from our plugin storage. Note that this requires the job to be in a pipeline sitting around for a week. This may not be the case for short-lived dev pipelines. Might be worth considering making a skinny pipeline to leave sitting in the DP concourse so that the DP storage also gets cleaned automatically. 
--- ci/cleanup/plugin-cleanup.yml | 101 ++++++++++++++++++++++++++++ ci/scripts/clean-plugins.bash | 120 ++++++++++++++++++++++++++++++++++ ci/tasks/clean-plugins.yml | 17 +++++ ci/templates/gpbackup-tpl.yml | 13 ++++ 4 files changed, 251 insertions(+) create mode 100644 ci/cleanup/plugin-cleanup.yml create mode 100755 ci/scripts/clean-plugins.bash create mode 100644 ci/tasks/clean-plugins.yml diff --git a/ci/cleanup/plugin-cleanup.yml b/ci/cleanup/plugin-cleanup.yml new file mode 100644 index 000000000..78d08feac --- /dev/null +++ b/ci/cleanup/plugin-cleanup.yml @@ -0,0 +1,101 @@ +# Fly this pipeline with the command: +# fly -t TARGET sp -p gpbackup_clean_plugin_storage -c gpbackup/ci/cleanup/plugin-cleanup.yml -v gpbackup-git-branch=main -v source_host=SOURCE_IP -v dest_host=DEST_IP +--- +############################################## +groups: +- name: All + jobs: + - clean-plugin-storage +############################################## + +############################################## +resource_types: +- name: gcs + type: registry-image + source: + repository: frodenas/gcs-resource +############################################## + +############################################## +resources: +- name: gpdb6_src + type: git + icon: github-circle + source: + uri: https://github.com/greenplum-db/gpdb + branch: 6X_STABLE + +- name: weekly-trigger + type: time + source: + location: America/Los_Angeles + days: [Sunday] + start: 6:00 AM + stop: 7:00 AM + +- name: rocky8-gpdb6-image + type: registry-image + source: + repository: gcr.io/data-gpdb-public-images/gpdb6-rocky8-test + tag: latest + +- name: bin_gpdb_6x_rhel8 + type: gcs + source: + bucket: ((dp/prod/gcs-ci-bucket)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: server/published/gpdb6/server-rc-(.*)-rhel8_x86_64((dp/dev/rc-build-type-gcs)).tar.gz + +- name: gpbackup + type: git + icon: github-circle + source: + uri: https://github.com/greenplum-db/gpbackup + branch: ((gpbackup-git-branch)) + +- name: gppkgs + type: gcs + icon: google + source: + bucket: ((dp/dev/gcs-ci-bucket)) + json_key: ((dp/dev/gcp_svc_acct_key)) + versioned_file: gpbackup/intermediates/gpbackup-gppkgs.tar.gz +############################################## + +############################################## +anchors: +- &ddboost_params + # TODO -- this is currently flown by providing with IP addresses passed to command line. 
+ # This prevents needing to duplicate dev/prod files, and also avoids rewriting gen_pipeline.py + # If we move to a one-concourse approach, these can easily be interpolated from Vault again + DD_SOURCE_HOST: ((source_host)) + DD_DEST_HOST: ((dest_host)) + DD_USER: ((dp/prod/datadomain_user)) + DD_PW: ((dp/prod/datadomain_711_password_gcp)) + DD_ENCRYPTED_PW: ((dp/prod/encrypted_datadomain_711_password_gcp)) +############################################## + +############################################## +jobs: +- name: clean-plugin-storage + plan: + - in_parallel: + - get: gpdb_src + resource: gpdb6_src + - get: bin_gpdb + resource: bin_gpdb_6x_rhel8 + - get: rocky8-gpdb6-image + - get: gpbackup + - get: gppkgs + - get: weekly-trigger + trigger: true + - task: clean-plugins + image: rocky8-gpdb6-image + file: gpbackup/ci/tasks/clean-plugins.yml + params: + <<: *ddboost_params + REGION: us-west-2 + AWS_ACCESS_KEY_ID: ((aws-bucket-access-key-id)) + AWS_SECRET_ACCESS_KEY: ((aws-bucket-secret-access-key)) + BUCKET: ((dp/dev/gpbackup-s3-plugin-test-bucket)) +############################################## diff --git a/ci/scripts/clean-plugins.bash b/ci/scripts/clean-plugins.bash new file mode 100755 index 000000000..0449fae65 --- /dev/null +++ b/ci/scripts/clean-plugins.bash @@ -0,0 +1,120 @@ +#!/bin/bash + +# We deliberately do not set -e here, because these commands error if the +# directory happens to have already been deleted, which we do not want. +set -x + +# Add locale for locale tests +localedef -c -i de_DE -f UTF-8 de_DE +echo LANG=\"de_DE\" >> /etc/locale.conf +source /etc/locale.conf + +## old versions of ld have a bug that our CGO libs exercise. update binutils to avoid it +OLDLDVERSION=$(ld --version | grep "2.25"); +if [[ ${OS} == "RHEL6" ]]; then + ## have to use vault to update because centos6 is EOL + sudo yum install -y centos-release-scl + echo "https://vault.centos.org/6.10/os/x86_64/" > /var/cache/yum/x86_64/6/C6.10-base/mirrorlist.txt + echo "http://vault.centos.org/6.10/extras/x86_64/" > /var/cache/yum/x86_64/6/C6.10-extras/mirrorlist.txt + echo "http://vault.centos.org/6.10/updates/x86_64/" > /var/cache/yum/x86_64/6/C6.10-updates/mirrorlist.txt + mkdir /var/cache/yum/x86_64/6/centos-sclo-rh/ + echo "http://vault.centos.org/6.10/sclo/x86_64/rh" > /var/cache/yum/x86_64/6/centos-sclo-rh/mirrorlist.txt + mkdir /var/cache/yum/x86_64/6/centos-sclo-sclo/ + echo "http://vault.centos.org/6.10/sclo/x86_64/sclo" > /var/cache/yum/x86_64/6/centos-sclo-sclo/mirrorlist.txt + sudo yum install -y devtoolset-7-binutils* + \cp /opt/rh/devtoolset-7/root/usr/bin/ld /usr/bin/ld +elif [[ $OLDLDVERSION != "" ]]; then + yum install -y binutils +fi + +mkdir /tmp/untarred +tar -xzf gppkgs/gpbackup-gppkgs.tar.gz -C /tmp/untarred + +if [[ ! 
-f bin_gpdb/bin_gpdb.tar.gz ]] ; then + mv bin_gpdb/*.tar.gz bin_gpdb/bin_gpdb.tar.gz +fi +source gpdb_src/concourse/scripts/common.bash +time install_gpdb +time ./gpdb_src/concourse/scripts/setup_gpadmin_user.bash +time NUM_PRIMARY_MIRROR_PAIRS=${LOCAL_CLUSTER_SIZE} make_cluster + +# generate configs for the CI storage used in our tests +cat << CONFIG > /tmp/ddboost_config_local.yaml +executablepath: \${GPHOME}/bin/gpbackup_ddboost_plugin +options: + hostname: ${DD_SOURCE_HOST} + username: ${DD_USER} + storage_unit: GPDB + directory: gpbackup_tests6 + pgport: 6000 + password: ${DD_ENCRYPTED_PW} + password_encryption: "on" + gpbackup_ddboost_plugin: 66706c6c6e677a6965796f68343365303133336f6c73366b316868326764 +CONFIG + +cat << CONFIG > /tmp/ddboost_config_remote.yaml +executablepath: \${GPHOME}/bin/gpbackup_ddboost_plugin +options: + hostname: ${DD_DEST_HOST} + username: ${DD_USER} + storage_unit: GPDB + directory: gpbackup_tests6 + pgport: 6000 + password: ${DD_ENCRYPTED_PW} + password_encryption: "on" + gpbackup_ddboost_plugin: 66706c6c6e677a6965796f68343365303133336f6c73366b316868326764 +CONFIG + + cat << CONFIG > /tmp/s3_config.yaml +executablepath: \${GPHOME}/bin/gpbackup_s3_plugin +options: + region: ${REGION} + aws_access_key_id: ${AWS_ACCESS_KEY_ID} + aws_secret_access_key: ${AWS_SECRET_ACCESS_KEY} + bucket: ${BUCKET} + folder: test/backup + backup_multipart_chunksize: 100MB + restore_multipart_chunksize: 100MB +CONFIG + +cat <