Skip to content

Commit

Permalink
Merge branch 'master' into ADBDEV-5693
Browse files Browse the repository at this point in the history
  • Loading branch information
Stolb27 authored Jun 28, 2024
2 parents bac9ce9 + bb75d5a commit 6a7e453
Show file tree
Hide file tree
Showing 12 changed files with 446 additions and 45 deletions.
4 changes: 3 additions & 1 deletion backup/predata_relations.go
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,9 @@ func SplitTablesByPartitionType(tables []Table, includeList []options.Relation)
// since the COPY will be called on the top-most root partition. It just so
// happens that those particular partition types will always have an
// AttachPartitionInfo initialized.
if table.AttachPartitionInfo == (AttachPartitionInfo{}) {
// Or, if IsParentInExtension is set, the parent of this partition lives in an
// extension, so we need to back this partition up as a new root.
if table.AttachPartitionInfo == (AttachPartitionInfo{}) || table.PartitionLevelInfo.IsParentInExtension {
dataTables = append(dataTables, table)
}
}
Expand Down
43 changes: 31 additions & 12 deletions backup/queries_table_defs.go
Original file line number Diff line number Diff line change
Expand Up @@ -152,11 +152,12 @@ func ConstructDefinitionsForTables(connectionPool *dbconn.DBConn, tableRelations
*/

type PartitionLevelInfo struct {
Oid uint32
Level string
RootName string
RootOid uint32
Name string
Oid uint32
Level string
RootName string
RootOid uint32
Name string
IsParentInExtension bool
}

func GetPartitionTableMap(connectionPool *dbconn.DBConn) map[uint32]PartitionLevelInfo {
Expand All @@ -165,15 +166,17 @@ func GetPartitionTableMap(connectionPool *dbconn.DBConn) map[uint32]PartitionLev
'p' AS level,
'' AS rootname,
0 AS rootoid,
quote_ident(pc.relname) AS name
quote_ident(pc.relname) AS name,
false AS isparentinextension
FROM pg_partition p
JOIN pg_class pc ON p.parrelid = pc.oid
UNION ALL
SELECT r.parchildrelid AS oid,
CASE WHEN p.parlevel = levels.pl THEN 'l' ELSE 'i' END AS level,
quote_ident(cparent.relname) AS rootname,
cparent.oid AS rootoid,
quote_ident(c.relname) AS name
quote_ident(c.relname) AS name,
false AS isparentinextension
FROM pg_partition p
JOIN pg_partition_rule r ON p.oid = r.paroid
JOIN pg_class cparent ON cparent.oid = p.parrelid
Expand All @@ -194,10 +197,13 @@ func GetPartitionTableMap(connectionPool *dbconn.DBConn) map[uint32]PartitionLev
CASE WHEN p.partrelid IS NOT NULL AND c.relispartition = false THEN 0
ELSE rc.oid
END AS rootoid,
quote_ident(c.relname) AS name
quote_ident(c.relname) AS name,
pd.objid is not NULL as isparentinextension
FROM pg_class c
LEFT JOIN pg_partitioned_table p ON c.oid = p.partrelid
LEFT JOIN pg_class rc ON pg_partition_root(c.oid) = rc.oid
LEFT JOIN pg_inherits pi ON c.oid = pi.inhrelid
LEFT JOIN pg_depend pd ON objid = pi.inhparent AND deptype = 'e'
WHERE c.relispartition = true OR c.relkind = 'p'`

query := ""
Expand Down Expand Up @@ -653,19 +659,32 @@ func GetTableInheritance(connectionPool *dbconn.DBConn, tables []Relation) map[u
}
// If we are filtering on tables, we only want to record dependencies on other tables in the list
if len(tableOidList) > 0 {
tableFilterStr = fmt.Sprintf("\nAND i.inhrelid IN (%s)", strings.Join(tableOidList, ","))
tableFilterStr = fmt.Sprintf("\nWHERE i.inhrelid IN (%s)", strings.Join(tableOidList, ","))
}
}

// In 7X and later, tables in extensions should not be filtered out, since the parent name is
// later used for creating DDL for partitioned tables whose parent tables may be in the extension.
// Filtering can also lead to a loss of inheritance if the parent table is in an extension.
// For 6X and earlier, we don't want to change the behavior, so we filter out the tables from the extension.
extensionFilter := ""
if connectionPool.Version.Before("7") {
if tableFilterStr == "" {
extensionFilter += "\nWHERE "
} else {
extensionFilter += " AND "
}
extensionFilter += ExtensionFilterClause("p")
}

query := fmt.Sprintf(`
SELECT i.inhrelid AS oid,
quote_ident(n.nspname) || '.' || quote_ident(p.relname) AS referencedobject
FROM pg_inherits i
JOIN pg_class p ON i.inhparent = p.oid
JOIN pg_namespace n ON p.relnamespace = n.oid
WHERE %s%s
ORDER BY i.inhrelid, i.inhseqno`,
ExtensionFilterClause("p"), tableFilterStr)
%s%s
ORDER BY i.inhrelid, i.inhseqno`, tableFilterStr, extensionFilter)

results := make([]Dependency, 0)
resultMap := make(map[uint32][]string)
Expand Down
200 changes: 200 additions & 0 deletions end_to_end/end_to_end_suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2690,4 +2690,204 @@ LANGUAGE plpgsql NO SQL;`)
Expect(string(output)).To(ContainSubstring("Multiple timestamp directories found"))
})
})

// End-to-end tests for backing up and restoring GPDB 7+ partitions whose
// parent (root) table is owned by an extension. The test extension test_ext6
// (see end_to_end/resources/test_ext6--1.0.sql) creates the partitioned roots
// t_part, d_part, and the plain table parent; the tests attach partitions to
// those roots from outside the extension and verify that gpbackup treats the
// attached partitions as new roots.
Describe("Backup partition whose parent is in extension [7X]", func() {
BeforeEach(func() {
// Partitions attached to an extension-owned parent are only a 7X scenario.
if backupConn.Version.Before("7") {
Skip("not relevant for 6X and earlier")
}
// Build and install the test extensions from end_to_end/resources so
// CREATE EXTENSION test_ext6 can succeed in the backup database.
_ = os.Chdir("resources")
command := exec.Command("make", "USE_PGXS=1", "install")
mustRunCommand(command)
_ = os.Chdir("..")

testhelper.AssertQueryRuns(backupConn, `CREATE EXTENSION test_ext6;`)
})

AfterEach(func() {
// CASCADE also removes any partitions the individual tests attached to
// the extension-owned roots.
testhelper.AssertQueryRuns(backupConn, "DROP EXTENSION IF EXISTS test_ext6 CASCADE;")
testhelper.AssertQueryRuns(restoreConn, "DROP EXTENSION IF EXISTS test_ext6 CASCADE;")
})

It("backup the partition whose parent is in the extension", func() {
// Attach new partitions (including a nested sub-partitioned one,
// test_part) to the extension-owned root t_part, then populate them.
testhelper.AssertQueryRuns(backupConn, `
CREATE TABLE test_part PARTITION OF t_part FOR VALUES FROM (10) TO (20) PARTITION BY LIST(c);
CREATE TABLE part_a PARTITION OF test_part FOR VALUES IN ('a');
CREATE TABLE part_b PARTITION OF test_part FOR VALUES IN ('b');
CREATE TABLE part_c PARTITION OF t_part_1_prt_1 FOR VALUES IN ('c');
CREATE TABLE test2_part PARTITION OF t_part FOR VALUES FROM (30) TO (40);
INSERT INTO part_a SELECT a, 15, a, 'a' FROM generate_series(1, 10)a;
INSERT INTO part_b SELECT a, 16, a, 'b' FROM generate_series(1, 20)a;
INSERT INTO part_c SELECT a, 0, a, 'c' FROM generate_series(1, 30)a;
INSERT INTO test2_part SELECT a, 35, a, 'test' FROM generate_series(1, 40)a;`)

// The attached partitions must appear in the backup metadata even though
// their parent is filtered out as part of the extension.
timestamp := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir)
metadataFileContents := string(getMetdataFileContents(backupDir, timestamp, "metadata.sql"))
Expect(metadataFileContents).To(ContainSubstring("test_part"))
Expect(metadataFileContents).To(ContainSubstring("part_a"))
Expect(metadataFileContents).To(ContainSubstring("part_b"))
Expect(metadataFileContents).To(ContainSubstring("part_c"))
Expect(metadataFileContents).To(ContainSubstring("test2_part"))

gprestore(gprestorePath, restoreHelperPath, timestamp, "--backup-dir", backupDir, "--redirect-db", "restoredb")

// Row counts must survive the round trip; test_part's 30 rows are the
// sum of its leaves part_a (10) and part_b (20).
assertDataRestored(restoreConn, map[string]int{
"public.test_part": 30,
"public.part_a": 10,
"public.part_b": 20,
"public.part_c": 30,
"public.test2_part": 40,
})

assertArtifactsCleaned(restoreConn, timestamp)
})

It("backup the partition whose parent is in the extension with '--leaf-partition-data' flag", func() {
// Same scenario as above, but data is dumped per leaf partition instead
// of through the root COPY.
testhelper.AssertQueryRuns(backupConn, `
CREATE TABLE test_part PARTITION OF t_part FOR VALUES FROM (10) TO (20) PARTITION BY LIST(c);
CREATE TABLE part_a PARTITION OF test_part FOR VALUES IN ('a');
CREATE TABLE part_b PARTITION OF test_part FOR VALUES IN ('b');
CREATE TABLE part_c PARTITION OF t_part_1_prt_1 FOR VALUES IN ('c');
CREATE TABLE test2_part PARTITION OF t_part FOR VALUES FROM (30) TO (40);
INSERT INTO part_a SELECT a, 15, a, 'a' FROM generate_series(1, 10)a;
INSERT INTO part_b SELECT a, 16, a, 'b' FROM generate_series(1, 20)a;
INSERT INTO part_c SELECT a, 0, a, 'c' FROM generate_series(1, 30)a;
INSERT INTO test2_part SELECT a, 35, a, 'test' FROM generate_series(1, 40)a;`)

timestamp := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir, "--leaf-partition-data")
metadataFileContents := string(getMetdataFileContents(backupDir, timestamp, "metadata.sql"))
Expect(metadataFileContents).To(ContainSubstring("test_part"))
Expect(metadataFileContents).To(ContainSubstring("part_a"))
Expect(metadataFileContents).To(ContainSubstring("part_b"))
Expect(metadataFileContents).To(ContainSubstring("part_c"))
Expect(metadataFileContents).To(ContainSubstring("test2_part"))

gprestore(gprestorePath, restoreHelperPath, timestamp, "--backup-dir", backupDir, "--redirect-db", "restoredb")

assertDataRestored(restoreConn, map[string]int{
"public.test_part": 30,
"public.part_a": 10,
"public.part_b": 20,
"public.part_c": 30,
"public.test2_part": 40,
})

assertArtifactsCleaned(restoreConn, timestamp)
})

It("ignore partitions specified in exclude-table during backup", func() {
testhelper.AssertQueryRuns(backupConn, `
CREATE TABLE part_c PARTITION OF t_part_1_prt_1 FOR VALUES IN ('c');
CREATE TABLE part_d PARTITION OF t_part_1_prt_1 FOR VALUES IN ('d');
INSERT INTO part_c SELECT a, 0, a, 'c' FROM generate_series(1, 10)a;
INSERT INTO part_d SELECT a, 0, a, 'd' FROM generate_series(1, 20)a;`)

// Exclusion filtering must still apply to partitions of an
// extension-owned parent.
timestamp := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir, "--leaf-partition-data",
"--exclude-table", "public.part_d")

metadataFileContents := string(getMetdataFileContents(backupDir, timestamp, "metadata.sql"))
Expect(metadataFileContents).To(ContainSubstring("part_c"))
Expect(metadataFileContents).ToNot(ContainSubstring("part_d"))

gprestore(gprestorePath, restoreHelperPath, timestamp, "--backup-dir", backupDir, "--redirect-db", "restoredb")

assertDataRestored(restoreConn, map[string]int{
"public.part_c": 10,
})

assertTablesNotRestored(restoreConn, []string{"part_d"})

assertArtifactsCleaned(restoreConn, timestamp)
})

It("ignore partitions specified in include-table during backup", func() {
testhelper.AssertQueryRuns(backupConn, `
CREATE TABLE part_c PARTITION OF t_part_1_prt_1 FOR VALUES IN ('c');
CREATE TABLE part_d PARTITION OF t_part_1_prt_1 FOR VALUES IN ('d');
INSERT INTO part_c SELECT a, 0, a, 'c' FROM generate_series(1, 10)a;
INSERT INTO part_d SELECT a, 0, a, 'd' FROM generate_series(1, 20)a;`)

timestamp := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir, "--leaf-partition-data",
"--include-table", "public.part_d")

metadataFileContents := string(getMetdataFileContents(backupDir, timestamp, "metadata.sql"))
Expect(metadataFileContents).ToNot(ContainSubstring("part_c"))
Expect(metadataFileContents).To(ContainSubstring("part_d"))

// The backup contains only the leaf; create the extension in the restore
// database first so the parent hierarchy exists to attach to — TODO(review)
// confirm the restore relies on this pre-existing parent.
testhelper.AssertQueryRuns(restoreConn, "CREATE EXTENSION test_ext6;")
gprestore(gprestorePath, restoreHelperPath, timestamp, "--backup-dir", backupDir, "--redirect-db", "restoredb")

assertDataRestored(restoreConn, map[string]int{
"public.part_d": 20,
})

assertTablesNotRestored(restoreConn, []string{"part_c"})

assertArtifactsCleaned(restoreConn, timestamp)
})

It("ignore partitions specified in exclude-table during restore", func() {
testhelper.AssertQueryRuns(backupConn, `
CREATE TABLE part_c PARTITION OF t_part_1_prt_1 FOR VALUES IN ('c');
CREATE TABLE part_d PARTITION OF t_part_1_prt_1 FOR VALUES IN ('d');
INSERT INTO part_c SELECT a, 0, a, 'c' FROM generate_series(1, 10)a;
INSERT INTO part_d SELECT a, 0, a, 'd' FROM generate_series(1, 20)a;`)

timestamp := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir, "--leaf-partition-data")

// To prevent new partitions from being created during restoration, we need to exclude the root.
gprestore(gprestorePath, restoreHelperPath, timestamp, "--backup-dir", backupDir, "--redirect-db", "restoredb",
"--exclude-table", "public.t_part")
assertTablesNotRestored(restoreConn, []string{"part_c", "part_d"})

assertArtifactsCleaned(restoreConn, timestamp)
})

It("allows the user to exclude partitions that hinder restore if the default partition in the extension contains data", func() {
// Temporarily detach the default partition from the extension so SPLIT
// DEFAULT PARTITION is allowed, creating new_part outside the extension,
// then re-attach it.
testhelper.AssertQueryRuns(backupConn, `
ALTER EXTENSION test_ext6 DROP TABLE d_part_1_prt_extra;
ALTER TABLE d_part SPLIT DEFAULT PARTITION START (10) END (20)
INTO (PARTITION new_part, default partition);
ALTER EXTENSION test_ext6 ADD TABLE d_part_1_prt_extra;`)

timestamp := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir, "--leaf-partition-data")
// Excluding the root d_part skips its partitions on restore.
gprestore(gprestorePath, restoreHelperPath, timestamp, "--backup-dir", backupDir, "--redirect-db", "restoredb",
"--exclude-table", "public.d_part")

assertTablesNotRestored(restoreConn, []string{"d_part_1_prt_new_part"})

assertArtifactsCleaned(restoreConn, timestamp)
})

It("does not lose inheritance during backup if the parent table is in the extension", func() {
// parent is created by test_ext6; child inherits from it outside the
// extension, so the INHERITS clause must survive backup.
testhelper.AssertQueryRuns(backupConn, `
CREATE TABLE child (d int) INHERITS (parent);
`)

defer testhelper.AssertQueryRuns(backupConn, `DROP TABLE child;`)

timestamp := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir)
metadataFileContents := string(getMetdataFileContents(backupDir, timestamp, "metadata.sql"))
Expect(metadataFileContents).To(ContainSubstring("INHERITS (public.parent)"))
gprestore(gprestorePath, restoreHelperPath, timestamp, "--backup-dir", backupDir, "--redirect-db", "restoredb")

// Verify the inheritance link exists in the restored database's catalog.
exists := make([]bool, 0)
err := restoreConn.Select(&exists,
"SELECT EXISTS(SELECT FROM pg_inherits WHERE inhrelid = 'child'::regclass AND inhparent = 'parent'::regclass);")

Expect(err).To(BeNil())
Expect(exists[0]).To(BeTrue())

assertArtifactsCleaned(restoreConn, timestamp)
})
})
})
4 changes: 2 additions & 2 deletions end_to_end/resources/Makefile
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
MODULE = test_extensions

EXTENSION = test_ext1 test_ext2 test_ext3 test_ext4 test_ext5
EXTENSION = test_ext1 test_ext2 test_ext3 test_ext4 test_ext5 test_ext6
DATA = test_ext1--1.0.sql test_ext2--1.0.sql test_ext3--1.0.sql \
test_ext4--1.0.sql test_ext5--1.0.sql
test_ext4--1.0.sql test_ext5--1.0.sql test_ext6--1.0.sql

PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
Expand Down
20 changes: 20 additions & 0 deletions end_to_end/resources/test_ext6--1.0.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
/* src/test/modules/test_extensions/test_ext6--1.0.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION test_ext6" to load this file. \quit

CREATE TABLE t_part (id int, a int, b int , c text)
DISTRIBUTED BY (id)
PARTITION BY RANGE (a)
SUBPARTITION BY LIST (c)
SUBPARTITION TEMPLATE
( SUBPARTITION a_part VALUES ('a'),
SUBPARTITION b_part VALUES ('b'))
(START (0) INCLUSIVE
END (3) EXCLUSIVE
EVERY (1));

CREATE TABLE d_part (a int, b int, c int) DISTRIBUTED BY (a)
PARTITION BY RANGE (b) (START (0) END (3) EVERY (1), default partition extra);
INSERT INTO d_part SELECT a, 10, a FROM generate_series(1, 100)a;

CREATE TABLE parent (a int, b int) DISTRIBUTED BY (a);
3 changes: 3 additions & 0 deletions end_to_end/resources/test_ext6.control
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
comment = 'Test extension 6'
default_version = '1.0'
relocatable = true
12 changes: 7 additions & 5 deletions helper/backup_helper.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package helper

import (
"bufio"
"bytes"
"fmt"
"io"
"os"
Expand Down Expand Up @@ -34,6 +35,7 @@ func doBackupAgent() error {

preloadCreatedPipes(oidList, *copyQueue)
var currentPipe string
var errBuf bytes.Buffer
/*
* It is important that we create the reader before creating the writer
* so that we establish a connection to the first pipe (created by gpbackup)
Expand Down Expand Up @@ -62,7 +64,7 @@ func doBackupAgent() error {
return err
}
if i == 0 {
pipeWriter, writeCmd, err = getBackupPipeWriter()
pipeWriter, writeCmd, err = getBackupPipeWriter(&errBuf)
if err != nil {
logError(fmt.Sprintf("Oid %d: Error encountered getting backup pipe writer: %v", oid, err))
return err
Expand Down Expand Up @@ -124,10 +126,10 @@ func getBackupPipeReader(currentPipe string) (io.Reader, io.ReadCloser, error) {
return reader, readHandle, nil
}

func getBackupPipeWriter() (pipe BackupPipeWriterCloser, writeCmd *exec.Cmd, err error) {
func getBackupPipeWriter(errBuf *bytes.Buffer) (pipe BackupPipeWriterCloser, writeCmd *exec.Cmd, err error) {
var writeHandle io.WriteCloser
if *pluginConfigFile != "" {
writeCmd, writeHandle, err = startBackupPluginCommand()
writeCmd, writeHandle, err = startBackupPluginCommand(errBuf)
} else {
writeHandle, err = os.Create(*dataFile)
}
Expand Down Expand Up @@ -155,7 +157,7 @@ func getBackupPipeWriter() (pipe BackupPipeWriterCloser, writeCmd *exec.Cmd, err
return nil, nil, fmt.Errorf("unknown compression type '%s' (compression level %d)", *compressionType, *compressionLevel)
}

func startBackupPluginCommand() (*exec.Cmd, io.WriteCloser, error) {
func startBackupPluginCommand(errBuf *bytes.Buffer) (*exec.Cmd, io.WriteCloser, error) {
pluginConfig, err := utils.ReadPluginConfig(*pluginConfigFile)
if err != nil {
// error logging handled by calling functions
Expand All @@ -169,7 +171,7 @@ func startBackupPluginCommand() (*exec.Cmd, io.WriteCloser, error) {
// error logging handled by calling functions
return nil, nil, err
}
writeCmd.Stderr = &errBuf
writeCmd.Stderr = errBuf
err = writeCmd.Start()
if err != nil {
// error logging handled by calling functions
Expand Down
Loading

0 comments on commit 6a7e453

Please sign in to comment.