
Commit 66269b5

branch-4.0: [fix](test) Validate database is empty before DROP DATABASE (#58344) (#58423)
1 parent 6a23383 commit 66269b5

2 files changed: +243 -1 lines changed
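In short, the fix makes the suites defensive about shared state in external catalogs: identifiers get a random suffix and any leftover database is force-dropped before being recreated. A minimal sketch of the pattern as it would appear inside a regression-test suite (names here are placeholders, not from the commit):

    import java.util.concurrent.ThreadLocalRandom

    // Random suffix keeps parallel runs from colliding on the same name.
    def db_name = "example_db_" + ThreadLocalRandom.current().nextInt(100)
    // Force-drop any leftover from an earlier run, then start clean.
    sql """ DROP DATABASE IF EXISTS ${db_name} FORCE; """
    sql """ CREATE DATABASE ${db_name}; """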
Lines changed: 141 additions & 0 deletions
@@ -0,0 +1,141 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
import static groovy.test.GroovyAssert.shouldFail;
import java.util.concurrent.ThreadLocalRandom

suite("azure_blob_all_test", "p2,external,new_catalog_property") {

    String abfsAzureAccountName = context.config.otherConfigs.get("abfsAccountName")
    String abfsAzureAccountKey = context.config.otherConfigs.get("abfsAccountKey")
    String abfsContainer = context.config.otherConfigs.get("abfsContainer")
    String abfsEndpoint = context.config.otherConfigs.get("abfsEndpoint")
    def abfs_azure_config_props = """
        "provider" = "azure",
        "azure.endpoint" = "${abfsEndpoint}",
        "azure.account_name" = "${abfsAzureAccountName}",
        "azure.account_key" = "${abfsAzureAccountKey}"
    """

    // Iceberg FS
    def testIcebergTest = { String storage_props, String iceberg_fs_catalog_name, String protocol, String hdfsLocationType ->
        // Suffix the catalog name with a random int so concurrent runs do not collide.
        iceberg_fs_catalog_name = iceberg_fs_catalog_name + "_" + ThreadLocalRandom.current().nextInt(100)
        sql """
            drop catalog if exists ${iceberg_fs_catalog_name};
        """
        sql """
            create catalog ${iceberg_fs_catalog_name} properties(
                'type'='iceberg',
                'iceberg.catalog.type'='hadoop',
                'warehouse'='${protocol}://${abfsContainer}@${abfsAzureAccountName}.${hdfsLocationType}.core.windows.net/regression/external/azure/${protocol}/iceberg_fs_warehouse/',
                ${storage_props}
            );
        """

        sql """
            switch ${iceberg_fs_catalog_name}
        """

        // Force-drop any database left behind by a previous run before recreating it.
        sql """
            drop database if exists ${iceberg_fs_catalog_name}_db_test force;
        """
        sql """
            create database ${iceberg_fs_catalog_name}_db_test;
        """
        sql """
            use ${iceberg_fs_catalog_name}_db_test;
        """
        sql """
            create table ${iceberg_fs_catalog_name}_table_test (id int, name string)
        """
        sql """
            insert into ${iceberg_fs_catalog_name}_table_test values(1, 'iceberg_fs_abfs_test');
        """
        def query_result = sql """
            select count(1) from ${iceberg_fs_catalog_name}_table_test;
        """
        assert query_result[0][0] == 1

        // Clean up table, database, and catalog so the suite leaves nothing behind.
        sql """
            drop table if exists ${iceberg_fs_catalog_name}_table_test;
        """
        sql """
            drop database if exists ${iceberg_fs_catalog_name}_db_test force;
        """
        sql """
            drop catalog if exists ${iceberg_fs_catalog_name};
        """
    }

    // Run the Iceberg FS test over both the abfs and abfss protocols.
    testIcebergTest(abfs_azure_config_props, "iceberg_fs_abfs_catalog", "abfs", "dfs")
    testIcebergTest(abfs_azure_config_props, "iceberg_fs_abfss_catalog", "abfss", "dfs")

    // Paimon FS
    def testPaimonTest = { String storage_props, String paimon_catalog_name, String protocol, String hdfsLocationType, String queryTbl ->
        sql """
            drop catalog if exists ${paimon_catalog_name};
        """
        sql """
            create catalog ${paimon_catalog_name} properties(
                'type'='paimon',
                'paimon.catalog.type'='filesystem',
                'warehouse'='${protocol}://${abfsContainer}@${abfsAzureAccountName}.${hdfsLocationType}.core.windows.net/regression/azure/${protocol}/paimon_fs_warehouse/',
                ${abfs_azure_config_props}
            );
        """

        sql """
            switch ${paimon_catalog_name}
        """

        def query_result = sql """
            select * from ${paimon_catalog_name}.${queryTbl}
        """
        println query_result

        sql """
            drop catalog if exists ${paimon_catalog_name};
        """
    }

    // Run the Paimon FS tests with the native (non-JNI) scanner.
    sql """
        set force_jni_scanner=false;
    """

    def paimon_fs_abfss_db_tbl = "paimon_fs_abfss_test_db.external_test_table"
    def paimon_fs_abfs_db_tbl = "paimon_fs_abfs_test_db.external_test_table"
    testPaimonTest(abfs_azure_config_props, "paimon_fs_abfs_catalog", "abfs", "dfs", paimon_fs_abfs_db_tbl)
    testPaimonTest(abfs_azure_config_props, "paimon_fs_abfss_catalog", "abfss", "dfs", paimon_fs_abfss_db_tbl)

    // TODO: Enable this once BE's HDFS dependency management is fully ready.
    // This module requires higher-version JARs to support JDK 17 access.
    /* sql """
        set force_jni_scanner=true;
    """
    testPaimonTest(abfs_azure_config_props, "paimon_fs_abfs_catalog", "abfs", "dfs", paimon_fs_abfs_db_tbl)
    testPaimonTest(abfs_azure_config_props, "paimon_fs_abfss_catalog", "abfss", "dfs", paimon_fs_abfss_db_tbl) */
}
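For reference, the warehouse locations above follow the ADLS Gen2 ABFS URI layout, protocol://container@account.suffix.core.windows.net/path. A small helper sketch (hypothetical, not part of the suite) that assembles such a URI from its parts:

    // Hypothetical helper: assembles an abfs/abfss warehouse URI.
    String warehouseUri(String protocol, String container, String account,
                        String locationType, String path) {
        return "${protocol}://${container}@${account}.${locationType}.core.windows.net/${path}"
    }

    assert warehouseUri("abfss", "mycontainer", "myaccount", "dfs", "warehouse") ==
            "abfss://mycontainer@myaccount.dfs.core.windows.net/warehouse"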

regression-test/suites/external_table_p2/refactor_catalog_param/iceberg_on_hms_and_filesystem_and_dlf.groovy

Lines changed: 102 additions & 1 deletion
@@ -14,6 +14,9 @@
 // KIND, either express or implied. See the License for the
 // specific language governing permissions and limitations
 // under the License.
+
+import java.util.concurrent.ThreadLocalRandom
+
 import static groovy.test.GroovyAssert.shouldFail;
 suite("iceberg_on_hms_and_filesystem_and_dlf", "p2,external,new_catalog_property") {

@@ -33,7 +36,8 @@ suite("iceberg_on_hms_and_filesystem_and_dlf", "p2,external,new_catalog_property
     switch ${catalog_name};
     """

-    def db_name = prefix + "_db"
+    def db_name = prefix + "_db" + ThreadLocalRandom.current().nextInt(100);
+    // Check if database exists
     sql """
     DROP DATABASE IF EXISTS ${db_name} FORCE;
     """
@@ -65,6 +69,103 @@ suite("iceberg_on_hms_and_filesystem_and_dlf", "p2,external,new_catalog_property
     SELECT * FROM ${table_name};
     """
     assert queryResult.size() == 1
+    def branch_name = prefix + "_branch"
+    def tag_name = prefix + "_tag"
+    sql """
+    ALTER TABLE ${table_name} CREATE BRANCH ${branch_name};
+    """
+    sql """
+    ALTER TABLE ${table_name} CREATE TAG ${tag_name};
+    """
+    sql """
+    INSERT OVERWRITE TABLE ${table_name} VALUES (1, 'a', 10), (2, 'b', 20), (3, 'c', 30)
+    """
+    def originalQueryResult = sql """
+    SELECT * FROM ${table_name};
+    """
+    assert originalQueryResult.size() == 3
+    sql """
+    insert into ${table_name}@branch(${branch_name}) values (4, 'd', 40)
+    """
+    def branchQueryResult = sql """
+    SELECT * FROM ${table_name}@branch(${branch_name});
+    """
+    assert branchQueryResult.size() == 2
+
+    def tagQueryResult = sql """
+    SELECT * FROM ${table_name}@tag(${tag_name});
+    """
+    assert tagQueryResult.size() == 1
+    sql """
+    ALTER TABLE ${table_name} drop branch ${branch_name};
+    """
+    sql """
+    ALTER TABLE ${table_name} drop tag ${tag_name};
+    """
+    try {
+        def sys_query_result = sql """
+        SELECT * FROM ${table_name}$files;
+        """
+        println sys_query_result
+        println "iceberg_meta_result SUCCESS" + catalog_name
+
+        def iceberg_meta_result = sql """
+        SELECT snapshot_id FROM iceberg_meta(
+            'table' = '${catalog_name}.${db_name}.${table_name}',
+            'query_type' = 'snapshots'
+        ) order by committed_at desc;
+        """
+        def first_snapshot_id = iceberg_meta_result.get(0).get(0);
+        def time_travel = sql """
+        SELECT * FROM ${table_name} FOR VERSION AS OF ${first_snapshot_id};
+        """
+        println time_travel
+        println "iceberg_time_travel SUCCESS" + catalog_name
+    } catch (Exception e) {
+        println catalog_name + "system info error"
+    }
+
+    sql """
+    DROP TABLE ${table_name};
+    """
+    // partition table
+    table_name = prefix + "_partition_table"
+    sql """
+    CREATE TABLE ${table_name} (
+        `ts` DATETIME COMMENT 'ts',
+        `col1` BOOLEAN COMMENT 'col1',
+        `col2` INT COMMENT 'col2',
+        `col3` BIGINT COMMENT 'col3',
+        `col4` FLOAT COMMENT 'col4',
+        `col5` DOUBLE COMMENT 'col5',
+        `col6` DECIMAL(9,4) COMMENT 'col6',
+        `col7` STRING COMMENT 'col7',
+        `col8` DATE COMMENT 'col8',
+        `col9` DATETIME COMMENT 'col9',
+        `pt1` STRING COMMENT 'pt1',
+        `pt2` STRING COMMENT 'pt2'
+    )
+    PARTITION BY LIST (day(ts), pt1, pt2) ()
+    PROPERTIES (
+        'write-format'='orc',
+        'compression-codec'='zlib'
+    );
+    """
+
+    sql """
+    INSERT OVERWRITE TABLE ${table_name} values
+    ('2023-01-01 00:00:00', true, 1, 1, 1.0, 1.0, 1.0000, '1', '2023-01-01', '2023-01-01 00:00:00', 'a', '1'),
+    ('2023-01-02 00:00:00', false, 2, 2, 2.0, 2.0, 2.0000, '2', '2023-01-02', '2023-01-02 00:00:00', 'b', '2'),
+    ('2023-01-03 00:00:00', true, 3, 3, 3.0, 3.0, 3.0000, '3', '2023-01-03', '2023-01-03 00:00:00', 'c', '3');
+    """
+    def partitionQueryResult = sql """
+    SELECT * FROM ${table_name} WHERE pt1='a' and pt2='1';
+    """
+    assert partitionQueryResult.size() == 1

     sql """
     DROP TABLE ${table_name};
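A note on the branch/tag assertions above: the table holds a single row when the branch and tag are created, and the later INSERT OVERWRITE rewrites only the main branch. The branch therefore still sees the original row (two rows after the branch-targeted insert), while the tag stays frozen at one. A minimal sketch of the read syntax, with placeholder names:

    // Placeholder names; reads one branch and one tag of an Iceberg table.
    def onBranch = sql """ SELECT * FROM my_table@branch(my_branch); """
    def atTag = sql """ SELECT * FROM my_table@tag(my_tag); """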